pax_global_header00006660000000000000000000000064147534104360014521gustar00rootroot0000000000000052 comment=36d89a300ef730f64d21f5ec67824e9bf1f5b1fd mpi4py-4.0.3/000077500000000000000000000000001475341043600127475ustar00rootroot00000000000000mpi4py-4.0.3/.appveyor.yml000066400000000000000000000030631475341043600154170ustar00rootroot00000000000000# https://ci.appveyor.com/project/mpi4py/mpi4py image: Visual Studio 2019 environment: matrix: - PYTHON: "C:\\Python37-x64" - PYTHON: "C:\\Python38-x64" - PYTHON: "C:\\Python39-x64" - PYTHON: "C:\\Python310-x64" - PYTHON: "C:\\Python311-x64" - PYTHON: "C:\\Python312-x64" - PYTHON: "C:\\Python313-x64" clone_depth: 1 branches: only: - master - maint - ci/all - ci/appveyor init: - "ECHO Python from %PYTHON%" install: # Python - "%PYTHON%\\python.exe --version" - "%PYTHON%\\python.exe -m pip install --upgrade pip setuptools wheel" - "%PYTHON%\\python.exe -m pip --version" - "%PYTHON%\\python.exe -m wheel version" # Microsoft MPI - "powershell .azure\\install-msmpi.ps1" - "SetEnvMPI.cmd" build: false build_script: - "%PYTHON%\\python.exe -m pip wheel -v --wheel-dir=dist ." test: false test_script: - "%PYTHON%\\python.exe -m pip install mpi4py --no-cache-dir --no-index --find-links=dist" - "\"%MSMPI_BIN%\\mpiexec.exe\" -n 1 %PYTHON%\\python.exe -m mpi4py --mpi-std-version" - "\"%MSMPI_BIN%\\mpiexec.exe\" -n 1 %PYTHON%\\python.exe -m mpi4py --mpi-lib-version" - "\"%MSMPI_BIN%\\mpiexec.exe\" -n 1 %PYTHON%\\python.exe %CD%\\test\\main.py -v" - "\"%MSMPI_BIN%\\mpiexec.exe\" -n 1 %PYTHON%\\python.exe %CD%\\demo\\futures\\test_futures.py -v" - "\"%MSMPI_BIN%\\mpiexec.exe\" -n 2 %PYTHON%\\python.exe %CD%\\demo\\futures\\test_futures.py -v" - "%PYTHON%\\python.exe %CD%\\demo\\test-run\\test_run.py -v" - "%PYTHON%\\python.exe -m pip uninstall --yes mpi4py" artifacts: - path: dist\* mpi4py-4.0.3/.azure/000077500000000000000000000000001475341043600141535ustar00rootroot00000000000000mpi4py-4.0.3/.azure/install-mpich.sh000066400000000000000000000002171475341043600172530ustar00rootroot00000000000000#!/bin/bash set -e case `uname` in Linux) set -x; sudo apt install -y mpich libmpich-dev ;; Darwin) set -x; brew install mpich ;; esac mpi4py-4.0.3/.azure/install-msmpi.ps1000066400000000000000000000073021475341043600173730ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com $ErrorActionPreference = "Stop" $MS_DOWNLOAD_URL = "https://download.microsoft.com/download/" $MSMPI_HASH_URL_V0500 = "3/7/6/3764A48C-5C4E-4E4D-91DA-68CED9032EDE/" $MSMPI_HASH_URL_V0600 = "6/4/A/64A7852A-A8C3-476D-908C-30501F761DF3/" $MSMPI_HASH_URL_V0700 = "D/7/B/D7BBA00F-71B7-436B-80BC-4D22F2EE9862/" $MSMPI_HASH_URL_V0710 = "E/8/A/E8A080AF-040D-43FF-97B4-065D4F220301/" $MSMPI_HASH_URL_V0800 = "B/2/E/B2EB83FE-98C2-4156-834A-E1711E6884FB/" $MSMPI_HASH_URL_V0810 = "D/B/B/DBB64BA1-7B51-43DB-8BF1-D1FB45EACF7A/" $MSMPI_HASH_URL_V0900 = "2/E/C/2EC96D7F-687B-4613-80F6-E10F670A2D97/" $MSMPI_HASH_URL_V0901 = "4/A/6/4A6AAED8-200C-457C-AB86-37505DE4C90D/" $MSMPI_HASH_URL_V1000 = "A/E/0/AE002626-9D9D-448D-8197-1EA510E297CE/" $MSMPI_HASH_URL_V1011 = "2/9/e/29efe9b1-16d7-4912-a229-6734b0c4e235/" $MSMPI_HASH_URL_V1012 = "a/5/2/a5207ca5-1203-491a-8fb8-906fd68ae623/" $MSMPI_HASH_URL_V1013 = "7/2/7/72731ebb-b63c-4170-ade7-836966263a8f/" $MSMPI_HASH_URL = $MSMPI_HASH_URL_V1013 $MSMPI_BASE_URL = $MS_DOWNLOAD_URL + $MSMPI_HASH_URL $DOWNLOADS = "C:\Downloads\MSMPI" function Download ($url, $filename, $destdir) { if ($destdir) { $item = New-Item $destdir -ItemType directory -Force $destdir = 
$item.FullName } else { $destdir = $pwd.Path } $filepath = Join-Path $destdir $filename if (Test-Path $filepath) { Write-Host "Reusing" $filename "from" $destdir return $filepath } Write-Host "Downloading" $filename "from" $url $webclient = New-Object System.Net.WebClient foreach($i in 1..3) { try { $webclient.DownloadFile($url, $filepath) Write-Host "File saved at" $filepath return $filepath } Catch [Exception] { Start-Sleep 1 } } Write-Host "Failed to download" $filename "from" $url return $null } function InstallMicrosoftMPISDK ($baseurl, $filename) { Write-Host "Installing Microsoft MPI SDK" $url = $baseurl + $filename $filepath = Download $url $filename $DOWNLOADS Write-Host "Installing" $filename $prog = "msiexec.exe" $args = "/quiet /qn /i $filepath" Write-Host "Executing:" $prog $args Start-Process -FilePath $prog -ArgumentList $args -Wait Write-Host "Microsoft MPI SDK installation complete" } function InstallMicrosoftMPIRuntime ($baseurl, $filename) { Write-Host "Installing Microsoft MPI Runtime" $url = $baseurl + $filename $filepath = Download $url $filename $DOWNLOADS Write-Host "Installing" $filename $prog = $filepath $args = "-unattend" Write-Host "Executing:" $prog $args Start-Process -FilePath $prog -ArgumentList $args -Wait Write-Host "Microsoft MPI Runtime installation complete" } function SaveMicrosoftMPIEnvironment ($filepath) { Write-Host "Saving Microsoft MPI environment variables to" $filepath $envlist = @("MSMPI_BIN", "MSMPI_INC", "MSMPI_LIB32", "MSMPI_LIB64") $stream = [IO.StreamWriter] $filepath foreach ($variable in $envlist) { $value = [Environment]::GetEnvironmentVariable($variable, "Machine") if ($value) { $stream.WriteLine("SET $variable=$value") } if ($value) { Write-Host "$variable=$value" } } $stream.Close() } function InstallMicrosoftMPI () { InstallMicrosoftMPISDK $MSMPI_BASE_URL "msmpisdk.msi" InstallMicrosoftMPIRuntime $MSMPI_BASE_URL "MSMpiSetup.exe" SaveMicrosoftMPIEnvironment "SetEnvMPI.cmd" $MSMPI_BIN = [Environment]::GetEnvironmentVariable("MSMPI_BIN", "Machine") if ($Env:GITHUB_PATH) { echo "$MSMPI_BIN" >> $Env:GITHUB_PATH } Write-Host "##vso[task.prependpath]$MSMPI_BIN"; } function main () { InstallMicrosoftMPI } main mpi4py-4.0.3/.azure/install-msmpi.sh000066400000000000000000000001131475341043600172730ustar00rootroot00000000000000#!/bin/bash _dir=$(dirname ${BASH_SOURCE[0]}) pwsh $_dir/install-msmpi.ps1 mpi4py-4.0.3/.azure/install-openmpi.sh000066400000000000000000000013241475341043600176220ustar00rootroot00000000000000#!/bin/bash set -e case `uname` in Linux) set -x; sudo apt install -y openmpi-bin libopenmpi-dev ;; Darwin) set -x; brew install openmpi ;; esac openmpi_mca_params=$HOME/.openmpi/mca-params.conf mkdir -p $(dirname $openmpi_mca_params) echo plm_ssh_agent=false >> $openmpi_mca_params echo btl=tcp,self >> $openmpi_mca_params echo mpi_yield_when_idle=true >> $openmpi_mca_params echo rmaps_base_oversubscribe=true >> $openmpi_mca_params echo btl_base_warn_component_unused=false >> $openmpi_mca_params echo btl_vader_single_copy_mechanism=none >> $openmpi_mca_params prte_mca_params=$HOME/.prte/mca-params.conf mkdir -p $(dirname $prte_mca_params) echo rmaps_default_mapping_policy=:oversubscribe >> $prte_mca_params mpi4py-4.0.3/.azure/pipelines.yml000066400000000000000000000044531475341043600166740ustar00rootroot00000000000000trigger: batch: false branches: include: - master - maint - ci/all - ci/azure jobs: - job: Linux pool: vmImage: 'Ubuntu-22.04' strategy: matrix: Python37_MPICH: PYTHON_VERSION: '3.7' MPI: 'mpich' Python37_OpenMPI: 
PYTHON_VERSION: '3.7' MPI: 'openmpi' Python38_MPICH: PYTHON_VERSION: '3.8' MPI: 'mpich' Python38_OpenMPI: PYTHON_VERSION: '3.8' MPI: 'openmpi' Python39_MPICH: PYTHON_VERSION: '3.9' MPI: 'mpich' Python39_OpenMPI: PYTHON_VERSION: '3.9' MPI: 'openmpi' Python310_MPICH: PYTHON_VERSION: '3.10' MPI: 'mpich' Python310_OpenMPI: PYTHON_VERSION: '3.10' MPI: 'openmpi' steps: - bash: echo 127.0.0.1 `hostname` | sudo tee -a /etc/hosts > /dev/null displayName: 'Configure hostname' - template: steps.yml - job: macOS pool: vmImage: 'macOS-13' strategy: matrix: Python37_MPICH: PYTHON_VERSION: '3.7' MPI: 'mpich' Python37_OpenMPI: PYTHON_VERSION: '3.7' MPI: 'openmpi' Python38_MPICH: PYTHON_VERSION: '3.8' MPI: 'mpich' Python38_OpenMPI: PYTHON_VERSION: '3.8' MPI: 'openmpi' Python39_MPICH: PYTHON_VERSION: '3.9' MPI: 'mpich' Python39_OpenMPI: PYTHON_VERSION: '3.9' MPI: 'openmpi' Python310_MPICH: PYTHON_VERSION: '3.10' MPI: 'mpich' Python310_OpenMPI: PYTHON_VERSION: '3.10' MPI: 'openmpi' steps: - bash: echo 127.0.0.1 `hostname` | sudo tee -a /etc/hosts > /dev/null displayName: 'Configure hostname' - template: steps.yml - job: Windows pool: vmImage: 'windows-2019' strategy: matrix: Python37_MSMPI: PYTHON_VERSION: '3.7' MPI: 'msmpi' Python38_MSMPI: PYTHON_VERSION: '3.8' MPI: 'msmpi' Python39_MSMPI: PYTHON_VERSION: '3.9' MPI: 'msmpi' Python310_MSMPI: PYTHON_VERSION: '3.10' MPI: 'msmpi' steps: - template: steps.yml - publish: dist artifact: 'Windows-py$(PYTHON_VERSION)-$(MPI)' displayName: 'Publish package artifacts' condition: and(succeeded(), eq(variables['Agent.OS'], 'Windows_NT')) mpi4py-4.0.3/.azure/steps.yml000066400000000000000000000027661475341043600160470ustar00rootroot00000000000000steps: - checkout: self clean: true fetchDepth: 1 - bash: source .azure/install-$(MPI).sh displayName: 'Install MPI' - task: UsePythonVersion@0 inputs: versionSpec: $(PYTHON_VERSION) architecture: x64 displayName: 'Use Python $(PYTHON_VERSION)' - script: python -m pip install --upgrade pip setuptools wheel displayName: 'Install packaging tools' - script: python -m pip wheel -v --wheel-dir=dist . 
displayName: 'Build package' - script: python -m pip install --upgrade -r conf/requirements-test.txt displayName: 'Install test dependencies' - script: python -m pip install mpi4py --no-index --find-links=dist displayName: 'Install package for testing' - script: mpiexec -n 1 python -m mpi4py --mpi-std-version displayName: 'Test package' - script: mpiexec -n 1 python -m mpi4py --mpi-lib-version displayName: 'Test package' - script: mpiexec -n 1 python test/main.py -v displayName: 'Test package' - script: mpiexec -n 2 python test/main.py -v -f -e spawn displayName: 'Test package' - script: mpiexec -n 1 python demo/futures/test_futures.py -v displayName: 'Test subpackage' - script: mpiexec -n 2 python demo/futures/test_futures.py -v displayName: 'Test subpackage' - script: python demo/test-run/test_run.py -v displayName: 'Test subpackage' - script: bash demo/init-fini/run.sh displayName: 'Test extra' - script: bash demo/check-mpiexec/run.sh displayName: 'Test extra' - script: python -m pip uninstall --yes mpi4py displayName: 'Uninstall package after testing' mpi4py-4.0.3/.circleci/000077500000000000000000000000001475341043600146025ustar00rootroot00000000000000mpi4py-4.0.3/.circleci/anaconda.sh000066400000000000000000000045661475341043600167150ustar00rootroot00000000000000#!/bin/bash RUN() { echo + $@; $@; } RUN export ANACONDA=${ANACONDA-/opt/conda} install-miniforge() { local PROJECT=https://github.com/conda-forge/miniforge local BASEURL=$PROJECT/releases/latest/download local INSTALLER=Miniforge3-Linux-x86_64.sh RUN curl -sSL -o ~/$INSTALLER $BASEURL/$INSTALLER RUN bash ~/$INSTALLER -b -f -p $ANACONDA RUN source $ANACONDA/bin/activate base RUN conda config --set channel_priority strict RUN conda config --set show_channel_urls yes RUN conda deactivate } parse-args() { unset PY unset MPI unset RUNTESTS unset COVERAGE for arg in $@; do case $arg in python=?*) PY="${arg#*=}";; py=?*) PY="${arg#*=}";; MPI=?*) MPI="${arg#*=}";; mpi=?*) MPI="${arg#*=}";; runtests=?*) RUNTESTS="${arg#*=}";; coverage=?*) COVERAGE="${arg#*=}";; *) break esac done PY=${PY-3} MPI=${MPI-mpich} ENV=py$PY-$MPI RUNTESTS=${RUNTESTS-no} COVERAGE=${COVERAGE-no} } create-env() { parse-args $@ RUN rm -rf $ANACONDA/envs/$ENV RUN source $ANACONDA/bin/activate base local packages=(python=$PY $MPI $MPI-mpicc setuptools numpy cython coverage) RUN mamba create --yes -n $ENV ${packages[@]} RUN conda deactivate } package-install() { parse-args $@ RUN source $ANACONDA/bin/activate $ENV RUN python setup.py build_src --force RUN python setup.py install RUN python setup.py --quiet clean --all RUN conda deactivate } package-testing() { parse-args $@ RUN source $ANACONDA/bin/activate $ENV RUN python -m mpi4py --version if [[ "$RUNTESTS" == "yes" ]]; then if [[ "$MPI" == "mpich" ]]; then local P=2; else local P=5; fi local MPIEXEC=${MPIEXEC-mpiexec} RUN $MPIEXEC -n 1 python $PWD/test/main.py RUN $MPIEXEC -n $P python $PWD/test/main.py -f RUN $MPIEXEC -n 1 python $PWD/demo/futures/test_futures.py RUN $MPIEXEC -n $P python $PWD/demo/futures/test_futures.py -f RUN $MPIEXEC -n 1 python -m mpi4py.futures $PWD/demo/futures/test_futures.py RUN $MPIEXEC -n $P python -m mpi4py.futures $PWD/demo/futures/test_futures.py -f RUN python $PWD/demo/test-run/test_run.py fi if [[ "$COVERAGE" == "yes" ]]; then RUN test/coverage.sh RUN coverage report RUN coverage xml RUN mv coverage.xml coverage-py$PY-$MPI-$(uname).xml fi RUN conda deactivate } 
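# Illustrative usage of the helper functions defined above (the py=/mpi= values
# below are placeholders; in CI, the .circleci/step-setup, step-install and
# step-coverage wrappers source .circleci/env.sh, which pulls in this file, and
# pass py=... mpi=... taken from .circleci/config.yml):
#   create-env      py=3.12 mpi=mpich
#   package-install py=3.12 mpi=mpich
#   package-testing py=3.12 mpi=mpich coverage=yes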
mpi4py-4.0.3/.circleci/config.yml000066400000000000000000000022651475341043600165770ustar00rootroot00000000000000version: 2.1 orbs: codecov: codecov/codecov@4.0.1 jobs: test: parameters: os: type: executor py: type: string mpi: type: string executor: << parameters.os >> steps: - checkout - run: .circleci/step-setup py=<< parameters.py >> mpi=<< parameters.mpi >> - run: .circleci/step-install py=<< parameters.py >> mpi=<< parameters.mpi >> - run: .circleci/step-coverage py=<< parameters.py >> mpi=<< parameters.mpi >> - run: apt-get update && apt-get install --yes curl gnupg # codecov/upload - codecov/upload executors: linux: docker: - image: condaforge/miniforge3 workflows: test-all: jobs: - test: filters: branches: only: - master - maint - ci/all - ci/circle matrix: parameters: os: - linux py: - "3.8" - "3.9" - "3.10" - "3.11" - "3.12" - "3.13" mpi: - "mpich" - "openmpi" mpi4py-4.0.3/.circleci/env.sh000066400000000000000000000010601475341043600157230ustar00rootroot00000000000000#!/bin/bash export CFLAGS="-O0 -Wp,-U_FORTIFY_SOURCE" export CPPFLAGS=$CFLAGS export MPI4PY_COVERAGE_PLUGIN=cycoverage export PYTHONPATH=$PWD/conf export HYDRA_LAUNCHER=fork export OMPI_MCA_plm_ssh_agent=false export OMPI_MCA_pml=ob1 export OMPI_MCA_btl=tcp,self export OMPI_MCA_mpi_yield_when_idle=true export OMPI_MCA_btl_base_warn_component_unused=false export PRTE_MCA_rmaps_default_mapping_policy=:oversubscribe export OMPI_ALLOW_RUN_AS_ROOT=1 export OMPI_ALLOW_RUN_AS_ROOT_CONFIRM=1 export ANACONDA=${ANACONDA-/opt/conda} source .circleci/anaconda.sh mpi4py-4.0.3/.circleci/step-coverage000077500000000000000000000001171475341043600172730ustar00rootroot00000000000000#!/bin/bash set -e source $(dirname $0)/env.sh package-testing coverage=yes $@ mpi4py-4.0.3/.circleci/step-install000077500000000000000000000001021475341043600171400ustar00rootroot00000000000000#!/bin/bash set -e source $(dirname $0)/env.sh package-install $@ mpi4py-4.0.3/.circleci/step-setup000077500000000000000000000000751475341043600166430ustar00rootroot00000000000000#!/bin/bash set -e source $(dirname $0)/env.sh create-env $@ mpi4py-4.0.3/.codespellrc000066400000000000000000000004011475341043600152420ustar00rootroot00000000000000# https://github.com/codespell-project/codespell #-*- mode: conf -*- [codespell] check-hidden = check-filenames = skip = build,.eggs,.git,.tox,.*_cache,htmlcov,*/mpi4py/MPI.c ignore-words-list = assertIn, bootup, ccompiler, improbe, inout, nd, mpi4py-4.0.3/.coveragerc000066400000000000000000000004121475341043600150650ustar00rootroot00000000000000# https://coverage.readthedocs.io # -*- mode: conf -*- [run] source = mpi4py branch = True parallel = True plugins = ${MPI4PY_COVERAGE_PLUGIN} [paths] source = src/mpi4py */mpi4py [report] exclude_lines = .*#\s*(pragma)[:\s]?\s*(no)\s*(cover) .*#\s*~>\s* mpi4py-4.0.3/.cython-lint.toml000066400000000000000000000004261475341043600161740ustar00rootroot00000000000000[tool.cython-lint] exclude = "libmpi\\.pxd" ignore = [ "E201", # whitespace after '(' "E202", # whitespace before ')' "E221", # multiple spaces before operator "E222", # multiple spaces after operator "E701", # multiple statements on one line (colon) ] mpi4py-4.0.3/.flake8000066400000000000000000000005071475341043600141240ustar00rootroot00000000000000[flake8] ignore = W503,U101 per-file-ignores = conf/*: D docs/*: D src/mpi4py/futures/_core.py: D,B036 src/mpi4py/futures/aplus.py: B036,D402 src/mpi4py/futures/pool.py: D402 src/mpi4py/futures/util.py: B036 src/mpi4py/typing.py: U100 src/mpi4py/util/pkl5.py: U100 
unused-arguments-ignore-variadic-names = True mpi4py-4.0.3/.github/000077500000000000000000000000001475341043600143075ustar00rootroot00000000000000mpi4py-4.0.3/.github/dependabot.yml000066400000000000000000000003011475341043600171310ustar00rootroot00000000000000version: 2 updates: - package-ecosystem: github-actions directory: / schedule: interval: weekly - package-ecosystem: pip directory: / schedule: interval: weekly mpi4py-4.0.3/.github/workflows/000077500000000000000000000000001475341043600163445ustar00rootroot00000000000000mpi4py-4.0.3/.github/workflows/ci-build.yml000066400000000000000000000077731475341043600205750ustar00rootroot00000000000000name: ci-build on: # yamllint disable-line rule:truthy schedule: - cron: '0 3 * * 0' workflow_call: inputs: py: description: 'Python version' required: false default: 3 type: string workflow_dispatch: inputs: py: description: 'Python version' required: true default: 3 type: string permissions: contents: read jobs: build: runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: backend: - skbuild - mesonpy os: - ubuntu-24.04 - ubuntu-22.04 - macos-15 - macos-14 - macos-13 - windows-2022 - windows-2019 mpi: - mpich - openmpi - impi - msmpi exclude: - backend: mesonpy os: windows-2022 mpi: impi - backend: mesonpy os: windows-2019 mpi: impi - os: ubuntu-24.04 mpi: msmpi - os: ubuntu-22.04 mpi: msmpi - os: macos-15 mpi: impi - os: macos-15 mpi: msmpi - os: macos-14 mpi: impi - os: macos-14 mpi: msmpi - os: macos-13 mpi: impi - os: macos-13 mpi: msmpi - os: windows-2022 mpi: mpich - os: windows-2022 mpi: openmpi - os: windows-2019 mpi: mpich - os: windows-2019 mpi: openmpi steps: - name: Checkout uses: actions/checkout@v4 - name: Setup MSVC if: matrix.backend == 'mesonpy' && runner.os == 'Windows' uses: bus1/cabuild/action/msdevshell@v1 with: architecture: x64 - name: Setup MPI (${{ matrix.mpi }}) uses: mpi4py/setup-mpi@v1 with: mpi: ${{ matrix.mpi }} - name: Setup Python (${{ github.event.inputs.py || 3 }}) uses: actions/setup-python@v5 with: python-version: ${{ github.event.inputs.py || 3 }} cache: pip cache-dependency-path: | conf/requirements-build-cython.txt conf/requirements-build-${{ matrix.backend }}.txt - name: Upgrade pip run: python -m pip install -U pip - name: Install Python packages (build) run: python -m pip install -U build - name: Build sdist and wheel (${{ matrix.backend }}) run: python -m build env: MPI4PY_BUILD_BACKEND: ${{ matrix.backend }} MPI4PY_LOCAL_VERSION: ${{ matrix.mpi }} - name: Upload wheel uses: actions/upload-artifact@v4 with: name: mpi4py-${{ matrix.backend }}-${{ matrix.os }}-${{ matrix.mpi }} path: dist/mpi4py-*.whl - name: Install wheel run: python -m pip install mpi4py --verbose --no-index --find-links=dist - name: Test wheel after install (cmdline) run: mpiexec -n 1 python -m mpi4py --mpi-lib-version - name: Test wheel after install (test_package) run: mpiexec -n 1 python test/main.py test_package - name: Test wheel after install (helloworld) run: mpiexec -n 2 python -m mpi4py.bench helloworld - name: Uninstall wheel after testing run: python -m pip uninstall mpi4py --verbose --yes - name: Install package with pip (${{ matrix.backend }}) run: python -m pip install . 
--verbose env: MPI4PY_BUILD_BACKEND: ${{ matrix.backend }} MPI4PY_LOCAL_VERSION: ${{ matrix.mpi }} - name: Test wheel after install (cmdline) run: mpiexec -n 1 python -m mpi4py --mpi-lib-version - name: Test package after install (test_package) run: mpiexec -n 1 python test/main.py test_package - name: Test package after install (helloworld) run: mpiexec -n 2 python -m mpi4py.bench helloworld - name: Uninstall package after testing run: python -m pip uninstall mpi4py --verbose --yes mpi4py-4.0.3/.github/workflows/ci-check.yml000066400000000000000000000117461475341043600205460ustar00rootroot00000000000000name: ci-check on: # yamllint disable-line rule:truthy schedule: - cron: '0 3 * * 0' workflow_call: inputs: py: description: 'Python version' required: false default: 3 type: string workflow_dispatch: inputs: py: description: 'Python version' required: true default: 3 type: string permissions: contents: read jobs: conf: runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: os: - ubuntu-latest - macos-latest - windows-latest steps: - name: Checkout uses: actions/checkout@v4 - name: Setup Python (${{ github.event.inputs.py || 3 }}) uses: actions/setup-python@v5 with: python-version: ${{ github.event.inputs.py || 3 }} cache: pip cache-dependency-path: | conf/requirements-build-cython.txt - name: Upgrade pip run: python -m pip install -U pip setuptools - name: Install Python packages (build) run: python -m pip install -U -r conf/requirements-build-cython.txt - name: Build package run: python setup.py build env: MPICFG: nompi CFLAGS: -O0 - name: Check build configuration run: | # check nompi pympiconf.h cp "src/lib-mpi/pympiconf.h" "conf/nompi/pympiconf.h" git diff --exit-code - name: Check code generation run: | python conf/mpiapigen.py git diff --exit-code docs: runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v4 - name: Setup Python (${{ github.event.inputs.py || 3 }}) uses: actions/setup-python@v5 with: python-version: ${{ github.event.inputs.py || 3 }} cache: pip cache-dependency-path: | conf/requirements-build-cython.txt conf/requirements-build-docs.txt - name: Upgrade pip run: python -m pip install -U pip - name: Install package for documenting run: python -m pip install . env: MPICFG: nompi-fast CFLAGS: -O0 - name: Install Python packages (docs) run: python -m pip install -U -r conf/requirements-docs.txt - name: sphinx-build coverage run: sphinx-build -M coverage docs/source build -q -E -W -jauto - name: check coverage run: | coverage=build/coverage/python.txt pattern='(classes|functions|methods):' if grep -qE $pattern $coverage; then cat $coverage; exit 1; fi lint: runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v4 - name: Setup Python (${{ github.event.inputs.py || 3 }}) uses: actions/setup-python@v5 with: python-version: ${{ github.event.inputs.py || 3 }} cache: pip cache-dependency-path: | conf/requirements-build-cython.txt conf/requirements-build-lint.txt - name: Upgrade pip run: python -m pip install -U pip - name: Install package for linting run: python -m pip install . env: MPICFG: nompi-fast CFLAGS: -O0 - name: Install Python packages (lint) run: python -m pip install -U -r conf/requirements-lint.txt - name: ruff run: ruff check - name: flake8 run: flake8 docs src - name: flake8 (testsuite) run: flake8 --select=A test - name: pylint run: pylint mpi4py - name: codespell run: codespell - name: cython run: conf/cythonize.sh -Wextra -Werror - name: cython-lint run: cython-lint . - name: yamllint run: yamllint . 
type: runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v4 - name: Setup Python (${{ github.event.inputs.py || 3 }}) uses: actions/setup-python@v5 with: python-version: ${{ github.event.inputs.py || 3 }} cache: pip cache-dependency-path: | conf/requirements-build-cython.txt conf/requirements-build-type.txt - name: Upgrade pip run: python -m pip install -U pip - name: Install package for typing run: python -m pip install . env: MPICFG: nompi-fast CFLAGS: -O0 - name: Check stub generation run: | python conf/mpistubgen.py git diff --exit-code - name: Install Python packages (type) run: python -m pip install -U -r conf/requirements-type.txt - name: mypy (typecheck) run: | mypy --python-version 3.13 -p mpi4py mypy --python-version 3.12 -p mpi4py mypy --python-version 3.11 -p mpi4py mypy --python-version 3.10 -p mpi4py mypy --python-version 3.9 -p mpi4py mypy --python-version 3.8 -p mpi4py - name: mypy (stubtest) run: stubtest mpi4py ${{ github.event.inputs.py == '3.8' && '$( echo mpi4py.*.Executor.submit > allowlist && echo --allowlist=allowlist )' || '' }} mpi4py-4.0.3/.github/workflows/ci-cover.yml000066400000000000000000000072761475341043600206120ustar00rootroot00000000000000name: ci-cover on: # yamllint disable-line rule:truthy workflow_call: inputs: py: description: 'Python version' required: false default: 3 type: string workflow_dispatch: inputs: py: description: 'Python version' required: true default: 3 type: string permissions: contents: read env: MPI4PY_COVERAGE_PLUGIN: cycoverage PYTHONPATH: ${{ github.workspace }}/conf jobs: cover: runs-on: ubuntu-latest timeout-minutes: 15 strategy: fail-fast: false matrix: mpi: - mpich - openmpi py: - ${{ github.event.inputs.py || 3 }} defaults: run: shell: bash -el {0} steps: - uses: actions/checkout@v4 - uses: mamba-org/setup-micromamba@v2 with: init-shell: bash post-cleanup: none environment-name: cover create-args: >- ${{ matrix.mpi }}${{ matrix.mpi == 'openmpi' && '=5.0.0' || '' }} ${{ matrix.mpi }}-mpicc python=${{ matrix.py }} pip setuptools cython coverage numpy condarc: | show_channel_urls: true channel_priority: strict channels: - conda-forge - nodefaults - run: python -m pip install . 
env: CFLAGS: -O0 CPPFLAGS: -O0 -Wp,-U_FORTIFY_SOURCE PIP_VERBOSE: 3 PIP_NO_CACHE_DIR: true PIP_NO_BUILD_ISOLATION: false # pypa/pip#5735 PIP_DISABLE_PIP_VERSION_CHECK: true - name: Tweak MPI run: | openmpi_mca_params=$HOME/.openmpi/mca-params.conf mkdir -p $(dirname $openmpi_mca_params) echo plm_ssh_agent=false >> $openmpi_mca_params echo pml=ob1 >> $openmpi_mca_params echo btl=tcp,self >> $openmpi_mca_params echo mpi_yield_when_idle=true >> $openmpi_mca_params echo btl_base_warn_component_unused=false >> $openmpi_mca_params prte_mca_params=$HOME/.prte/mca-params.conf mkdir -p $(dirname $prte_mca_params) echo rmaps_default_mapping_policy=:oversubscribe >> $prte_mca_params - name: Run coverage run: test/coverage.sh - name: Prepare coverage data run: mv .coverage .coverage.${TAG} env: TAG: ${{ runner.os }}.${{ matrix.mpi }}.py${{ matrix.py }} - name: Upload coverage data uses: actions/upload-artifact@v4 with: name: coverage-data-${{ runner.os }}-${{ matrix.mpi }}-${{ matrix.py }} path: ".coverage.*" if-no-files-found: ignore include-hidden-files: true report: runs-on: ubuntu-latest needs: cover if: always() steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: python-version: ${{ github.event.inputs.py || 3 }} cache: pip cache-dependency-path: | conf/requirements-build-cython.txt - run: python -m pip install -U pip setuptools - run: python -m pip install -U -r conf/requirements-build-cython.txt - run: python -m pip install -U coverage - run: python setup.py build_src - name: Download coverage data uses: actions/download-artifact@v4 with: pattern: coverage-data-* merge-multiple: true - name: Report coverage run: | python -m coverage combine python -m coverage html python -m coverage xml python -m coverage report --format=markdown >> $GITHUB_STEP_SUMMARY python -m coverage report --fail-under=100 - name: Upload HTML report uses: actions/upload-artifact@v4 with: name: coverage-html path: htmlcov if: failure() mpi4py-4.0.3/.github/workflows/ci-test.yml000066400000000000000000000056021475341043600204420ustar00rootroot00000000000000name: ci-test on: # yamllint disable-line rule:truthy schedule: - cron: '0 3 * * 0' workflow_call: workflow_dispatch: permissions: contents: read jobs: test: runs-on: ${{ matrix.os }} timeout-minutes: 30 strategy: fail-fast: false matrix: os: - ubuntu-24.04 - macos-15 - windows-2022 mpi: - mpich - openmpi - msmpi py: - "3.11" - "3.12" - "3.13" - "3.14-dev" - "pypy-3.9" - "pypy-3.10" exclude: - os: ubuntu-24.04 mpi: msmpi - os: macos-15 mpi: msmpi - os: windows-2022 mpi: mpich - os: windows-2022 mpi: openmpi steps: - name: Configure hostname run: echo 127.0.0.1 `hostname` | sudo tee -a /etc/hosts > /dev/null if: runner.os == 'Linux' || runner.os == 'macOS' - name: Checkout uses: actions/checkout@v4 - name: Setup MPI (${{ matrix.mpi }}) uses: mpi4py/setup-mpi@v1 with: mpi: ${{ matrix.mpi }} - name: Setup Python (${{ matrix.py }}) uses: actions/setup-python@v5 with: python-version: ${{ matrix.py }} cache: pip cache-dependency-path: | conf/requirements-build-cython.txt conf/requirements-test.txt - name: Upgrade pip run: python -m pip install -U pip - name: Build package run: python -m pip wheel -v --wheel-dir=dist . 
- name: Upload package artifacts uses: actions/upload-artifact@v4 with: name: mpi4py-${{ matrix.os }}-${{ matrix.mpi }}-${{ matrix.py }} path: dist/mpi4py-*.whl - name: Install package for testing run: python -m pip install mpi4py --verbose --no-index --find-links=dist - name: Install Python packages (test) run: python -m pip install -U -r conf/requirements-test.txt if: ${{ !endsWith(matrix.py, '-dev') && !contains(matrix.py, '-alpha') }} - name: Test mpi4py (np=1) run: mpiexec -n 1 python test/main.py -v - name: Test mpi4py (np=2) run: mpiexec -n 2 python test/main.py -v -f -e spawn - name: Test mpi4py.futures (np=1) run: mpiexec -n 1 python demo/futures/test_futures.py -v - name: Test mpi4py.futures (np=2) run: mpiexec -n 2 python demo/futures/test_futures.py -v - name: Test mpi4py.run run: python demo/test-run/test_run.py -v if: matrix.mpi != 'msmpi' && ! (matrix.mpi == 'mpich' && matrix.os == 'ubuntu-24.04') - name: Test init-fini run: bash demo/init-fini/run.sh if: matrix.mpi != 'msmpi' - name: Test check-mpiexec run: bash demo/check-mpiexec/run.sh if: matrix.mpi != 'msmpi' - name: Uninstall package after testing run: python -m pip uninstall --yes mpi4py mpi4py-4.0.3/.github/workflows/ci.yml000066400000000000000000000014461475341043600174670ustar00rootroot00000000000000name: ci on: # yamllint disable-line rule:truthy push: branches: - master - maint - ci/all - ci/github pull_request: branches: - master - maint workflow_dispatch: concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }} permissions: contents: read jobs: test: uses: ./.github/workflows/ci-test.yml build: uses: ./.github/workflows/ci-build.yml check: uses: ./.github/workflows/ci-check.yml cover: uses: ./.github/workflows/ci-cover.yml ci-status: runs-on: ubuntu-latest if: ${{ success() || failure() }} needs: - test - build - check - cover steps: - run: ${{ !(contains(needs.*.result, 'failure')) }} mpi4py-4.0.3/.github/workflows/dist.yml000066400000000000000000000100571475341043600200350ustar00rootroot00000000000000name: dist on: # yamllint disable-line rule:truthy release: types: - published workflow_dispatch: permissions: contents: read jobs: docs: runs-on: ubuntu-latest steps: - name: Install LaTeX run: | # Install Tex Live sudo apt update && \ sudo apt install -y \ texlive-latex-base \ texlive-latex-recommended \ texlive-latex-extra \ latexmk \ texinfo - name: Checkout uses: actions/checkout@v4 - name: Set SOURCE_DATE_EPOCH run: | SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct) echo SOURCE_DATE_EPOCH=$SOURCE_DATE_EPOCH >> $GITHUB_ENV echo $(git log -1 --pretty=%ci) [timestamp=$SOURCE_DATE_EPOCH] - name: Setup Python uses: actions/setup-python@v5 with: python-version: 3 - name: Upgrade pip run: python -m pip install -U pip - name: Install tox run: python -m pip install -U tox - name: Build documentation run: python -m tox run -m docs - name: Archive documentation run: | archive=mpi4py-docs.zip rootdir=${archive%.zip} ln -s docs $rootdir python conf/metadata.py version > $rootdir/version python -m zipfile -c $archive $rootdir - name: Upload documentation artifact uses: actions/upload-artifact@v4 with: name: mpi4py-docs path: mpi4py-docs.zip sdist: runs-on: ubuntu-latest permissions: contents: read id-token: write attestations: write steps: - name: Checkout uses: actions/checkout@v4 - name: Set SOURCE_DATE_EPOCH run: | SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct) echo SOURCE_DATE_EPOCH=$SOURCE_DATE_EPOCH >> $GITHUB_ENV echo $(git log -1 --pretty=%ci) 
[timestamp=$SOURCE_DATE_EPOCH] - name: Setup Python uses: actions/setup-python@v5 with: python-version: 3 - name: Upgrade pip run: python -m pip install -U pip - name: Install build and twine run: python -m pip install -U build twine - name: Build source distribution run: python -m build --sdist - name: Check source distribution run: python -m twine check dist/mpi4py-*.tar.gz - name: Compute SHA256 checksum run: sha256sum -b mpi4py-*.tar.gz >> sha256sum.txt working-directory: dist - name: Report SHA256 checksum run: | echo '```' >> $GITHUB_STEP_SUMMARY cat sha256sum.txt >> $GITHUB_STEP_SUMMARY echo '```' >> $GITHUB_STEP_SUMMARY working-directory: dist - name: Upload distribution artifact uses: actions/upload-artifact@v4 with: name: mpi4py-dist path: dist/mpi4py-*.tar.gz - if: ${{ github.event_name == 'release' }} name: Attest distribution artifact uses: actions/attest-build-provenance@v2 with: subject-path: dist/mpi4py-*.tar.gz gh-publish: if: ${{ github.event_name == 'release' }} runs-on: ubuntu-latest needs: sdist environment: name: gh url: https://github.com/mpi4py/mpi4py/releases permissions: contents: write steps: - name: Checkout uses: actions/checkout@v4 - name: Download distribution artifact uses: actions/download-artifact@v4 with: name: mpi4py-dist path: dist - name: Publish package distributions to GitHub Releases run: gh release upload $TAG dist/mpi4py-*.tar.gz env: TAG: ${{ github.event.release.tag_name }} GITHUB_TOKEN: ${{ github.token }} pypi-publish: if: ${{ github.event_name == 'release' }} runs-on: ubuntu-latest needs: sdist environment: name: pypi url: https://pypi.org/p/mpi4py/ permissions: id-token: write steps: - name: Download distribution artifact uses: actions/download-artifact@v4 with: name: mpi4py-dist path: dist - name: Publish package distributions to PyPI uses: pypa/gh-action-pypi-publish@release/v1 mpi4py-4.0.3/.gitignore000066400000000000000000000004341475341043600147400ustar00rootroot00000000000000/build/** /dist/** /.eggs/** /MANIFEST /.tox/** **/__pycache__/ /docs/*.html /docs/*.pdf /docs/*.info /docs/*.[137] /docs/html/** /docs/source/_build/** /docs/source/reference/** /src/mpi4py/MPI.c /src/mpi4py/MPI.h /src/mpi4py/MPI_api.h /src/mpi4py/MPI.*.so /src/mpi4py.egg-info/** mpi4py-4.0.3/.mailmap000066400000000000000000000002131475341043600143640ustar00rootroot00000000000000Lisandro Dalcin Lisandro Dalcin mpi4py-4.0.3/.mypy.ini000066400000000000000000000006011475341043600145210ustar00rootroot00000000000000[mypy] disallow_any_unimported = True disallow_any_expr = True disallow_any_generics = True disallow_subclassing_any = True disallow_any_decorated = True # disallow_any_explicit = True disallow_untyped_calls = True disallow_untyped_defs = True disallow_incomplete_defs = True disallow_untyped_decorators = True warn_unused_ignores = True warn_return_any = True warn_unreachable = True mpi4py-4.0.3/.pylintrc000066400000000000000000000004601475341043600146140ustar00rootroot00000000000000# https://pylint.readthedocs.io # -*- mode: conf -*- [MASTER] ignore-patterns = .*\.pyi disable = locally-disabled,file-ignored,no-else-return enable = useless-suppression good-names = i,j,k,_,fs,fn,tb typealias-rgx=[_A-Z][_a-zA-Z0-9]* extension-pkg-allow-list = mpi4py reports = no score = no jobs = 0 mpi4py-4.0.3/.pytest.ini000066400000000000000000000000571475341043600150600ustar00rootroot00000000000000[pytest] testpaths = test demo/futures mpi4py-4.0.3/.readthedocs.yaml000066400000000000000000000004321475341043600161750ustar00rootroot00000000000000# 
https://readthedocs.org/projects/mpi4py/builds/ version: 2 formats: all build: os: ubuntu-24.04 tools: python: "3.13" python: install: - path: . - requirements: conf/requirements-docs.txt sphinx: configuration: docs/source/conf.py fail_on_warning: true mpi4py-4.0.3/.ruff.toml000066400000000000000000000010431475341043600146620ustar00rootroot00000000000000target-version = "py37" src = ["src"] exclude = [ "demo", "test", ] lint.select = [ "A", "B", "C", "D", "E", "F", "G", "I", "Q", "S", "W", "UP", "ARG", "ISC", "PIE", #"PTH", "PYI", "RET", "RUF", "TRY", "YTT", ] lint.ignore = [ "A001", "A002", "A003", "A005", "C901", "I001", "Q000", "PYI011", "UP006", "UP007", "UP036", "RET504", "RET505", "RUF012", "RUF022", "TRY003", ] [lint.per-file-ignores] "conf/*" = ["D",] "demo/*" = ["D",] "docs/*" = ["D",] "test/*" = ["D",] "*.pyi" = ["D", "E501", "E701"] [lint.pydocstyle] convention = "pep257" mpi4py-4.0.3/.spin.toml000066400000000000000000000007531475341043600147000ustar00rootroot00000000000000[project] name = 'mpi4py' [tool.spin.commands] "Build" = [ ".spin/cmds.py:build", ".spin/cmds.py:test", ".spin/cmds.py:sdist", ".spin/cmds.py:wheel", ] "Check" = [ ".spin/cmds.py:lint", ".spin/cmds.py:type", ] "Environment" = [ ".spin/cmds.py:shell", ".spin/cmds.py:ipython", ".spin/cmds.py:mpiexec", ".spin/cmds.py:mpi4py", ] "Install" = [ ".spin/cmds.py:install", ".spin/cmds.py:editable", ] "Documentation" = [ ".spin/cmds.py:docs", ".spin/cmds.py:browse", ] mpi4py-4.0.3/.spin/000077500000000000000000000000001475341043600137765ustar00rootroot00000000000000mpi4py-4.0.3/.spin/cmds.py000066400000000000000000000143611475341043600153030ustar00rootroot00000000000000"""https://github.com/scientific-python/spin""" # noqa: D400 import os import shlex import shutil import sys import click from spin.cmds.util import run PYTHON = shlex.split(os.environ.get("PYTHON", sys.executable)) MPIEXEC = shlex.split(os.environ.get("MPIEXEC", "mpiexec")) SHELL = shlex.split(os.environ.get("SHELL", "sh")) @click.command() @click.option("-c", "--clean", is_flag=True, help="Clean build directory.") @click.option("-f", "--force", is_flag=True, help="Force build everything.") @click.option("-q", "--quiet", is_flag=True, help="Run quietly.") def build(clean, force, quiet): """🔧 Build in-place.""" opt_force = ["--force"] if force else [] opt_quiet = ["--quiet"] if quiet else [] if clean: run([*PYTHON, "setup.py", *opt_quiet, "clean", "--all"]) run([*PYTHON, "setup.py", *opt_quiet, "build", *opt_force, "--inplace"]) @click.command() @click.option("-n", default=1, help="Number of MPI processes") @click.option("-s", "--singleton", is_flag=True, help="Singleton mode.") @click.argument("test_args", nargs=-1) @click.pass_context def test(ctx, n, singleton, test_args): """🔧 Test in-place.""" ctx.invoke(build, quiet=True) launcher = [] if singleton else [*MPIEXEC, "-n", f"{n}"] run([*launcher, *PYTHON, "test/main.py", "--inplace", *test_args]) def _get_site_packages(): script = os.path.abspath(__file__) testdir = os.path.dirname(script) rootdir = os.path.dirname(testdir) return os.path.join(rootdir, "src") def _set_pythonpath(path, quiet=False): pythonpath = os.environ.get("PYTHONPATH") if pythonpath is not None: pythonpath = f"{path}{os.pathsep}{pythonpath}" else: pythonpath = path os.environ["PYTHONPATH"] = pythonpath if not quiet: click.secho( f'$ export PYTHONPATH="{pythonpath}"', bold=True, fg="bright_blue" ) return path def _setup_environment(ctx, quiet=False): path = _get_site_packages() ctx.invoke(build, quiet=True) _set_pythonpath(path, 
quiet=quiet) def _run_check_commands(*commands): for cmd, *args in commands: if shutil.which(cmd) is not None: run([cmd, *args]) else: click.secho( f'{cmd}: command not found...', bold=True, fg="bright_red" ) @click.command() @click.pass_context def lint(ctx): """🔦 Lint-check sources with linters.""" _setup_environment(ctx) _run_check_commands( ["ruff", "check", "--quiet"], ["flake8", "docs", "src"], ["pylint", "mpi4py"], ["cython-lint", "."], ["codespell"], ["yamllint", "."], ) @click.command() @click.pass_context def type(ctx): """🦆 Type-check sources with mypy.""" _setup_environment(ctx) _run_check_commands( ["stubtest", "mpi4py"], ["mypy", "-p", "mpi4py"], ) @click.command(context_settings={"ignore_unknown_options": True}) @click.argument("shell_args", nargs=-1) @click.pass_context def shell(ctx, shell_args): """💻 Launch shell with PYTHONPATH set.""" _setup_environment(ctx) run([*SHELL, *shell_args], replace=True) @click.command(context_settings={"ignore_unknown_options": True}) @click.argument("ipython_args", nargs=-1) @click.pass_context def ipython(ctx, ipython_args): """🐍 Launch ipython with PYTHONPATH set.""" _setup_environment(ctx) ipython = [*PYTHON, "-m", "IPython"] run([*ipython, *ipython_args], replace=True) @click.command(context_settings={"ignore_unknown_options": True}) @click.option("-n", default=1, help="Number of MPI processes") @click.argument("mpiexec_args", nargs=-1) @click.pass_context def mpiexec(ctx, n, mpiexec_args): """🏁 Run mpiexec with PYTHONPATH set.""" _setup_environment(ctx) run([*MPIEXEC, "-n", f"{n}", *mpiexec_args], replace=True) @click.command(context_settings={"ignore_unknown_options": True}) @click.option("-n", default=1, help="Number of MPI processes") @click.argument("mpi4py_args", nargs=-1) @click.pass_context def mpi4py(ctx, n, mpi4py_args): """🏁 Run mpi4py with PYTHONPATH set.""" _setup_environment(ctx) mpi4py = [*MPIEXEC, "-n", f"{n}", *PYTHON, "-m", "mpi4py"] mpi4py_args = mpi4py_args or ["--help"] run([*mpi4py, *mpi4py_args], replace=True) @click.command() @click.option("-e", "--editable", is_flag=True, help="Editable mode.") @click.option("-q", "--quiet", is_flag=True, help="Run quietly.") @click.argument("pip_args", nargs=-1) def install(editable, quiet, pip_args): """🔧 Install package.""" pip = [*PYTHON, "-m", "pip"] pip_args = [*pip_args] if quiet: pip_args.append("--quiet") if editable: pip_args.append("--editable") run([*pip, "install", *pip_args, "."]) @click.command() @click.option("-q", "--quiet", is_flag=True, help="Run quietly.") @click.argument("pip_args", nargs=-1) @click.pass_context def editable(ctx, quiet, pip_args): # noqa: ARG001 """🔧 Install package in editable mode.""" ctx.forward(install, editable=True) @click.command() def sdist(): """📦 Build sdist.""" run([*PYTHON, "-m", "build", ".", "--sdist"]) @click.command() def wheel(): """📦 Build wheel.""" run([*PYTHON, "-m", "build", ".", "--wheel"]) @click.command() @click.option("-b", "--builder", default="html", help="Builder to use.") @click.option("-f", "--force", is_flag=True, help="Ignore cached environment.") @click.option("-j", "--jobs", default="auto", help="Build in parallel.") @click.option("-q", "--quiet", is_flag=True, help="Run quietly.") @click.pass_context def docs(ctx, builder="html", force=False, jobs="auto", quiet=False): """📖 Build Sphinx documentation.""" sphinx = [*PYTHON, "-m", "sphinx.cmd.build"] srcdir = "docs/source" outdir = "build" options = [] if quiet: options.append("-q") if force: options.append("-E") options.append("-W") options.extend(["--jobs", 
jobs]) _setup_environment(ctx) run([*sphinx, "-M", builder, srcdir, outdir, *options]) @click.command() @click.pass_context def browse(ctx): """🌐 Browse Sphinx documentation.""" ctx.invoke(docs, quiet=True) browser = [*PYTHON, "-m", "webbrowser", "-n"] url = os.path.join("build", "html", "index.html") run([*browser, url]) mpi4py-4.0.3/.yamllint.yml000066400000000000000000000004731475341043600154050ustar00rootroot00000000000000extends: default ignore-from-file: - .gitignore rules: document-start: present: false colons: max-spaces-before: 0 max-spaces-after: -1 line-length: max: 80 level: warning ignore: | /.appveyor.yml /.circleci/config.yml indentation: indent-sequences: whatever mpi4py-4.0.3/CHANGES.rst000066400000000000000000000537141475341043600145630ustar00rootroot00000000000000Release 4.0.3 [2025-02-13] ========================== * Fix DLPack v1.0 support. Release 4.0.2 [2025-02-01] ========================== * Support MPI-4 features within Intel MPI 2021.14. * Various fixes and updates to tests. * Minor fixes to typing support. * Minor fix to documentation. Release 4.0.1 [2024-10-11] ========================== * Update support for Python 3.13: + Enable Cython 3.1 support for free-threaded CPython. + Allow compiling Cython-generated C sources with the full Python C-API. + Fix MPI DLL path workarounds on Windows after changes to `locals()`. * Enhancements to test suite: + Support XML reports via `unittest-xml-reporting`. + Add command line options to exclude tests by patterns and files. + Refactor Python 2 code to use Python 3 constructs using `pyupgrade`. * Miscellaneous: + Minor and mostly inconsequential subclass fix in `mpi4py.util.pkl5`. + Update compatibility workarounds for legacy MPICH 3.0 release. Release 4.0.0 [2024-07-28] ========================== * New features: + Add support for the MPI-4.0 standard. - Use large count MPI-4 routines. - Add persistent collective communication. - Add partitioned point-to-point communication. - Add new communicator constructors. - Add the `Session` class and its methods. + Add support for the MPI-4.1 standard. - Add non-destructive completion test for multiple requests. - Add value-index datatype constructor. - Add communicator/session buffer attach/detach/flush. - Support for removal of error classes/codes/strings. - Support for querying hardware resource information. + Add preliminary support for the upcoming MPI-5.0 standard. - User-level failure mitigation (ULFM). + `mpi4py.util.pool`: New drop-in replacement for `multiprocessing.pool`. + `mpi4py.util.sync`: New synchronization utilities. + Add runtime check for mismatch between `mpiexec` and MPI library. + Support `scikit-build-core`_ as an alternative build backend. .. _scikit-build-core: https://scikit-build.readthedocs.io/ + Support `meson-python`_ as an alternative build backend. .. _meson-python: https://meson-python.readthedocs.io/ * Enhancements: + `mpi4py.futures`: Support for parallel tasks. + `mpi4py.futures`: Report exception tracebacks in workers. + `mpi4py.util.pkl5`: Add support for collective communication. + Add methods `Datatype.fromcode()`, `Datatype.tocode()` and attributes `Datatype.typestr`, `Datatype.typechar` to simplify NumPy interoperability for simple cases. + Add methods `Comm.Create_errhandler()`, `Win.Create_errhandler()`, and `File.Create_errhandler()` to create custom error handlers. + Add support for pickle serialization of instances of MPI types. All instances of `Datatype`, `Info`, and `Status` can be serialized. 
Instances of `Op` can be serialized only if created through `mpi4py` by calling `Op.Create()`. Instances of other MPI types can be serialized only if they reference predefined handles. + Add `handle` attribute and `fromhandle()` class method to MPI classes to ease interoperability with external code. The handle value is an unsigned integer guaranteed to fit on the platform's ``uintptr_t`` C type. + Add lowercase `free()` method to MPI classes to ease MPI object deallocation and cleanup. This method eventually attempts to call `Free()`, but only if the object's MPI handle is not a null or predefined handle, and such call is allowed within the World Model init/finalize. * Backward-incompatible changes: + Python 2 is no longer supported, Python 3.6+ is required, but typing stubs are supported for Python 3.8+. + The `Intracomm.Create_group()` method is no longer defined in the base `Comm` class. + `Group.Compare()` and `Comm.Compare()` are no longer class methods but instance methods. Existing codes using the former class methods are expected to continue working. + `Group.Translate_ranks()` is no longer a class method but an instance method. Existing codes using the former class method are expected to continue working. + The `LB` and `UB` datatypes are no longer available, use `Datatype.Create_resized()` instead. + The `HOST` predefined attribute key is no longer available. + The `MPI.memory` class has been renamed to `MPI.buffer`. The old name is still available as an alias to the new name. + The `mpi4py.dl` module is no longer available. + The `mpi4py.get_config` function returns an empty dictionary. * Miscellaneous: + The project is now licensed under the BSD-3-Clause license. This change is fairly inconsequential for users and distributors. It simply adds an additional clause against using contributor names for promotional purposes without their consent. + Add a new guidelines section to documentation laying out new fair play rules. These rules ask companies and outside developers to refrain from reusing the ``mpi4py`` name in unaffiliated projects, publishing binary mpi4py wheels on the main Python Package Index (PyPI), and distributing modified versions with incompatible or extended API changes. The primary motivation of these rules is to avoid fragmentation and end-user confusion. Release 3.1.6 [2024-04-14] ========================== .. warning:: This is the last release supporting Python 2. * Fix various build issues. Release 3.1.5 [2023-10-04] ========================== .. warning:: This is the last release supporting Python 2. * Rebuild C sources with Cython 0.29.36 to support Python 3.12. Release 3.1.4 [2022-11-02] ========================== .. warning:: This is the last release supporting Python 2. * Rebuild C sources with Cython 0.29.32 to support Python 3.11. * Fix contiguity check for DLPack and CAI buffers. * Workaround build failures with setuptools v60. Release 3.1.3 [2021-11-25] ========================== .. warning:: This is the last release supporting Python 2. * Add missing support for `MPI.BOTTOM` to generalized all-to-all collectives. Release 3.1.2 [2021-11-04] ========================== .. warning:: This is the last release supporting Python 2. * `mpi4py.futures`: Add `_max_workers` property to `MPIPoolExecutor`. * `mpi4py.util.dtlib`: Fix computation of alignment for predefined datatypes. * `mpi4py.util.pkl5`: Fix deadlock when using ``ssend()`` + ``mprobe()``. * `mpi4py.util.pkl5`: Add environment variable `MPI4PY_PICKLE_THRESHOLD`. 
* `mpi4py.rc`: Interpret ``"y"`` and ``"n"`` strings as boolean values. * Fix/add typemap/typestr for `MPI.WCHAR`/`MPI.COUNT` datatypes. * Minor fixes and additions to documentation. * Minor fixes to typing support. * Support for local version identifier (PEP-440). Release 3.1.1 [2021-08-14] ========================== .. warning:: This is the last release supporting Python 2. * Fix typo in Requires-Python package metadata. * Regenerate C sources with Cython 0.29.24. Release 3.1.0 [2021-08-12] ========================== .. warning:: This is the last release supporting Python 2. * New features: + `mpi4py.util`: New package collecting miscellaneous utilities. * Enhancements: + Add pickle-based ``Request.waitsome()`` and ``Request.testsome()``. + Add lowercase methods ``Request.get_status()`` and ``Request.cancel()``. + Support for passing Python GPU arrays compliant with the `DLPack`_ data interchange mechanism (`link <DIM_>`_) and the ``__cuda_array_interface__`` (CAI) standard (`link <CAI_>`_) to uppercase methods. This support requires that mpi4py is built against `CUDA-aware MPI <CAM_>`_ implementations. This feature is currently experimental and subject to future changes. + `mpi4py.futures`: Add support for initializers and canceling futures at shutdown. Environment variable names now follow the pattern ``MPI4PY_FUTURES_*``; the previous ``MPI4PY_*`` names are deprecated. + Add type annotations to Cython code. The first line of the docstring of functions and methods displays a signature including type annotations. + Add companion stub files to support type checkers. + Support for weak references. * Miscellaneous: + Add a new mpi4py publication (`link <DOI_>`_) to the citation listing. .. _DLPack: https://github.com/dmlc/dlpack .. _DIM: https://data-apis.org/array-api/latest/design_topics/data_interchange.html .. _CAI: https://numba.readthedocs.io/en/stable/cuda/cuda_array_interface.html .. _CAM: https://developer.nvidia.com/blog/introduction-cuda-aware-mpi/ .. _DOI: https://doi.org/10.1109/MCSE.2021.3083216 Release 3.0.3 [2019-11-04] ========================== * Regenerate Cython wrappers to support Python 3.8. Release 3.0.2 [2019-06-11] ========================== * Bug fixes: + Fix handling of readonly buffers in support for Python 2 legacy buffer interface. The issue triggers only when using a buffer-like object that is readonly and does not export the new Python 3 buffer interface. + Fix build issues with Open MPI 4.0.x series related to removal of many MPI-1 symbols deprecated in MPI-2 and removed in MPI-3. + Minor documentation fixes. Release 3.0.1 [2019-02-15] ========================== * Bug fixes: + Fix ``Comm.scatter()`` and other collectives corrupting input send list. Add safety measures to prevent related issues in global reduction operations. + Fix error-checking code for counts in ``Op.Reduce_local()``. * Enhancements: + Map size-specific Python/NumPy typecodes to MPI datatypes. + Allow partial specification of target list/tuple arguments in the various ``Win`` RMA methods. + Workaround for removal of ``MPI_{LB|UB}`` in Open MPI 4.0. + Support for Microsoft MPI v10.0. Release 3.0.0 [2017-11-08] ========================== * New features: + `mpi4py.futures`: Execute computations asynchronously using a pool of MPI processes. This package is based on ``concurrent.futures`` from the Python standard library. + `mpi4py.run`: Run Python code and abort execution in case of unhandled exceptions to prevent deadlocks. + `mpi4py.bench`: Run basic MPI benchmarks and tests.
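A minimal usage sketch of the ``mpi4py.futures`` pool described above, assuming the script is launched through ``python -m mpi4py.futures`` under ``mpiexec`` (the task function and process count below are illustrative only)::

    # Typical launch (illustrative): mpiexec -n 4 python -m mpi4py.futures script.py
    from mpi4py.futures import MPIPoolExecutor

    def square(x):
        return x * x

    if __name__ == "__main__":
        with MPIPoolExecutor() as executor:
            # map() dispatches tasks to the MPI worker processes
            print(list(executor.map(square, range(8))))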
* Enhancements: + Lowercase, pickle-based collective communication calls are now thread-safe through the use of fine-grained locking. + The ``MPI`` module now exposes a ``memory`` type which is a lightweight variant of the builtin ``memoryview`` type, but exposes both the legacy Python 2 and the modern Python 3 buffer interface under a Python 2 runtime. + The ``MPI.Comm.Alltoallw()`` method now uses ``count=1`` and ``displ=0`` as defaults, assuming that messages are specified through user-defined datatypes. + The ``Request.Wait[all]()`` methods now return ``True`` to match the interface of ``Request.Test[all]()``. + The ``Win`` class now implements the Python buffer interface. * Backward-incompatible changes: + The ``buf`` argument of the ``MPI.Comm.recv()`` method is deprecated, passing anything but ``None`` emits a warning. + The ``MPI.Win.memory`` property was removed, use the ``MPI.Win.tomemory()`` method instead. + Executing ``python -m mpi4py`` in the command line is now equivalent to ``python -m mpi4py.run``. For the former behavior, use ``python -m mpi4py.bench``. + Python 2.6 and 3.2 are no longer supported. The ``mpi4py.MPI`` module may still build and partially work, but other pure-Python modules under the ``mpi4py`` namespace will not. + Windows: Remove support for legacy MPICH2, Open MPI, and DeinoMPI. Release 2.0.0 [2015-10-18] ========================== * Support for MPI-3 features. + Matched probes and receives. + Nonblocking collectives. + Neighborhood collectives. + New communicator constructors. + Request-based RMA operations. + New RMA communication and synchronisation calls. + New window constructors. + New datatype constructor. + New C++ boolean and floating complex datatypes. * Support for MPI-2 features not included in previous releases. + Generalized All-to-All collective (``Comm.Alltoallw()``) + User-defined data representations (``Register_datarep()``) * New scalable implementation of reduction operations for Python objects. This code is based on binomial tree algorithms using point-to-point communication and duplicated communicator contexts. To disable this feature, use ``mpi4py.rc.fast_reduce = False``. * Backward-incompatible changes: + Python 2.4, 2.5, 3.0 and 3.1 are no longer supported. + Default MPI error handling policies are overridden. After import, mpi4py sets the ``ERRORS_RETURN`` error handler in ``COMM_SELF`` and ``COMM_WORLD``, as well as any new ``Comm``, ``Win``, or ``File`` instance created through mpi4py, thus effectively ignoring the MPI rules about error handler inheritance. This way, MPI errors translate to Python exceptions. To disable this behavior and use the standard MPI error handling rules, use ``mpi4py.rc.errors = 'default'``. + Change signature of all send methods, ``dest`` is a required argument. + Change signature of all receive and probe methods, ``source`` defaults to ``ANY_SOURCE``, ``tag`` defaults to ``ANY_TAG``. + Change signature of send lowercase-spelling methods, ``obj`` arguments are not mandatory. + Change signature of recv lowercase-spelling methods, renamed 'obj' arguments to 'buf'. + Change ``Request.Waitsome()`` and ``Request.Testsome()`` to return ``None`` or ``list``. + Change signature of all lowercase-spelling collectives, ``sendobj`` arguments are now mandatory, ``recvobj`` arguments were removed. 
+ Reduction operations ``MAXLOC`` and ``MINLOC`` are no longer special-cased in lowercase-spelling methods ``Comm.[all]reduce()`` and ``Comm.[ex]scan()``; the input object must be specified as a tuple ``(obj, location)``. + Change signature of name publishing functions. The new signatures are ``Publish_name(service_name, port_name, info=INFO_NULL)`` and ``Unpublish_name(service_name, port_name, info=INFO_NULL)``. + ``Win`` instances now cache Python objects exposing memory by keeping references instead of using MPI attribute caching. + Change signature of ``Win.Lock()``. The new signature is ``Win.Lock(rank, lock_type=LOCK_EXCLUSIVE, assertion=0)``. + Move ``Cartcomm.Map()`` to ``Intracomm.Cart_map()``. + Move ``Graphcomm.Map()`` to ``Intracomm.Graph_map()``. + Remove the ``mpi4py.MPE`` module. + Rename the Cython definition file for use with ``cimport`` statement from ``mpi_c.pxd`` to ``libmpi.pxd``. Release 1.3.1 [2013-08-07] ========================== * Regenerate C wrappers with Cython 0.19.1 to support Python 3.3. * Install ``*.pxd`` files in ``/mpi4py`` to ease the support for Cython's ``cimport`` statement in code requiring access to mpi4py internals. * As a side-effect of using Cython 0.19.1, ancient Python 2.3 is no longer supported. If you really need it, you can install an older Cython and run ``python setup.py build_src --force``. Release 1.3 [2012-01-20] ======================== * Now ``Comm.recv()`` accepts a buffer to receive the message. * Add ``Comm.irecv()`` and ``Request.{wait|test}[any|all]()``. * Add ``Intracomm.Spawn_multiple()``. * Better buffer handling for PEP 3118 and legacy buffer interfaces. * Add support for attribute caching on communicators, datatypes and windows. * Install MPI-enabled Python interpreter as ``/mpi4py/bin/python-mpi``. * Windows: Support for building with Open MPI. Release 1.2.2 [2010-09-13] ========================== * Add ``mpi4py.get_config()`` to retrieve information (compiler wrappers, includes, libraries, etc.) about the MPI implementation employed to build mpi4py. * Workaround Python libraries with missing GILState-related API calls in case of non-threaded Python builds. * Windows: look for MPICH2, DeinoMPI, Microsoft HPC Pack at their default install locations under %ProgramFiles. * MPE: fix hacks related to old APIs; these hacks are broken when MPE is built with MPI implementations other than MPICH2. * HP-MPI: fix for missing Fortran datatypes; use dlopen() to load the MPI shared library before MPI_Init(). * Many distutils-related fixes, cleanup, and enhancements, better logic to find MPI compiler wrappers. * Support for ``pip install mpi4py``. Release 1.2.1 [2010-02-26] ========================== * Fix declaration in Cython include file. This declaration, while valid for Cython, broke the simple-minded parsing used in conf/mpidistutils.py to implement configure-tests for availability of MPI symbols. * Update SWIG support and make it compatible with Python 3. Also generate a warning for SWIG < 1.3.28. * Fix distutils-related issues in Mac OS X. Now ARCHFLAGS environment variable is honored of all Python's ``config/Makefile`` variables. * Fix issues with Open MPI < 1.4.2 related to error checking and ``MPI_XXX_NULL`` handles. Release 1.2 [2009-12-29] ======================== * Automatic MPI datatype discovery for NumPy arrays and PEP-3118 buffers. Now buffer-like objects can be messaged directly; it is no longer required to explicitly pass a 2/3-list/tuple like ``[data, MPI.DOUBLE]``, or ``[data, count, MPI.DOUBLE]``.
Only basic types are supported, i.e., all C/C99-native signed/unsigned integral types and single/double precision real/complex floating types. Many thanks to Eilif Muller for the initial feedback. * Nonblocking send of pickled Python objects. Many thanks to Andreas Kloeckner for the initial patch and enlightening discussion about this enhancement. * ``Request`` instances now hold a reference to the Python object exposing the buffer involved in point-to-point communication or parallel I/O. Many thanks to Andreas Kloeckner for the initial feedback. * Support for logging of user-defined states and events using `MPE `_. Runtime (i.e., without requiring a recompile!) activation of logging of all MPI calls is supported in POSIX platforms implementing ``dlopen()``. * Support for all the new features in MPI-2.2 (new C99 and F90 datatypes, distributed graph topology, local reduction operation, and other minor enhancements). * Fix the annoying issues related to Open MPI and Python dynamic loading of extension modules in platforms supporting ``dlopen()``. * Fix SLURM dynamic loading issues on SiCortex. Many thanks to Ian Langmore for providing me shell access. Release 1.1.0 [2009-06-06] ========================== * Fix bug in ``Comm.Iprobe()`` that caused segfaults as Python C-API calls were issued with the GIL released (issue #2). * Add ``Comm.bsend()`` and ``Comm.ssend()`` for buffered and synchronous send semantics when communicating general Python objects. * Now the call ``Info.Get(key)`` returns a *single* value (i.e., instead of a 2-tuple); this value is ``None`` if ``key`` is not in the ``Info`` object, or a string otherwise. Previously, the call redundantly returned ``(None, False)`` for missing key-value pairs; ``None`` is enough to signal a missing entry. * Add support for parametrized Fortran datatypes. * Add support for decoding user-defined datatypes. * Add support for user-defined reduction operations on memory buffers. However, at most 16 user-defined reduction operations can be created. Ask the author for more room if you need it. Release 1.0.0 [2009-03-20] ========================== This is the first release of the all-new, Cython-based, implementation of *MPI for Python*. Unfortunately, this implementation is not backward-compatible with the previous one. The list below summarizes the more important changes that can impact user codes. * Some communication calls had *overloaded* functionality. Now there is a clear distinction between communication of general Python objects with *pickle*, and (fast, near C-speed) communication of buffer-like objects (e.g., NumPy arrays). - for communicating general Python objects, you have to use all-lowercase methods, like ``send()``, ``recv()``, ``bcast()``, etc. - for communicating array data, you have to use ``Send()``, ``Recv()``, ``Bcast()``, etc. methods. Buffer arguments to these calls must be explicitly specified by using a 2/3-list/tuple like ``[data, MPI.DOUBLE]``, or ``[data, count, MPI.DOUBLE]`` (the former one uses the byte-size of ``data`` and the extent of the MPI datatype to define the ``count``). * Indexing a communicator with an integer returned a special object associating the communication with a target rank, alleviating you from specifying source/destination/root arguments in point-to-point and collective communications. This functionality is no longer available, expressions like:: MPI.COMM_WORLD[0].Send(...) MPI.COMM_WORLD[0].Recv(...) MPI.COMM_WORLD[0].Bcast(...) 
have to be replaced by:: MPI.COMM_WORLD.Send(..., dest=0) MPI.COMM_WORLD.Recv(..., source=0) MPI.COMM_WORLD.Bcast(..., root=0) * Automatic MPI initialization (i.e., at import time) requests the maximum level of MPI thread support (i.e., it is done by calling ``MPI_Init_thread()`` and passing ``MPI_THREAD_MULTIPLE``). In case you need to change this behavior, you can tweak the contents of the ``mpi4py.rc`` module. * In order to obtain the values of predefined attributes attached to the world communicator, now you have to use the ``Get_attr()`` method on the ``MPI.COMM_WORLD`` instance:: tag_ub = MPI.COMM_WORLD.Get_attr(MPI.TAG_UB) * In the previous implementation, ``MPI.COMM_WORLD`` and ``MPI.COMM_SELF`` were associated to **duplicates** of the (C-level) ``MPI_COMM_WORLD`` and ``MPI_COMM_SELF`` predefined communicator handles. Now this is no longer the case, ``MPI.COMM_WORLD`` and ``MPI.COMM_SELF`` proxies the **actual** ``MPI_COMM_WORLD`` and ``MPI_COMM_SELF`` handles. * Convenience aliases ``MPI.WORLD`` and ``MPI.SELF`` were removed. Use instead ``MPI.COMM_WORLD`` and ``MPI.COMM_SELF``. * Convenience constants ``MPI.WORLD_SIZE`` and ``MPI.WORLD_RANK`` were removed. Use instead ``MPI.COMM_WORLD.Get_size()`` and ``MPI.COMM_WORLD.Get_rank()``. mpi4py-4.0.3/CITATION.cff000066400000000000000000000023451475341043600146450ustar00rootroot00000000000000cff-version: 1.2.0 message: Please cite this software using the metadata from 'preferred-citation'. title: MPI for Python abstract: Python bindings for MPI license: BSD-3-Clause url: https://mpi4py.github.io doi: 10.5281/zenodo.5645087 repository-code: https://github.com/mpi4py/mpi4py contact: - name: MPI for Python email: mpi4py@googlegroups.com website: https://groups.google.com/g/mpi4py authors: - family-names: Dalcin given-names: Lisandro alias: dalcinl orcid: https://orcid.org/0000-0001-8086-0155 - family-names: Fang given-names: Yao-Lung L. alias: leofang orcid: https://orcid.org/0000-0001-7191-1651 - family-names: Rogowski given-names: Marcin alias: mrogowski orcid: https://orcid.org/0000-0002-5662-2082 preferred-citation: type: article title: "mpi4py: Status Update After 12 Years of Development" authors: - family-names: Dalcin given-names: Lisandro orcid: https://orcid.org/0000-0001-8086-0155 - family-names: Fang given-names: Yao-Lung L. orcid: https://orcid.org/0000-0001-7191-1651 journal: Computing in Science & Engineering doi: 10.1109/mcse.2021.3083216 issn: 1558-366X volume: 23 issue: 4 start: 47 end: 54 year: 2021 mpi4py-4.0.3/CITATION.rst000066400000000000000000000020141475341043600147100ustar00rootroot00000000000000* M. Rogowski, S. Aseeri, D. Keyes, and L. Dalcin, *mpi4py.futures: MPI-Based Asynchronous Task Execution for Python*, IEEE Transactions on Parallel and Distributed Systems, 34(2):611-622, 2023. https://doi.org/10.1109/TPDS.2022.3225481 * L. Dalcin and Y.-L. L. Fang, *mpi4py: Status Update After 12 Years of Development*, Computing in Science & Engineering, 23(4):47-54, 2021. https://doi.org/10.1109/MCSE.2021.3083216 * L. Dalcin, P. Kler, R. Paz, and A. Cosimo, *Parallel Distributed Computing using Python*, Advances in Water Resources, 34(9):1124-1139, 2011. https://doi.org/10.1016/j.advwatres.2011.04.013 * L. Dalcin, R. Paz, M. Storti, and J. D'Elia, *MPI for Python: performance improvements and MPI-2 extensions*, Journal of Parallel and Distributed Computing, 68(5):655-662, 2008. https://doi.org/10.1016/j.jpdc.2007.09.005 * L. Dalcin, R. Paz, and M. 
Storti, *MPI for Python*, Journal of Parallel and Distributed Computing, 65(9):1108-1115, 2005. https://doi.org/10.1016/j.jpdc.2005.03.010 mpi4py-4.0.3/CMakeLists.txt000066400000000000000000000073431475341043600155160ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com cmake_minimum_required(VERSION 3.15...3.26) project(mpi4py LANGUAGES C) find_package( Python REQUIRED COMPONENTS Interpreter COMPONENTS Development.Module OPTIONAL_COMPONENTS Development.Embed ) find_package(MPI REQUIRED) separate_arguments(MPI_C_COMPILE_OPTIONS NATIVE_COMMAND) separate_arguments(MPI_C_LINK_FLAGS NATIVE_COMMAND) set(CIBUILDWHEEL $) add_compile_definitions($<${CIBUILDWHEEL}:CIBUILDWHEEL=1>) include(CheckSymbolExists) set(CMAKE_REQUIRED_INCLUDES ${MPI_C_INCLUDE_DIRS}) set(CMAKE_REQUIRED_DEFINITIONS ${MPI_C_COMPILE_DEFINITIONS}) set(CMAKE_REQUIRED_FLAGS ${MPI_C_COMPILE_OPTIONS}) set(BINDIR ${CMAKE_CURRENT_BINARY_DIR}) set(TOPDIR ${CMAKE_CURRENT_SOURCE_DIR}) set(SRCDIR ${CMAKE_CURRENT_SOURCE_DIR}/src) # Cython set(cythonize ${TOPDIR}/conf/cythonize.py) set(Cython_COMMAND ${Python_EXECUTABLE} ${cythonize}) set(Cython_OPTIONS --3str --cleanup 3) set(MPI.pyx ${SRCDIR}/mpi4py/MPI.pyx) file( GLOB_RECURSE MPI.deps ${SRCDIR}/mpi4py/*.pyx ${SRCDIR}/mpi4py/*.pxd ${SRCDIR}/mpi4py/MPI.src/*.pyx ${SRCDIR}/mpi4py/MPI.src/*.pxi ) set(MPI.c ${BINDIR}/MPI.c) set(MPI.h ${BINDIR}/MPI.h ${BINDIR}/MPI_api.h) add_custom_command( OUTPUT ${MPI.c} BYPRODUCTS ${MPI.h} DEPENDS ${MPI.deps} WORKING_DIRECTORY ${TOPDIR} VERBATIM COMMAND ${Cython_COMMAND} ${Cython_OPTIONS} ${MPI.pyx} --output-file ${MPI.c} ) install(FILES ${MPI.h} DESTINATION mpi4py) # mpi4py.MPI python_add_library(mpi4py.MPI MODULE ${MPI.c} WITH_SOABI) set_target_properties(mpi4py.MPI PROPERTIES OUTPUT_NAME "MPI" PREFIX "") target_include_directories(mpi4py.MPI PRIVATE ${SRCDIR}) target_include_directories(mpi4py.MPI PRIVATE ${MPI_C_INCLUDE_DIRS}) target_compile_definitions(mpi4py.MPI PRIVATE ${MPI_C_COMPILE_DEFINITIONS}) target_compile_options(mpi4py.MPI PRIVATE ${MPI_C_COMPILE_OPTIONS}) target_link_directories(mpi4py.MPI PRIVATE ${MPI_C_LIBRARY_DIRS}) target_link_libraries(mpi4py.MPI PRIVATE ${MPI_C_LIBRARIES}) target_link_options(mpi4py.MPI PRIVATE ${MPI_C_LINK_FLAGS}) install(TARGETS mpi4py.MPI LIBRARY DESTINATION mpi4py) # mpi4py/bin/python-mpi if (Python_Development.Embed_FOUND) add_executable(python-mpi ${SRCDIR}/python.c) target_include_directories(python-mpi PRIVATE ${Python_INCLUDE_DIRS}) target_link_directories(python-mpi PRIVATE ${Python_LIBRARY_DIRS}) target_link_libraries(python-mpi PRIVATE ${Python_LIBRARIES}) target_include_directories(python-mpi PRIVATE ${MPI_C_INCLUDE_DIRS}) target_compile_definitions(python-mpi PRIVATE ${MPI_C_COMPILE_DEFINITIONS}) target_compile_options(python-mpi PRIVATE ${MPI_C_COMPILE_OPTIONS}) target_link_directories(python-mpi PRIVATE ${MPI_C_LIBRARY_DIRS}) target_link_libraries(python-mpi PRIVATE ${MPI_C_LIBRARIES}) target_link_options(python-mpi PRIVATE ${MPI_C_LINK_FLAGS}) install(TARGETS python-mpi RUNTIME DESTINATION mpi4py/bin) endif() # mpi4py/* file( GLOB mpi4py_SOURCES RELATIVE ${SRCDIR} ${SRCDIR}/mpi4py/*.py ${SRCDIR}/mpi4py/futures/*.py ${SRCDIR}/mpi4py/util/*.py ) file( GLOB mpi4py_HEADERS RELATIVE ${SRCDIR} ${SRCDIR}/mpi4py/*.pxd ${SRCDIR}/mpi4py/include/mpi4py/*.[hi] ${SRCDIR}/mpi4py/include/mpi4py/*.px[di] ) if (Python_VERSION VERSION_GREATER_EQUAL 3.11) list(APPEND mpi4py_HEADERS mpi4py/py.typed) list(APPEND mpi4py_HEADERS mpi4py/MPI.pyi) foreach(file ${mpi4py_SOURCES}) list(APPEND 
mpi4py_HEADERS ${file}i) endforeach() endif() foreach(file ${mpi4py_SOURCES} ${mpi4py_HEADERS}) get_filename_component(dir ${file} DIRECTORY) install(FILES ${SRCDIR}/${file} DESTINATION ${dir}) endforeach() if (WIN32) set(mpidllpath mpi.pth) foreach(file ${mpidllpath}) if (EXISTS ${SRCDIR}/${file}) install(FILES ${SRCDIR}/${file} DESTINATION .) endif() endforeach() endif() mpi4py-4.0.3/DESCRIPTION.rst000066400000000000000000000031361475341043600152670ustar00rootroot00000000000000============== MPI for Python ============== This package provides Python bindings for the *Message Passing Interface* (MPI_) standard. It is implemented on top of the MPI specification and exposes an API which grounds on the standard MPI-2 C++ bindings. .. _MPI: https://www.mpi-forum.org Features ======== This package supports: * Convenient communication of any *picklable* Python object + point-to-point (send & receive) + collective (broadcast, scatter & gather, reductions) * Fast communication of Python object exposing the *Python buffer interface* (NumPy arrays, builtin bytes/string/array objects) + point-to-point (blocking/nonblocking/persistent send & receive) + collective (broadcast, block/vector scatter & gather, reductions) * Process groups and communication domains + Creation of new intra/inter communicators + Cartesian & graph topologies * Parallel input/output: + read & write + blocking/nonblocking & collective/noncollective + individual/shared file pointers & explicit offset * Dynamic process management + spawn & spawn multiple + accept/connect + name publishing & lookup * One-sided operations + remote memory access (put, get, accumulate) + passive target synchronization (start/complete & post/wait) + active target synchronization (lock & unlock) Install ======= See `INSTALL.rst `_. .. include:: INSTALL.rst Citation ======== If MPI for Python been significant to a project that leads to an academic publication, please acknowledge that fact by citing the project. See `CITATION.rst `_. .. include:: CITATION.rst mpi4py-4.0.3/INSTALL.rst000066400000000000000000000125651475341043600146200ustar00rootroot00000000000000Using **pip** ------------- You can install the latest mpi4py release from its source distribution at `PyPI `_ using ``pip``:: $ python -m pip install mpi4py You can also install the in-development version with:: $ python -m pip install git+https://github.com/mpi4py/mpi4py or:: $ python -m pip install https://github.com/mpi4py/mpi4py/tarball/master .. note:: Installing mpi4py from its source distribution (available at PyPI) or Git source code repository (available at GitHub) requires a C compiler and a working MPI implementation with development headers and libraries. .. warning:: ``pip`` keeps previously built wheel files on its cache for future reuse. If you want to reinstall the ``mpi4py`` package using a different or updated MPI implementation, you have to either first remove the cached wheel file with:: $ python -m pip cache remove mpi4py or ask ``pip`` to disable the cache:: $ python -m pip install --no-cache-dir mpi4py Using **conda** --------------- The `conda-forge`_ community provides ready-to-use binary packages from an ever growing collection of software libraries built around the multi-platform *conda* package manager. Four MPI implementations are available on conda-forge: Open MPI (Linux and macOS), MPICH (Linux and macOS), Intel MPI (Linux and Windows) and Microsoft MPI (Windows). 
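Whichever MPI backend you choose from the commands below, a quick sanity check (a minimal example, assuming the ``mpiexec`` launcher of the selected MPI is on your ``PATH``) is to ask mpi4py which MPI library it is linked against::

   $ mpiexec -n 1 python -m mpi4py --mpi-lib-version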
You can install mpi4py and your preferred MPI implementation using the ``conda`` package manager: * to use MPICH do:: $ conda install -c conda-forge mpi4py mpich * to use Open MPI do:: $ conda install -c conda-forge mpi4py openmpi * to use Intel MPI do:: $ conda install -c conda-forge mpi4py impi_rt * to use Microsoft MPI do:: $ conda install -c conda-forge mpi4py msmpi MPICH and many of its derivatives are ABI-compatible. You can provide the package specification ``mpich=X.Y.*=external_*`` (where ``X`` and ``Y`` are the major and minor version numbers) to request the conda package manager to use system-provided MPICH (or derivative) libraries. Similarly, you can provide the package specification ``openmpi=X.Y.*=external_*`` to use system-provided Open MPI libraries. The ``openmpi`` package on conda-forge has built-in CUDA support, but it is disabled by default. To enable it, follow the instruction outlined during ``conda install``. Additionally, UCX support is also available once the ``ucx`` package is installed. .. warning:: Binary conda-forge packages are built with a focus on compatibility. The MPICH and Open MPI packages are build in a constrained environment with relatively dated OS images. Therefore, they may lack support for high-performance features like cross-memory attach (XPMEM/CMA). In production scenarios, it is recommended to use external (either custom-built or system-provided) MPI installations. See the relevant conda-forge documentation about `using external MPI libraries `_ . .. _conda-forge: https://conda-forge.org/ .. _cf-mpi-docs: https://conda-forge.org/docs/user/tipsandtricks/#using-external-message-passing-interface-mpi-libraries Linux ----- On **Fedora Linux** systems (as well as **RHEL** and their derivatives using the EPEL software repository), you can install binary packages with the system package manager: * using ``dnf`` and the ``mpich`` package:: $ sudo dnf install python3-mpi4py-mpich * using ``dnf`` and the ``openmpi`` package:: $ sudo dnf install python3-mpi4py-openmpi Please remember to load the correct MPI module for your chosen MPI implementation: * for the ``mpich`` package do:: $ module load mpi/mpich-$(arch) $ python -c "from mpi4py import MPI" * for the ``openmpi`` package do:: $ module load mpi/openmpi-$(arch) $ python -c "from mpi4py import MPI" On **Ubuntu Linux** and **Debian Linux** systems, binary packages are available for installation using the system package manager:: $ sudo apt install python3-mpi4py Note that on Ubuntu/Debian systems, the mpi4py package uses Open MPI. To use MPICH, install the ``libmpich-dev`` and ``python3-dev`` packages (and any other required development tools). Afterwards, install mpi4py from sources using ``pip``. macOS ----- **macOS** users can install mpi4py using the `Homebrew`_ package manager:: $ brew install mpi4py Note that the Homebrew mpi4py package uses Open MPI. Alternatively, install the ``mpich`` package and next install mpi4py from sources using ``pip``. .. _Homebrew: https://brew.sh/ Windows ------- **Windows** users can install mpi4py from binary wheels hosted on the Python Package Index (PyPI) using ``pip``:: $ python -m pip install mpi4py The Windows wheels available on PyPI are specially crafted to work with either the `Intel MPI `_ or the `Microsoft MPI `_ runtime, therefore requiring a separate installation of any one of these packages. .. _I_MPI: https://software.intel.com/intel-mpi-library .. 
_MSMPI: https://learn.microsoft.com/message-passing-interface/microsoft-mpi Intel MPI is under active development and supports recent version of the MPI standard. Intel MPI can be installed with ``pip`` (see the `impi-rt`_ package on PyPI), being therefore straightforward to get it up and running within a Python environment. Intel MPI can also be installed system-wide as part of the Intel HPC Toolkit for Windows or via standalone online/offline installers. .. _impi-rt: https://pypi.org/project/impi-rt/ mpi4py-4.0.3/LICENSE.rst000066400000000000000000000027101475341043600145630ustar00rootroot00000000000000Copyright (c) 2025, Lisandro Dalcin Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. mpi4py-4.0.3/MANIFEST.in000066400000000000000000000011471475341043600145100ustar00rootroot00000000000000include pyproject.toml setup.py tox.ini *.cfg *.rst include CMakeLists.txt meson.build exclude .* recursive-include demo *.py *.pyx *.i *.h *.c *.cxx *.f90 *.f08 recursive-include demo [M,m]akefile python-config *.sh *.txt recursive-include conf *.py *.sh *.toml *.txt *.h recursive-include src *.py py.typed *.pyi *.pyx *.px[di] recursive-include src *.pth *.h *.c *.i recursive-include test *.py *.sh exclude src/mpi4py/MPI.c exclude src/mpi4py/MPI.h exclude src/mpi4py/MPI_api.h include docs/*.rst include docs/*.bib include docs/*.svg graft docs/source prune docs/source/_build prune docs/source/reference mpi4py-4.0.3/README.rst000066400000000000000000000077151475341043600144500ustar00rootroot00000000000000============== MPI for Python ============== .. image:: https://github.com/mpi4py/mpi4py/workflows/ci/badge.svg?branch=master :target: https://github.com/mpi4py/mpi4py/actions/ .. image:: https://readthedocs.org/projects/mpi4py/badge/?version=latest :target: https://mpi4py.readthedocs.io/en/latest/ .. image:: https://dev.azure.com/mpi4py/mpi4py/_apis/build/status/ci?branchName=master&label=azure :target: https://dev.azure.com/mpi4py/mpi4py/_build .. image:: https://ci.appveyor.com/api/projects/status/whh5xovp217h0f7n?svg=true :target: https://ci.appveyor.com/project/mpi4py/mpi4py .. 
image:: https://circleci.com/gh/mpi4py/mpi4py.svg?style=shield :target: https://circleci.com/gh/mpi4py/mpi4py .. image:: https://codecov.io/gh/mpi4py/mpi4py/branch/master/graph/badge.svg :target: https://codecov.io/gh/mpi4py/mpi4py .. image:: https://scan.coverity.com/projects/mpi4py-mpi4py/badge.svg :target: https://scan.coverity.com/projects/mpi4py-mpi4py | |pypy| |mpi4py| |conda-forge| |anaconda| | |homebrew| |fedora| |ubuntu| |debian| |archlinux| .. |pypy| image:: https://img.shields.io/pypi/v/mpi4py?logo=pypi :target: https://pypi.org/project/mpi4py/ .. |mpi4py| image:: https://img.shields.io/conda/vn/mpi4py/mpi4py?logo=pypi :target: https://anaconda.org/conda-forge/mpi4py .. |conda-forge| image:: https://img.shields.io/conda/vn/conda-forge/mpi4py?logo=anaconda :target: https://anaconda.org/conda-forge/mpi4py .. |anaconda| image:: https://img.shields.io/conda/vn/anaconda/mpi4py?logo=anaconda :target: https://anaconda.org/anaconda/mpi4py .. |homebrew| image:: https://img.shields.io/homebrew/v/mpi4py?logo=homebrew :target: https://formulae.brew.sh/formula/mpi4py .. |fedora| image:: https://img.shields.io/fedora/v/mpi4py-common?logo=fedora :target: https://packages.fedoraproject.org/pkgs/mpi4py/ .. |ubuntu| image:: https://img.shields.io/ubuntu/v/mpi4py/noble?logo=ubuntu :target: https://packages.ubuntu.com/source/noble/mpi4py .. |debian| image:: https://img.shields.io/debian/v/mpi4py/stable?logo=debian :target: https://packages.debian.org/source/stable/mpi4py .. |archlinux| image:: https://img.shields.io/archlinux/v/extra/x86_64/python-mpi4py?logo=archlinux :target: https://archlinux.org/packages/extra/x86_64/python-mpi4py/ Overview -------- This package provides Python bindings for the *Message Passing Interface* (`MPI `_) standard. It is implemented on top of the MPI specification and exposes an API which grounds on the standard MPI-2 C++ bindings. Prerequisites ------------- * `Python `_ 3.6 or above, or `PyPy `_ 7.2 or above. * An MPI implementation like `MPICH `_ or `Open MPI `_ built with shared/dynamic libraries. Documentation ------------- * Read the Docs: https://mpi4py.readthedocs.io/ * GitHub Pages: https://mpi4py.github.io/ Support ------- * Mailing List: mpi4py@googlegroups.com * Google Groups: https://groups.google.com/g/mpi4py * GitHub Discussions: https://github.com/mpi4py/mpi4py/discussions Testsuite --------- The testsuite is run periodically on * `GitHub Actions `_ * `Read the Docs `_ * `Azure Pipelines `_ * `AppVeyor `_ * `Circle CI `_ * `Codecov `_ Citation -------- + L. Dalcin and Y.-L. L. Fang, *mpi4py: Status Update After 12 Years of Development*, Computing in Science & Engineering, 23(4):47-54, 2021. https://doi.org/10.1109/MCSE.2021.3083216 * M. Rogowski, S. Aseeri, D. Keyes, and L. Dalcin, *mpi4py.futures: MPI-Based Asynchronous Task Execution for Python*, IEEE Transactions on Parallel and Distributed Systems, 34(2):611-622, 2023. 
https://doi.org/10.1109/TPDS.2022.3225481 mpi4py-4.0.3/conf/000077500000000000000000000000001475341043600136745ustar00rootroot00000000000000mpi4py-4.0.3/conf/build-cmake.sh000077500000000000000000000003421475341043600164070ustar00rootroot00000000000000#!/bin/sh set -eu test -f CMakeLists.txt test -d src/mpi4py rm -rf build install options=-DPython_FIND_UNVERSIONED_NAMES=FIRST cmake -B build -DCMAKE_INSTALL_PREFIX=install $options cmake --build build cmake --install build mpi4py-4.0.3/conf/build-meson.sh000077500000000000000000000003671475341043600164570ustar00rootroot00000000000000#!/bin/sh set -eu test -f meson.build test -d src/mpi4py rm -rf build install options="--python.bytecompile=-1 --python.platlibdir=""" env CC=mpicc \ meson setup build --prefix="$PWD/install" $options meson compile -C build meson install -C build mpi4py-4.0.3/conf/builder.py000066400000000000000000000144761475341043600157100ustar00rootroot00000000000000import contextlib import importlib import os import shutil import sys # --- BACKENDS = { 'setuptools': 'setuptools.build_meta', 'skbuild': 'scikit_build_core.setuptools.build_meta', 'mesonpy': 'mesonpy', } def get_build_backend_name(name=None): if name is None: name = os.environ.get('MPI4PY_BUILD_BACKEND', '') name = name.lower().replace('_', '-') if name in ('default', ''): for name, module_name in BACKENDS.items(): if name != 'setuptools': if module_name in sys.modules: return name return 'setuptools' if name in ('setuptools', 'setup'): return 'setuptools' if name in ('scikit-build-core', 'scikit-build', 'skbuild', 'cmake'): return 'skbuild' if name in ('meson-python', 'mesonpy', 'meson'): return 'mesonpy' raise RuntimeError(f"Unknown build backend {name!r}") def build_backend(name=None): name = get_build_backend_name(name) return importlib.import_module(BACKENDS[name]) def read_build_requires(name): confdir = os.path.dirname(__file__) basename = f'requirements-build-{name}.txt' filename = os.path.join(confdir, basename) with open(filename, encoding='utf-8') as f: return [req for req in map(str.strip, f) if req] def get_backend_requires_fast(backend, dist, config_settings=None): if backend is None or isinstance(backend, str): try: backend = build_backend(backend) except ImportError: return None requires = [] get_requires = getattr(backend, f'get_requires_for_build_{dist}', None) if get_requires is not None: requires += get_requires(config_settings) return requires def get_backend_requires_hook(name, dist, config_settings=None): try: from pyproject_hooks import BuildBackendHookCaller except ImportError: from pep517.wrappers import Pep517HookCaller as BuildBackendHookCaller try: from build.env import DefaultIsolatedEnv except ImportError: from build.env import IsolatedEnvBuilder class DefaultIsolatedEnv(IsolatedEnvBuilder): def __enter__(self): env = super().__enter__() env.python_executable = env.executable def make_extra_environ(): path = os.environ.get('PATH') return {'PATH': os.pathsep.join([env.scripts_dir, path]) if path is not None else env.scripts_dir } env.make_extra_environ = make_extra_environ return env @contextlib.contextmanager def environment(path): environ_prev = [('PATH', os.environ['PATH'])] for prefix in ('_PYPROJECT_HOOKS', 'PEP517'): for suffix in ('BUILD_BACKEND', 'BACKEND_PATH'): key = f'{prefix}_{suffix}' if key in os.environ: val = os.environ.pop(key) environ_prev.append((key, val)) os.environ['PATH'] = path try: yield None finally: os.environ.update(environ_prev) requires = read_build_requires(name) with DefaultIsolatedEnv() as env: path = 
env.make_extra_environ()['PATH'] python_executable = env.python_executable with environment(path): env.install(requires) hook = BuildBackendHookCaller( source_dir=os.getcwd(), build_backend=BACKENDS[name], python_executable=python_executable, ) requires += get_backend_requires_fast(hook, dist, config_settings) return requires def get_requires_for_build(dist, config_settings=None): name = get_build_backend_name() requires = get_backend_requires_fast(name, dist, config_settings) if requires is None: requires = get_backend_requires_hook(name, dist, config_settings) if dist in ('wheel', 'editable'): requires += read_build_requires('cython') return requires # --- def get_requires_for_build_sdist(config_settings=None): return get_requires_for_build('sdist', config_settings) def build_sdist( sdist_directory, config_settings=None, ): return build_backend().build_sdist( sdist_directory, config_settings, ) # --- def get_requires_for_build_wheel(config_settings=None): return get_requires_for_build('wheel', config_settings) def prepare_metadata_for_build_wheel( metadata_directory, config_settings=None, ): return build_backend().prepare_metadata_for_build_wheel( metadata_directory, config_settings, ) def build_wheel( wheel_directory, config_settings=None, metadata_directory=None, ): return build_backend().build_wheel( wheel_directory, config_settings, metadata_directory, ) # --- def get_requires_for_build_editable(config_settings=None): return get_requires_for_build('editable', config_settings) def prepare_metadata_for_build_editable( metadata_directory, config_settings=None, ): return build_backend().prepare_metadata_for_build_editable( metadata_directory, config_settings, ) def build_editable( wheel_directory, config_settings=None, metadata_directory=None, ): return build_backend().build_editable( wheel_directory, config_settings, metadata_directory, ) # --- def setup_env_mpicc(): mpicc = shutil.which('mpicc') mpicc = os.environ.get('MPICC', mpicc) mpicc = os.environ.get('MPI4PY_BUILD_MPICC', mpicc) if not mpicc: return if ' ' in mpicc: mpicc = f'"{mpicc}"' if 'CC' not in os.environ: os.environ['CC'] = mpicc if get_build_backend_name() == 'setuptools': try: import setuptools.build_meta as st_bm except ImportError: st_bm = None if not hasattr(st_bm, 'get_requires_for_build_editable'): del get_requires_for_build_editable if not hasattr(st_bm, 'prepare_metadata_for_build_editable'): del prepare_metadata_for_build_editable if not hasattr(st_bm, 'build_editable'): del build_editable del st_bm if get_build_backend_name() == 'mesonpy': setup_env_mpicc() del prepare_metadata_for_build_wheel del prepare_metadata_for_build_editable # --- mpi4py-4.0.3/conf/cycoverage.py000066400000000000000000000131021475341043600163720ustar00rootroot00000000000000import os from coverage.plugin import ( CoveragePlugin, FileTracer, FileReporter ) from coverage.files import ( canonical_filename, ) CYTHON_EXTENSIONS = {".pxd", ".pyx", ".pxi"} class CythonCoveragePlugin(CoveragePlugin): def configure(self, config): self.exclude = config.get_option("report:exclude_lines") def file_tracer(self, filename): filename = canonical_filename(os.path.abspath(filename)) _, ext = os.path.splitext(filename) if ext in CYTHON_EXTENSIONS: return CythonFileTracer(filename) return None def file_reporter(self, filename): filename = canonical_filename(os.path.abspath(filename)) _, ext = os.path.splitext(filename) if ext in CYTHON_EXTENSIONS: return CythonFileReporter(filename, self.exclude) return None class CythonFileTracer(FileTracer): def 
__init__(self, filename): super().__init__() self.filename = filename def source_filename(self): return self.filename class CythonFileReporter(FileReporter): def __init__(self, filename, exclude=None): super().__init__(filename) self.exclude = exclude def lines(self): _setup_lines(self.exclude) return self._get_lines(CODE_LINES) def excluded_lines(self): _setup_lines(self.exclude) return self._get_lines(EXCL_LINES) def translate_lines(self, lines): _setup_lines(self.exclude) exec_lines = self._get_lines(EXEC_LINES) return set(lines).union(exec_lines) def _get_lines(self, lines_map): key = os.path.relpath(self.filename, TOPDIR) lines = lines_map.get(key, {}) return set(lines) TOPDIR = os.path.dirname(os.path.dirname(__file__)) SRCDIR = os.path.join(TOPDIR, 'src') CODE_LINES = None EXEC_LINES = None EXCL_LINES = None def _setup_lines(exclude): global CODE_LINES, EXEC_LINES, EXCL_LINES if CODE_LINES is None or EXEC_LINES is None or EXCL_LINES is None: source = os.path.join(SRCDIR, 'mpi4py', 'MPI.c') CODE_LINES, EXEC_LINES, EXCL_LINES = _parse_c_file(source, exclude) def _parse_c_file(c_file, exclude_list): from collections import defaultdict import re match_filetab_begin = 'static const char *__pyx_f[] = {' match_filetab_begin = re.compile(re.escape(match_filetab_begin)).match match_filetab_entry = re.compile(r' *"(.*)",').match match_source_path_line = re.compile(r' */[*] +"(.*)":([0-9]+)$').match match_current_code_line = re.compile(r' *[*] (.*) # <<<<<<+$').match match_comment_end = re.compile(r' *[*]/$').match match_trace_line = re.compile( r' *__Pyx_TraceLine\((\d+),\d+,__PYX_ERR\((\d+),' ).match not_executable = re.compile( '|'.join([ r'\s*c(?:type)?def\s+' r'(?:(?:public|external)\s+)?' r'(?:struct|union|enum|class)' r'(\s+[^:]+|)\s*:', ]) ).match if exclude_list: line_is_excluded = re.compile("|".join([ rf'(?:{regex})' for regex in exclude_list ])).search else: def line_is_excluded(_): return False filetab = [] modinit = False code_lines = defaultdict(dict) exec_lines = defaultdict(dict) executable_lines = defaultdict(set) excluded_lines = defaultdict(set) with open(c_file) as lines: lines = iter(lines) for line in lines: if match_filetab_begin(line): for line in lines: match = match_filetab_entry(line) if not match: break filename = match.group(1) filetab.append(filename) match = match_source_path_line(line) if not match: if '__Pyx_TraceCall("__Pyx_PyMODINIT_FUNC ' in line: modinit = True if '__Pyx_TraceLine(' in line: trace_line = match_trace_line(line) if trace_line: lineno, fid = map(int, trace_line.groups()) executable_lines[filetab[fid]].add(lineno) continue filename, lineno = match.groups() lineno = int(lineno) for comment_line in lines: match = match_current_code_line(comment_line) if match: code_line = match.group(1).rstrip() if not_executable(code_line): break if line_is_excluded(code_line): excluded_lines[filename].add(lineno) break code_lines[filename][lineno] = code_line if modinit: exec_lines[filename][lineno] = code_line break if match_comment_end(comment_line): # unexpected comment format - false positive? break # Remove lines that generated code but are not traceable. 
for filename, lines in code_lines.items(): dead_lines = set(lines).difference(executable_lines.get(filename, ())) for lineno in dead_lines: del lines[lineno] for filename, lines in exec_lines.items(): dead_lines = set(lines).difference(executable_lines.get(filename, ())) for lineno in dead_lines: del lines[lineno] return code_lines, exec_lines, excluded_lines def coverage_init(reg, options): # noqa: ARG001 plugin = CythonCoveragePlugin() reg.add_configurer(plugin) reg.add_file_tracer(plugin) mpi4py-4.0.3/conf/cythonize.py000066400000000000000000000022711475341043600162640ustar00rootroot00000000000000#!/usr/bin/env python """Run Cython with custom options.""" import os import sys from Cython.Compiler.Main import main as cython_main def cythonize(args=None): """Run `cython --3str --cleanup 3 ...`.""" if args is None: argv = sys.argv[:] else: argv = [os.path.abspath(__file__), *args] if '--cleanup' not in argv: argv[1:1] = ['--cleanup', '3'] if '--3str' not in argv: argv[1:1] = ['--3str'] cwd = os.getcwd() sys_argv = sys.argv[:] try: sys.argv[:] = argv cython_main(command_line=1) except SystemExit as exc: return exc.code else: return 0 finally: os.chdir(cwd) sys.argv[:] = sys_argv def main(): """Entry-point to run Cython with custom options.""" args = sys.argv[1:] if not args: appdir = os.path.dirname(os.path.abspath(__file__)) topdir = os.path.dirname(appdir) source = os.path.join('src', 'mpi4py', 'MPI.pyx') target = os.path.join('src', 'mpi4py', 'MPI.c') args += ['--working', topdir] args += [source, '--output-file', target] sys.exit(cythonize(args)) if __name__ == "__main__": main() mpi4py-4.0.3/conf/cythonize.sh000077500000000000000000000002241475341043600162450ustar00rootroot00000000000000#!/bin/sh set -eu topdir=$(cd $(dirname "$0")/.. && pwd) python "$topdir/conf/cythonize.py" \ --working "$topdir" $@ \ "src/mpi4py/MPI.pyx" mpi4py-4.0.3/conf/metadata.py000066400000000000000000000072021475341043600160270ustar00rootroot00000000000000import re import os import sys def get_name(settings=None): # noqa: ARG001 name = "mpi4py" suffix = os.environ.get("MPI4PY_DIST_SUFFIX") if suffix: name = "{name}-{suffix}".format(**vars()) return name def get_version(settings=None): # noqa: ARG001 confdir = os.path.dirname(os.path.abspath(__file__)) topdir = os.path.dirname(confdir) srcdir = os.path.join(topdir, "src") source = os.path.join(srcdir, "mpi4py", "__init__.py") with open(source, encoding="utf-8") as f: m = re.search(r"__version__\s*=\s*'(.*)'", f.read()) version = m.groups()[0] local_version = os.environ.get("MPI4PY_LOCAL_VERSION") if local_version: version = "{version}+{local_version}".format(**vars()) return version def get_readme(settings=None): # noqa: ARG001 confdir = os.path.dirname(__file__) topdir = os.path.dirname(confdir) filelist = ("DESCRIPTION.rst", "CITATION.rst", "INSTALL.rst") template = "See `{0} <{0}>`_.\n\n" template += ".. 
include:: {0}\n" text = template.format(filelist[0]) for filename in filelist: source = os.path.join(topdir, filename) with open(source, encoding="utf-8") as f: includeline = template.format(filename) text = text.replace(includeline, f.read()) return { "text": text, "content-type": "text/x-rst", } description = "Python bindings for MPI" requires_python = ">=3.6" license = "BSD-3-Clause" authors = [ {"name": "Lisandro Dalcin", "email": "dalcinl@gmail.com"}, ] keywords = [ "scientific computing", "parallel computing", "message passing interface", "MPI", ] classifiers = [ "Development Status :: 6 - Mature", "Environment :: GPU", "Environment :: GPU :: NVIDIA CUDA", "Intended Audience :: Developers", "Intended Audience :: Science/Research", "License :: OSI Approved :: BSD License", "Operating System :: MacOS", "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX", "Operating System :: POSIX :: BSD", "Operating System :: POSIX :: Linux", "Operating System :: Unix", "Programming Language :: C", "Programming Language :: Cython", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Scientific/Engineering", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: System :: Distributed Computing", "Typing :: Typed", ] urls = { "Homepage": "https://mpi4py.github.io", "Documentation": "https://mpi4py.readthedocs.io/en/stable/", "Source": "https://github.com/mpi4py/mpi4py", "Issues": "https://github.com/mpi4py/mpi4py/issues", "Discussions": "https://github.com/mpi4py/mpi4py/discussions", "Downloads": "https://github.com/mpi4py/mpi4py/releases", } def dynamic_metadata(field, settings=None): getter = globals().get("get_" + field) if getter: return getter(settings) return globals()[field.replace(".", "_")] if __name__ == "__main__": print(dynamic_metadata(sys.argv[1])) mpi4py-4.0.3/conf/metadata.toml000066400000000000000000000043461475341043600163600ustar00rootroot00000000000000[project] name = "mpi4py" dynamic = ["version", "readme"] description = "Python bindings for MPI" requires-python = ">=3.6" license = {text = 'BSD-3-Clause'} authors = [ {name = "Lisandro Dalcin", email = "dalcinl@gmail.com"}, ] keywords = [ "scientific computing", "parallel computing", "message passing interface", "MPI", ] classifiers = [ "Development Status :: 6 - Mature", "Environment :: GPU", "Environment :: GPU :: NVIDIA CUDA", "Intended Audience :: Developers", "Intended Audience :: Science/Research", "License :: OSI Approved :: BSD License", "Operating System :: MacOS", "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX", "Operating System :: POSIX :: BSD", "Operating System :: POSIX :: Linux", "Operating System :: Unix", "Programming Language :: C", "Programming Language :: Cython", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.6", "Programming Language :: 
Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Scientific/Engineering", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: System :: Distributed Computing", "Typing :: Typed", ] [project.urls] Homepage = "https://mpi4py.github.io" Documentation = "https://mpi4py.readthedocs.io/en/latest/" Source = "https://github.com/mpi4py/mpi4py" Issues = "https://github.com/mpi4py/mpi4py/issues" Discussions = "https://github.com/mpi4py/mpi4py/discussions" Downloads = "https://github.com/mpi4py/mpi4py/releases" [tool.scikit-build] minimum-version = "0.5" experimental = true sdist.exclude = [".*"] wheel.packages = [] [tool.scikit-build.metadata] version = {provider = "metadata", provider-path = "conf"} readme = {provider = "metadata", provider-path = "conf"} mpi4py-4.0.3/conf/mpiapigen.py000066400000000000000000000664211475341043600162300ustar00rootroot00000000000000# Very, very naive RE-based way for collecting declarations inside # 'cdef extern from *' Cython blocks in in source files, and next # generate compatibility headers for partially implemented MPIs. # ruff: noqa: E501, UP031 import re from textwrap import indent, dedent import warnings def anyof(*args): return r'(?:{})'.format('|'.join(args)) def join(*args): tokens = [] for tok in args: if isinstance(tok, (list, tuple)): tok = '({})'.format(r'\s*'.join(tok)) tokens.append(tok) return r'\s*'.join(tokens) def r_(*args): return re.compile(join(*args)) lparen = r'\(' rparen = r'\)' colon = r'\:' asterisk = r'\*' ws = r'\s*' sol = r'^' eol = r'$' opt = r'?' enum = join('enum', colon) typedef = 'ctypedef' pointer = asterisk struct = join(typedef, 'struct') integral_type_names = [ 'Aint', 'Offset', 'Count', 'Fint', ] struct_type_names = [ 'Status', 'F08_status', ] handle_type_names = [ 'Datatype', 'Request', 'Message', 'Op', 'Info', 'Group', 'Errhandler', 'Session', 'Comm', 'Win', 'File', ] basic_type = r'(?:void|int|char\s*\*{1,3})' integral_type = r'MPI_(?:{})'.format('|'.join(integral_type_names)) struct_type = r'MPI_(?:{})'.format('|'.join(struct_type_names)) opaque_type = r'MPI_(?:{})'.format('|'.join(handle_type_names)) upper_name = r'MPI_[A-Z0-9_]+' camel_name = r'MPI_[A-Z][a-z0-9_]+' usrfun_name = camel_name + r'_(?:function|function_c|fn)' arg_list = r'.*' ret_type = r'void|int|double|MPI_Aint' canyint = anyof(r'int', r'long(?:\s+long)?') canyptr = join(r'\w+', pointer+'?') annotation = r'\#\:\=' fallback_value = r'\(?[A-Za-z0-9_\+\-\(\)\*]+\)?' fallback = rf'(?:{join(annotation, [fallback_value])})?' 
fint_type = r'MPI_Fint' fmpi_type = opaque_type.replace('Datatype', 'Type') c2f_name = fmpi_type+'_c2f' f2c_name = fmpi_type+'_f2c' class Re: INTEGRAL_TYPE = r_(sol, typedef, [canyint], [integral_type], fallback, eol) STRUCT_TYPE = r_(sol, struct, [struct_type], colon+opt, fallback, eol) OPAQUE_TYPE = r_(sol, typedef, canyptr, [opaque_type], eol) FUNCTION_TYPE = r_(sol, typedef, [ret_type], [camel_name], lparen, [arg_list], rparen, fallback, eol) ENUM_VALUE = r_(sol, enum, [upper_name], fallback, eol) HANDLE_VALUE = r_(sol, [opaque_type], [upper_name], fallback, eol) BASIC_PTRVAL = r_(sol, [basic_type, pointer], [upper_name], fallback, eol) INTEGRAL_PTRVAL = r_(sol, [integral_type, pointer], [upper_name], fallback, eol) STRUCT_PTRVAL = r_(sol, [struct_type, pointer], [upper_name], fallback, eol) FUNCTION_PTRVAL = r_(sol, [usrfun_name, pointer], [upper_name], fallback, eol) FUNCTION_PROTO = r_(sol, [ret_type], [camel_name], lparen, [arg_list], rparen, fallback, eol) FUNCTION_C2F = r_(sol, [fint_type], [c2f_name], lparen, [opaque_type], rparen, fallback, eol) FUNCTION_F2C = r_(sol, [opaque_type], [f2c_name], lparen, [fint_type], rparen, fallback, eol) IGNORE = r_(anyof( join(sol, r'cdef.*', eol), join(sol, struct, r'_mpi_\w+_t', eol), join(sol, 'int', r'MPI_(?:SOURCE|TAG|ERROR)', eol), join(sol, r'#.*', eol), join(sol, eol), )) class Node: REGEX = None @classmethod def match(self, line): m = self.REGEX.match(line) return m.groups() if m else None HEADER = None CONFIG = None MISSING = None MISSING_HEAD = """\ #ifndef PyMPI_HAVE_%(name)s #undef %(cname)s """ MISSING_TAIL = """ #endif """ def init(self, name, **kwargs): self.name = name self.__dict__.update(kwargs) def header(self): line = dedent(self.HEADER) % vars(self) line = line.replace('\n', '') line = line.replace(' ', ' ') return line + '\n' def config(self): return dedent(self.CONFIG) % vars(self) def missing(self, guard=True): if guard: head = dedent(self.MISSING_HEAD) tail = dedent(self.MISSING_TAIL) else: head = '#undef %(cname)s\n' tail = '\n\n' body = dedent(self.MISSING) return (head+body+tail) % vars(self) class NodeType(Node): CONFIG = """\ %(ctype)s v; %(ctype)s* p; (void)v; (void)p;""" def __init__(self, ctype): self.init(name=ctype, cname=ctype, ctype=ctype,) class NodeStructType(NodeType): HEADER = """\ typedef struct {%(cfields)s ...; } %(ctype)s;""" MISSING = """\ typedef struct PyMPI_%(ctype)s { %(cfields)s } PyMPI_%(ctype)s; #define %(ctype)s PyMPI_%(ctype)s""" def __init__(self, ctype, cfields): super().__init__(ctype) self.cfields = '\n'.join( [f' {ctype} {cname};' for ctype, cname in cfields] ) class NodeFuncType(NodeType): HEADER = """\ typedef %(crett)s (%(cname)s)(%(cargs)s);""" MISSING = """\ typedef %(crett)s (MPIAPI PyMPI_%(cname)s)(%(cargs)s); #define %(cname)s PyMPI_%(cname)s""" def __init__(self, crett, cname, cargs, calias=None): self.init(name=cname, cname=cname, ctype=cname+'*',) self.crett = crett self.cargs = cargs or 'void' if calias is not None: self.MISSING = '#define %(cname)s %(calias)s' self.calias = calias class NodeValue(Node): HEADER = """\ const %(ctype)s %(cname)s;""" CONFIG = """\ %(ctype)s v; v = %(cname)s; (void)v;""" MISSING = '#define %(cname)s (%(calias)s)' def __init__(self, ctype, cname, calias): self.init(name=cname, cname=cname, ctype=ctype, calias=calias) if ctype.endswith('*'): ctype = ctype + ' const' self.HEADER = ctype + ' %(cname)s;' class NodePtrVal(NodeValue): MISSING = '#define %(cname)s ((%(ctype)s)%(calias)s)' def ctypefix(ct): ct = ct.replace('[][3]',' (*)[3]') ct = 
ct.replace('[]','*') return ct class NodeFuncProto(Node): HEADER = """\ %(crett)s %(cname)s(%(cargs)s);""" CONFIG = """\ %(crett)s v; v = %(cname)s(%(cargscall)s); (void)v;""" MISSING = ' '. join(['#define %(cname)s(%(cargsnamed)s)', 'PyMPI_UNAVAILABLE("%(name)s"%(comma)s%(cargsnamed)s)']) def __init__(self, crett, cname, cargs, calias=None): self.init(name=cname, cname=cname) self.crett = crett self.cargs = cargs or 'void' if cargs == 'void': cargs = '' if cargs: cargs = [c.strip() for c in cargs.split(',')] if cargs[-1] == '...': del cargs[-1] else: cargs = [] self.cargstype = cargs nargs = len(cargs) self.comma = ',' if nargs else '' cargscall = [f'({ctypefix(a)})0' for a in cargs] self.cargscall = ','.join(cargscall) cargsnamed = ['a%d' % (a+1) for a in range(nargs)] self.cargsnamed = ','.join(cargsnamed) if calias is not None: self.MISSING = '#define %(cname)s %(calias)s' self.calias = calias class IntegralType(NodeType): REGEX = Re.INTEGRAL_TYPE HEADER = """\ typedef %(cbase)s... %(ctype)s;""" MISSING = """\ typedef %(ctdef)s PyMPI_%(ctype)s; #define %(ctype)s PyMPI_%(ctype)s""" def __init__(self, cbase, ctype, calias=None): super().__init__(ctype) self.cbase = cbase if calias is not None: self.ctdef = calias else: self.ctdef = cbase class StructType(NodeStructType): REGEX = Re.STRUCT_TYPE def __init__(self, ctype, calias=None): cfields = [] if ctype == 'MPI_Status': cnames = ['MPI_SOURCE', 'MPI_TAG', 'MPI_ERROR'] cfields = list(zip(['int']*3, cnames)) super().__init__(ctype, cfields) if calias is not None: self.MISSING = '#define %(cname)s %(calias)s' self.calias = calias class OpaqueType(NodeType): REGEX = Re.OPAQUE_TYPE HEADER = """\ typedef struct{...;} %(ctype)s;""" MISSING = """\ typedef void *PyMPI_%(ctype)s; #define %(ctype)s PyMPI_%(ctype)s""" class FunctionType(NodeFuncType): REGEX = Re.FUNCTION_TYPE class EnumValue(NodeValue): REGEX = Re.ENUM_VALUE def __init__(self, cname, calias): self.init(name=cname, cname=cname, ctype='int', calias=calias) class HandleValue(NodeValue): REGEX = Re.HANDLE_VALUE MISSING = '#define %(cname)s ((%(ctype)s)%(calias)s)' class BasicPtrVal(NodePtrVal): REGEX = Re.BASIC_PTRVAL class IntegralPtrVal(NodePtrVal): REGEX = Re.INTEGRAL_PTRVAL class StructPtrVal(NodePtrVal): REGEX = Re.STRUCT_PTRVAL class FunctionPtrVal(NodePtrVal): REGEX = Re.FUNCTION_PTRVAL class FunctionProto(NodeFuncProto): REGEX = Re.FUNCTION_PROTO class FunctionC2F(NodeFuncProto): REGEX = Re.FUNCTION_C2F MISSING = ' '.join(['#define %(cname)s(%(cargsnamed)s)', '((void)%(cargsnamed)s,(%(crett)s)0)']) class FunctionF2C(NodeFuncProto): REGEX = Re.FUNCTION_F2C MISSING = ' '.join(['#define %(cname)s(%(cargsnamed)s)', '((void)%(cargsnamed)s,%(cretv)s)']) def __init__(self, *a, **k): NodeFuncProto.__init__(self, *a, **k) self.cretv = self.crett.upper() + '_NULL' class Generator: NODE_TYPES = [ IntegralType, StructType, OpaqueType, HandleValue, EnumValue, BasicPtrVal, IntegralPtrVal, StructPtrVal, FunctionType, FunctionPtrVal, FunctionProto, FunctionC2F, FunctionF2C, ] def __init__(self): self.nodes = [] self.nodemap = {} def parse_file(self, filename): with open(filename) as f: self.parse_lines(f) def parse_lines(self, lines): for line in lines: self.parse_line(line) def parse_line(self, line): if Re.IGNORE.match(line): return nodemap = self.nodemap nodelist = self.nodes for nodetype in self.NODE_TYPES: args = nodetype.match(line) if args: node = nodetype(*args) nodemap[node.name] = len(nodelist) nodelist.append(node) break if not args: warnings.warn(f'unmatched line:\n{line}', 
stacklevel=1) def __iter__(self): return iter(self.nodes) def __getitem__(self, name): return self.nodes[self.nodemap[name]] def dump_header_h(self, fileobj): if isinstance(fileobj, str): with open(fileobj, 'w') as f: self.dump_header_h(f) return for node in self: fileobj.write(node.header()) CONFIG_HEAD = """\ #ifndef PyMPI_PYMPICONF_H #define PyMPI_PYMPICONF_H """ CONFIG_MACRO = 'PyMPI_HAVE_%s' CONFIG_TAIL = """\ #endif /* !PyMPI_PYMPICONF_H */ """ def dump_config_h(self, fileobj, suite): if isinstance(fileobj, str): with open(fileobj, 'w') as f: self.dump_config_h(f, suite) return head = dedent(self.CONFIG_HEAD) macro = dedent(self.CONFIG_MACRO) tail = dedent(self.CONFIG_TAIL) fileobj.write(head) if suite is None: for node in self: line = '#undef %s\n' % (macro % node.name) fileobj.write(line) else: for name, result in suite: if result: line = '#define %s 1\n' % (macro % name) else: line = '#undef %s\n' % (macro % name) fileobj.write(line) fileobj.write(tail) MISSING_HEAD = """\ #ifndef PyMPI_MISSING_H #define PyMPI_MISSING_H #ifndef PyMPI_UNUSED # if defined(__GNUC__) # define PyMPI_UNUSED __attribute__ ((__unused__)) # else # define PyMPI_UNUSED # endif #endif #define PyMPI_ERR_UNAVAILABLE (-1431655766) /*0xAAAAAAAA*/ static PyMPI_UNUSED int PyMPI_UNAVAILABLE(const char *name,...) { (void)name; return PyMPI_ERR_UNAVAILABLE; } """ MISSING_TAIL = """\ #endif /* !PyMPI_MISSING_H */ """ def dump_missing_h(self, fileobj, suite): if isinstance(fileobj, str): with open(fileobj, 'w') as f: self.dump_missing_h(f, suite) return head = dedent(self.MISSING_HEAD) tail = dedent(self.MISSING_TAIL) # fileobj.write(head) if suite is None: for node in self: fileobj.write(node.missing()) else: for name, result in suite: node = self[name] if not result: fileobj.write(node.missing()) fileobj.write(tail) LARGECNT_HEAD = """\ #ifndef PyMPI_LARGECNT_H #define PyMPI_LARGECNT_H #include #include #ifndef PyMPI_MALLOC #define PyMPI_MALLOC malloc #endif #ifndef PyMPI_FREE #define PyMPI_FREE free #endif #ifndef PyMPI_MEMCPY #define PyMPI_MEMCPY memcpy #endif #define PyMPIAllocArray(dsttype, dst, len) \\ do { \\ size_t _m = (size_t) (len) * sizeof(dsttype); \\ (dst) = (dsttype *) PyMPI_MALLOC(_m ? 
_m : 1); \\ } while (0) /**/ #define PyMPIFreeArray(dst) \\ do { \\ if ((dst) != NULL) PyMPI_FREE(dst); \\ (dst) = NULL; (void) (dst); \\ } while (0) /**/ #define PyMPICastError(ERRORCODE) \\ do { \\ ierr = (ERRORCODE); \\ (void) MPI_Comm_call_errhandler(MPI_COMM_SELF, ierr); \\ goto fn_exit; \\ } while (0) /**/ #define PyMPICastValue(dsttype, dst, srctype, src) \\ do { \\ (dst) = (dsttype) (src); \\ if ((srctype) (dst) != (src)) \\ PyMPICastError(MPI_ERR_ARG); \\ } while (0) /**/ #define PyMPICastArray(dsttype, dst, srctype, src, len) \\ do { \\ (dst) = NULL; \\ if ((src) != NULL) { \\ MPI_Aint _n = (MPI_Aint) (len), _i; \\ PyMPIAllocArray(dsttype, dst, len); \\ if ((dst) == NULL) \\ PyMPICastError(MPI_ERR_OTHER); \\ for (_i = 0; _i < _n; _i++) { \\ (dst)[_i] = (dsttype) (src)[_i]; \\ if ((srctype) (dst)[_i] != (src)[_i]) { \\ PyMPIFreeArray(dst); \\ PyMPICastError(MPI_ERR_ARG); \\ } \\ } \\ } \\ } while (0) /**/ #define PyMPIMoveArray(dsttype, dst, srctype, src, len) \\ do { \\ if ((src) != NULL && (dst) != NULL) { \\ size_t _n = (size_t) (len); \\ unsigned char *_buf = (unsigned char *) (src); \\ (void) PyMPI_MEMCPY(_buf, (dst), _n * sizeof(dsttype)); \\ PyMPI_FREE(dst); (dst) = (dsttype *) _buf; \\ } \\ } while (0) /**/ #define PyMPICommSize(comm, n) \\ do { \\ int _inter = 0; \\ ierr = MPI_Comm_test_inter(comm, &_inter); \\ if (_inter) \\ ierr = MPI_Comm_remote_size((comm), &(n)); \\ else \\ ierr = MPI_Comm_size((comm), &(n)); \\ if (ierr != MPI_SUCCESS) goto fn_exit; \\ } while (0) /**/ #define PyMPICommLocGroupSize(comm, n) \\ do { \\ ierr = MPI_Comm_size((comm), &(n)); \\ if (ierr != MPI_SUCCESS) goto fn_exit; \\ } while (0) /**/ #define PyMPICommNeighborCount(comm, ns, nr) \\ do { \\ int _topo = MPI_UNDEFINED; \\ int _i, _n; (ns) = (nr) = 0; \\ ierr = MPI_Topo_test((comm), &_topo); \\ if (ierr != MPI_SUCCESS) goto fn_exit; \\ if (_topo == MPI_UNDEFINED) { \\ ierr = MPI_Comm_size((comm), &_n); \\ (ns) = (nr) = _n; \\ } else if (_topo == MPI_CART) { \\ ierr = MPI_Cartdim_get((comm), &_n); \\ (ns) = (nr) = 2 * _n; \\ } else if (_topo == MPI_GRAPH) { \\ ierr = MPI_Comm_rank((comm), &_i); \\ ierr = MPI_Graph_neighbors_count( \\ (comm), _i, &_n); \\ (ns) = (nr) = _n; \\ } else if (_topo == MPI_DIST_GRAPH) { \\ ierr = MPI_Dist_graph_neighbors_count( \\ (comm), &(nr), &(ns), &_i); \\ } \\ if (ierr != MPI_SUCCESS) goto fn_exit; \\ } while (0) /**/ """ LARGECNT_BEGIN = """\ #ifndef PyMPI_HAVE_%(name)s_c static int Py%(name)s_c(%(argsdecl)s) { """ LARGECNT_COLLECTIVE = """\ PyMPICommSize(a%(commid)d, n); """ LARGECNT_LOCGROUP = """\ PyMPICommLocGroupSize(a%(commid)d, n); """ LARGECNT_NEIGHBOR = """\ PyMPICommNeighborCount(a%(commid)d, ns, nr); """ LARGECNT_CALL = """\ ierr = %(name)s(%(argscall)s); if (ierr != MPI_SUCCESS) goto fn_exit; """ LARGECNT_END = """\ return ierr; } #undef %(name)s_c #define %(name)s_c Py%(name)s_c #endif """ LARGECNT_TAIL = """\ #endif /* !PyMPI_LARGECNT_H */ """ LARGECNT_RE = re.compile(r'^mpi_({})_c$'.format('|'.join([ r'(i?(b|s|r|p)?send(_init)?(recv(_replace)?)?)', r'(i?m?p?recv(_init)?)', r'(buffer_(at|de)tach|get_count)', r'(i?(bcast|gather(v)?|scatter(v?)|all(gather(v)?|toall(v|w)?))(_init)?)', r'(i?((all)?reduce(_local|(_scatter(_block)?)?)|(ex)?scan)(_init)?)', r'(i?neighbor_all(gather(v)?|toall(v|w)?)(_init)?)', r'(win_(create|allocate(_shared)?|shared_query))', r'(r?(put|get|(get_)?accumulate))', r'file_(((i)?(read|write).*)|get_type_extent)', ]))) def dump_largecnt_h(self, fileobj): if isinstance(fileobj, str): with open(fileobj, 'w') as f: 
self.dump_largecnt_h(f) return def largecount_functions(): for node in self: name = node.name if self.LARGECNT_RE.match(name.lower()): yield name[:-2] def declare(t, v, init=None): t = t.strip() if t.endswith('[]'): t = t[:-2].strip() code = f'{t} *{v}' elif t.endswith('*'): t = t[:-1].strip() code = f'{t} *{v}' else: code = f'{t} {v}' if init is not None: code += f' = {init}' return code def generate(name): is_neighbor = 'neighbor' in name.lower() is_nonblocking = name.startswith('MPI_I') node1 = self[name+'_c'] node2 = self[name] cargstype1 = node1.cargstype cargstype2 = node2.cargstype argstype = list(zip(cargstype1, cargstype2)) convert_array = False for (t1, t2) in argstype: if t1 != t2: if t1 == 'MPI_Count[]' and t2 == 'int[]': convert_array = True break if t1 == 'MPI_Aint[]' and t2 == 'int[]': convert_array = True break commid = None if convert_array: for i, (t1, _) in enumerate(argstype, start=1): if t1 == 'MPI_Comm': commid = i break dtypeidx = 0 argslist = [] argsinit = [] argstemp = [] argsconv = [] argscall = [] argsoutp = [] argsfree = [] CASTVALUE = 'PyMPICastValue(%s, b%d, %s, a%d)' CASTARRAY = 'PyMPICastArray(%s, b%d, %s, a%d, %s)' MOVEARRAY = 'PyMPIMoveArray(%s, b%d, %s, a%d, %s)' FREEARRAY = 'PyMPIFreeArray(b%d)' argsinit += ['int ierr'] if commid is not None: if is_neighbor: argsinit += ['int ns, nr'] else: argsinit += ['int n'] for i, (t1, t2) in enumerate(argstype, start=1): argslist += [declare(t1, 'a%d' % i)] if t1.startswith('MPI_Datatype'): dtypeidx += 1 if t1 == t2: argscall += ['a%d' % i] else: if t1.endswith('[]'): t1, t2, n = t1[:-2], t2[:-2], 'n' argstemp += [declare(t2, '*b%d' % i, 'NULL')] if is_neighbor: n = ('ns', 'nr')[dtypeidx] subs = (t2, i, t1, i, n) argsconv += [CASTARRAY % subs] if is_nonblocking: argsconv += [MOVEARRAY % subs] else: argsfree += [FREEARRAY % i] argscall += ['b%d' % i] elif t1.endswith('*'): t1, t2 = t1[:-1], t2[:-1] pinit = 'a%d ? 
&b%d : NULL' % (i, i) argstemp += [declare(t2, 'b%d' % i, 0)] argstemp += [declare(t2+'*', 'p%d' % i, pinit)] argscall += ['p%d' % i] argsoutp += ['if (a%d) *a%d = b%d' % (i, i, i)] else: subs = (t2, i, t1, i) argstemp += [declare(t2, 'b%d' % i)] argsconv += [CASTVALUE % subs] argscall += ['b%d' % i] tab = ' ' subs = { 'name': name, 'argsdecl': (',\n'+' '*(len(name)+20)).join(argslist), 'argscall': ', '.join(argscall), 'commid': commid, } begin = self.LARGECNT_BEGIN % subs if commid is None: setup = '' elif is_neighbor: setup = self.LARGECNT_NEIGHBOR % subs elif 'reduce_scatter' in name.lower(): setup = self.LARGECNT_LOCGROUP % subs else: setup = self.LARGECNT_COLLECTIVE % subs call = self.LARGECNT_CALL % subs end = self.LARGECNT_END % subs yield dedent(begin) yield indent('{};\n'.format('; '.join(argsinit)), tab) yield indent('{};\n'.format('; '.join(argstemp)), tab) yield indent(dedent(setup), tab) for line in argsconv: yield indent(f'{line};\n', tab) yield indent(dedent(call), tab) for line in argsoutp: yield indent(f'{line};\n', tab) yield indent('fn_exit:\n', tab[:-1]) for line in argsfree: yield indent(f'{line};\n', tab) yield dedent(end) yield '\n' head = dedent(self.LARGECNT_HEAD) tail = dedent(self.LARGECNT_TAIL) self.largecnt = 0 fileobj.write(head) for name in largecount_functions(): self.largecnt += 1 for code in generate(name): fileobj.write(code) fileobj.write(tail) # ----------------------------------------- if __name__ == '__main__': import os import sys import argparse parser = argparse.ArgumentParser(description='MPI API generator') parser.add_argument('-q', '--quiet', action='store_true') parser.add_argument('-l', '--list', action='store_true') args = parser.parse_args() if args.list: args.quiet = True def log(message): if not args.quiet: print(message) generator = Generator() libmpi_pxd = os.path.join('src', 'mpi4py', 'libmpi.pxd') log(f'parsing file {libmpi_pxd}') generator.parse_file(libmpi_pxd) log('processed %d definitions' % len(generator.nodes)) if args.list: for node in generator: print(node.name) sys.exit(0) #config_h = os.path.join('src', 'lib-mpi', 'pympiconf.h') #log('writing file %s' % config_h) #generator.dump_config_h(config_h, None) missing_h = os.path.join('src', 'lib-mpi', 'missing.h') log(f'writing file {missing_h}') generator.dump_missing_h(missing_h, None) largecnt_h = os.path.join('src', 'lib-mpi', 'largecnt.h') log(f'writing file {largecnt_h}') generator.dump_largecnt_h(largecnt_h) log('generated %d large count fallbacks' % generator.largecnt) #libmpi_h = os.path.join('.', 'libmpi.h') #log('writing file %s' % libmpi_h) #generator.dump_header_h(libmpi_h) # ----------------------------------------- mpi4py-4.0.3/conf/mpiconfig.py000066400000000000000000000436421475341043600162320ustar00rootroot00000000000000import os import sys import shlex import shutil import logging import platform from collections import OrderedDict from configparser import ConfigParser from configparser import Error as ConfigParserError _logger = logging.getLogger("mpiconfig") _logger.setLevel(logging.INFO) class Config: def __init__(self, logger=None): self.log = logger or _logger self.section = None self.filename = None self.compiler_info = OrderedDict(( ('mpicc' , None), ('mpicxx' , None), ('mpild' , None), )) self.library_info = OrderedDict(( ('define_macros' , []), ('undef_macros' , []), ('include_dirs' , []), ('libraries' , []), ('library_dirs' , []), ('runtime_library_dirs' , []), ('extra_compile_args' , []), ('extra_link_args' , []), ('extra_objects' , []), )) def 
__bool__(self): for v in self.compiler_info.values(): if v: return True for v in self.library_info.values(): if v: return True return False __nonzero__ = __bool__ def get(self, k, d=None): if k in self.compiler_info: return self.compiler_info[k] if k in self.library_info: return self.library_info[k] return d def info(self, log=None): if log is None: log = self.log mpicc = self.compiler_info.get('mpicc') mpicxx = self.compiler_info.get('mpicxx') mpild = self.compiler_info.get('mpild') if mpicc: log.info("MPI C compiler: %s", mpicc) if mpicxx: log.info("MPI C++ compiler: %s", mpicxx) if mpild: log.info("MPI linker: %s", mpild) def update(self, config, **more): if hasattr(config, 'keys'): config = config.items() for option, value in config: if option in self.compiler_info: self.compiler_info[option] = value if option in self.library_info: self.library_info[option] = value if more: self.update(more) def setup(self, options, environ=None): if environ is None: environ = os.environ self.setup_library_info(options, environ) self.setup_compiler_info(options, environ) def setup_library_info(self, options, environ): filename = section = None mpiopt = getattr(options, 'mpi', None) mpiopt = environ.get('MPICFG', mpiopt) mpiopt = environ.get('MPI4PY_BUILD_MPICFG', mpiopt) if mpiopt: if ',' in mpiopt: section, filename = mpiopt.split(',', 1) elif ':' in mpiopt: filename, section = mpiopt.split(':', 1) elif os.path.isfile(mpiopt): filename = mpiopt else: section = mpiopt if not filename: filename = "mpi.cfg" if not section: section = "mpi" mach = platform.machine() arch = platform.architecture(None)[0] plat = sys.platform osnm = os.name if plat.startswith('linux'): plat = 'linux' elif plat.startswith('sunos'): plat = 'solaris' elif plat.startswith('win'): plat = 'windows' suffixes = [] suffixes.append(plat+'-'+mach) suffixes.append(plat+'-'+arch) suffixes.append(plat) suffixes.append(osnm+'-'+mach) suffixes.append(osnm+'-'+arch) suffixes.append(osnm) suffixes.append(mach) suffixes.append(arch) sections = [section+"-"+s for s in suffixes] sections += [section] self.load(filename, sections) if not self: if os.name == 'posix': self._setup_posix() if sys.platform == 'win32': self._setup_windows() def _setup_posix(self): pass def _setup_windows(self): if self._setup_windows_impi(): return if self._setup_windows_msmpi(): return def _setup_windows_impi(self): from os.path import join, isdir, isfile I_MPI_ROOT = os.environ.get('I_MPI_ROOT') if not I_MPI_ROOT: return None if not isdir(I_MPI_ROOT): return None arch = platform.architecture(None)[0][:2] archdir = {'32':'ia32', '64':'intel64'}[arch] mpi_dir = join(I_MPI_ROOT, archdir) if not isdir(mpi_dir): mpi_dir = I_MPI_ROOT IMPI_INC = join(mpi_dir, 'include') IMPI_LIB = join(mpi_dir, 'lib') I_MPI_LIBRARY_KIND = os.environ.get('I_MPI_LIBRARY_KIND') library_kind = os.getenv('library_kind') kind = I_MPI_LIBRARY_KIND or library_kind or 'release' if isfile(join(IMPI_LIB, kind, 'impi.lib')): IMPI_LIB = join(IMPI_LIB, kind) ok = ( IMPI_INC and isfile(join(IMPI_INC, 'mpi.h')) and IMPI_LIB and isfile(join(IMPI_LIB, 'impi.lib')) ) if not ok: return False IMPI_INC = os.path.normpath(IMPI_INC) IMPI_LIB = os.path.normpath(IMPI_LIB) self.library_info.update( include_dirs=[IMPI_INC], library_dirs=[IMPI_LIB], libraries=['impi']) self.section = 'impi' self.filename = [os.path.dirname(IMPI_INC)] return True def _setup_windows_msmpi(self): # Microsoft MPI (v7, v6, v5, v4) def msmpi_ver(): try: try: import winreg except ImportError: import _winreg as winreg HKLM = 
winreg.HKEY_LOCAL_MACHINE subkey = r"SOFTWARE\Microsoft\MPI" with winreg.OpenKey(HKLM, subkey) as key: for i in range(winreg.QueryInfoKey(key)[1]): name, value, type = winreg.EnumValue(key, i) if name != "Version": continue major, minor = value.split('.')[:2] return (int(major), int(minor)) except Exception: # noqa: S110 pass MSMPI_VER = os.environ.get('MSMPI_VER') if MSMPI_VER: try: major, minor = MSMPI_VER.split('.')[:2] return (int(major), int(minor)) except Exception: raise RuntimeError( f"invalid environment: MSMPI_VER={MSMPI_VER}" ) from None return None def setup_msmpi(MSMPI_INC, MSMPI_LIB): from os.path import join, isfile ok = ( MSMPI_INC and isfile(join(MSMPI_INC, 'mpi.h')) and MSMPI_LIB and isfile(join(MSMPI_LIB, 'msmpi.lib')) ) if not ok: return False version = msmpi_ver() if version is not None: major, minor = version MSMPI_VER = hex((major<<8)|(minor&0xFF)) self.library_info.update( define_macros=[('MSMPI_VER', MSMPI_VER)], ) MSMPI_INC = os.path.normpath(MSMPI_INC) MSMPI_LIB = os.path.normpath(MSMPI_LIB) self.library_info.update( include_dirs=[MSMPI_INC], library_dirs=[MSMPI_LIB], libraries=['msmpi'], ) self.section = 'msmpi' self.filename = [os.path.dirname(MSMPI_INC)] return True arch = platform.architecture(None)[0][:2] # Look for Microsoft MPI in the environment MSMPI_INC = os.environ.get('MSMPI_INC') MSMPI_LIB = os.environ.get('MSMPI_LIB'+arch) MSMPI_LIB = MSMPI_LIB or os.environ.get('MSMPI_LIB') if setup_msmpi(MSMPI_INC, MSMPI_LIB): return True # Look for Microsoft MPI v7/v6/v5 in default install path for ProgramFiles in ('ProgramFiles', 'ProgramFiles(x86)'): ProgramFiles = os.environ.get(ProgramFiles, '') archdir = {'32':'x86', '64':'x64'}[arch] MSMPI_DIR = os.path.join(ProgramFiles, 'Microsoft SDKs', 'MPI') MSMPI_INC = os.path.join(MSMPI_DIR, 'Include') MSMPI_LIB = os.path.join(MSMPI_DIR, 'Lib', archdir) if setup_msmpi(MSMPI_INC, MSMPI_LIB): return True # Look for Microsoft HPC Pack 2012 R2 in default install path for ProgramFiles in ('ProgramFiles', 'ProgramFiles(x86)'): ProgramFiles = os.environ.get(ProgramFiles, '') archdir = {'32':'i386', '64':'amd64'}[arch] MSMPI_DIR = os.path.join(ProgramFiles, 'Microsoft MPI') MSMPI_INC = os.path.join(MSMPI_DIR, 'Inc') MSMPI_LIB = os.path.join(MSMPI_DIR, 'Lib', archdir) if setup_msmpi(MSMPI_INC, MSMPI_LIB): return True # Microsoft MPI (legacy) and others ProgramFiles = os.environ.get('ProgramFiles', '') CCP_HOME = os.environ.get('CCP_HOME', '') for (name, prefix, suffix) in ( ('msmpi', CCP_HOME, ''), ('msmpi', ProgramFiles, 'Microsoft HPC Pack 2012 R2'), ('msmpi', ProgramFiles, 'Microsoft HPC Pack 2012'), ('msmpi', ProgramFiles, 'Microsoft HPC Pack 2012 SDK'), ('msmpi', ProgramFiles, 'Microsoft HPC Pack 2008 R2'), ('msmpi', ProgramFiles, 'Microsoft HPC Pack 2008'), ('msmpi', ProgramFiles, 'Microsoft HPC Pack 2008 SDK'), ): mpi_dir = os.path.join(prefix, suffix) if not mpi_dir or not os.path.isdir(mpi_dir): continue define_macros = [] include_dir = os.path.join(mpi_dir, 'include') library = 'mpi' library_dir = os.path.join(mpi_dir, 'lib') if name == 'msmpi': include_dir = os.path.join(mpi_dir, 'inc') library = 'msmpi' arch = platform.architecture(None)[0] if arch == '32bit': library_dir = os.path.join(library_dir, 'i386') if arch == '64bit': library_dir = os.path.join(library_dir, 'amd64') if not os.path.isdir(include_dir): include_dir = os.path.join(mpi_dir, 'include') self.library_info.update( define_macros=define_macros, include_dirs=[include_dir], libraries=[library], library_dirs=[library_dir], ) self.section = name 
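# The setup_msmpi() helper above packs the detected Microsoft MPI version into
# a single macro value: major version in the high byte, minor version in the
# low byte, so MS-MPI 10.1 becomes 0x0a01. A minimal, self-contained sketch of
# that packing (the decode helper is an illustration added here, not part of
# the build code):
def _encode_msmpi_ver(major, minor):
    # same packing as setup_msmpi(): high byte = major, low byte = minor
    return hex((major << 8) | (minor & 0xFF))

def _decode_msmpi_ver(value):
    # hypothetical inverse, for illustration only
    value = int(value, 16)
    return (value >> 8, value & 0xFF)

assert _encode_msmpi_ver(10, 1) == '0xa01'
assert _decode_msmpi_ver('0xa01') == (10, 1)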
self.filename = [mpi_dir] return True return None def setup_compiler_info(self, options, environ): def find_exe(cmd, path=None): if not cmd: return None parts = shlex.split(cmd) exe, args = parts[0], parts[1:] if not os.path.isabs(exe) and path: exe = os.path.basename(exe) exe = shutil.which(exe, path=path) if not exe: return None return ' '.join([exe, *args]) COMPILERS = ( ('mpicc', ['mpicc']), ('mpicxx', ['mpicxx', 'mpic++', 'mpiCC']), ('mpild', []), ) # compiler_info = {} PATH = environ.get('PATH', '') for name, _ in COMPILERS: cmd = ( environ.get(f'MPI4PY_BUILD_{name.upper()}') or environ.get(name.upper()) or getattr(options, name, None) or self.compiler_info.get(name) or None ) if cmd: exe = find_exe(cmd, path=PATH) if exe: path = os.path.dirname(exe) PATH = path + os.path.pathsep + PATH compiler_info[name] = exe else: self.log.warning("warning: %s='%s' not found", name, cmd) # if not self and not compiler_info: for name, candidates in COMPILERS: for cmd in candidates: cmd = find_exe(cmd) if cmd: compiler_info[name] = cmd break # self.compiler_info.update(compiler_info) def load(self, filename="mpi.cfg", section='mpi'): if isinstance(filename, str): filenames = filename.split(os.path.pathsep) else: filenames = list(filename) if isinstance(section, str): sections = section.split(',') else: sections = list(section) # try: parser = ConfigParser(dict_type=OrderedDict) except TypeError: parser = ConfigParser() try: read_ok = parser.read(filenames) except ConfigParserError: self.log.exception( "error: parsing configuration file/s '%s'", os.path.pathsep.join(filenames)) return None for section in sections: if parser.has_section(section): break section = None if not section: self.log.error( "error: section/s '%s' not found in file/s '%s'", ','.join(sections), os.path.pathsep.join(filenames)) return None parser_items = list(parser.items(section, vars=None)) # compiler_info = type(self.compiler_info)() for option, value in parser_items: if option in self.compiler_info: compiler_info[option] = value # pathsep = os.path.pathsep expanduser = os.path.expanduser expandvars = os.path.expandvars library_info = type(self.library_info)() for k, v in parser_items: if k in ( 'define_macros', 'undef_macros', ): macros = [e.strip() for e in v.split(',')] if k == 'define_macros': for i, m in enumerate(macros): try: # -DFOO=bar idx = m.index('=') macro = (m[:idx], m[idx+1:] or None) except ValueError: # -DFOO macro = (m, None) macros[i] = macro library_info[k] = macros elif k in ( 'include_dirs', 'library_dirs', 'rpath', 'runtime_dirs', 'runtime_library_dirs', ): if k in ('rpath', 'runtime_dirs'): k = 'runtime_library_dirs' pathlist = [p.strip() for p in v.split(pathsep)] library_info[k] = [ expanduser(expandvars(p)) for p in pathlist if p ] elif k == 'libraries': library_info[k] = [e.strip() for e in shlex.split(v)] elif k in ( 'extra_compile_args', 'extra_link_args', ): library_info[k] = shlex.split(v) elif k == 'extra_objects': library_info[k] = [ expanduser(expandvars(e)) for e in shlex.split(v) ] elif hasattr(self, k): library_info[k] = v.strip() else: pass # self.section = section self.filename = read_ok self.compiler_info.update(compiler_info) self.library_info.update(library_info) return compiler_info, library_info, section, read_ok def dump(self, filename=None, section='mpi'): # prepare configuration values compiler_info = self.compiler_info.copy() library_info = self.library_info.copy() for k in library_info: if k in ( 'define_macros', 'undef_macros', ): macros = library_info[k] if k == 
'define_macros': for i, (m, v) in enumerate(macros): if v is None: macros[i] = m else: macros[i] = f'{m}={v}' library_info[k] = ','.join(macros) elif k in ( 'include_dirs', 'library_dirs', 'runtime_library_dirs', ): library_info[k] = os.path.pathsep.join(library_info[k]) elif isinstance(library_info[k], list): library_info[k] = ' '.join(library_info[k]) # fill configuration parser try: parser = ConfigParser(dict_type=OrderedDict) except TypeError: parser = ConfigParser() parser.add_section(section) for option, value in compiler_info.items(): if not value: continue parser.set(section, option, value) for option, value in library_info.items(): if not value: continue parser.set(section, option, value) # save configuration file if filename is None: parser.write(sys.stdout) elif hasattr(filename, 'write'): parser.write(filename) elif isinstance(filename, str): with open(filename, 'w', encoding='utf-8') as f: parser.write(f) return parser if __name__ == '__main__': import optparse parser = optparse.OptionParser() parser.add_option("--mpi", type="string") parser.add_option("--mpicc", type="string") parser.add_option("--mpicxx", type="string") parser.add_option("--mpild", type="string") opts, args = parser.parse_args() cfg = Config() cfg.setup(opts) cfg.dump() mpi4py-4.0.3/conf/mpidistutils.py000066400000000000000000001523511475341043600170070ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com """ Support for building mpi4py with distutils/setuptools. """ # ruff: noqa: E402 # ----------------------------------------------------------------------------- import os import re import sys import glob import shlex import platform import warnings import contextlib from distutils import log from distutils import sysconfig from distutils.util import convert_path from distutils.file_util import copy_file # Fix missing variables PyPy's distutils.sysconfig if hasattr(sys, 'pypy_version_info'): config_vars = sysconfig.get_config_vars() for name in ('prefix', 'exec_prefix'): if name not in config_vars: config_vars[name] = os.path.normpath(getattr(sys, name)) if sys.platform == 'darwin' and 'LDSHARED' in config_vars: ldshared = shlex.split(config_vars['LDSHARED']) while '-shared' in ldshared: ldshared[ldshared.index('-shared')] = '-bundle' if '-undefined' not in ldshared: ldshared.extend('-undefined dynamic_lookup'.split()) config_vars['LDSHARED'] = ' '.join(ldshared) # Workaround distutils.cygwinccompiler.get_versions() # failing when the compiler path contains spaces from distutils import cygwinccompiler as cygcc if hasattr(cygcc, 'get_versions'): cygcc_get_versions = cygcc.get_versions def get_versions(): import distutils.spawn find_executable_orig = distutils.spawn.find_executable def find_executable(exe): exe = find_executable_orig(exe) if exe and ' ' in exe: exe = f'"{exe}"' return exe distutils.spawn.find_executable = find_executable versions = cygcc_get_versions() distutils.spawn.find_executable = find_executable_orig return versions cygcc.get_versions = get_versions # Workaround distutils.ccompiler.CCompiler._fix_lib_args from distutils.ccompiler import CCompiler cc_fix_compile_args_orig = getattr(CCompiler, '_fix_compile_args', None) cc_fix_lib_args_orig = getattr(CCompiler, '_fix_lib_args', None) def cc_fix_compile_args(self, out_dir, macros, inc_dirs): macros = macros or [] inc_dirs = inc_dirs or [] return cc_fix_compile_args_orig(self, out_dir, macros, inc_dirs) def cc_fix_lib_args(self, libs, lib_dirs, rt_lib_dirs): libs = libs or [] lib_dirs = lib_dirs or [] 
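# The PyPy/macOS tweak above rewrites the LDSHARED command so that extension
# modules are linked as loadable bundles with symbols resolved lazily at load
# time. A standalone sketch of the same string transformation; the input
# command below is a made-up example, not taken from any real sysconfig:
import shlex

def _fix_ldshared_for_darwin(ldshared_cmd):
    words = shlex.split(ldshared_cmd)
    # replace every '-shared' with '-bundle' (macOS dlopen's bundles)
    words = ['-bundle' if w == '-shared' else w for w in words]
    # let undefined symbols be resolved against the host Python at load time
    if '-undefined' not in words:
        words.extend(['-undefined', 'dynamic_lookup'])
    return ' '.join(words)

# hypothetical input, for illustration only
print(_fix_ldshared_for_darwin('cc -shared -O2'))
# -> 'cc -bundle -O2 -undefined dynamic_lookup'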
rt_lib_dirs = rt_lib_dirs or [] return cc_fix_lib_args_orig(self, libs, lib_dirs, rt_lib_dirs) CCompiler._fix_compile_args = cc_fix_compile_args CCompiler._fix_lib_args = cc_fix_lib_args def _fix_env(cmd, i): while os.path.basename(cmd[i]) == 'env': i = i + 1 while '=' in cmd[i]: i = i + 1 return i def _fix_xcrun(cmd, i): if os.path.basename(cmd[i]) == 'xcrun': del cmd[i] while True: if cmd[i] == '-sdk': del cmd[i:i+2] continue if cmd[i] == '-log': del cmd[i] continue break return i def fix_compiler_cmd(cc, mpicc): if not mpicc: return i = 0 i = _fix_env(cc, i) i = _fix_xcrun(cc, i) while os.path.basename(cc[i]) == 'ccache': i = i + 1 cc[i:i+1] = shlex.split(mpicc) def fix_linker_cmd(ld, mpild): if not mpild: return i = 0 if (sys.platform.startswith('aix') and os.path.basename(ld[i]) == 'ld_so_aix'): i = 1 i = _fix_env(ld, i) i = _fix_xcrun(ld, i) while os.path.basename(ld[i]) == 'ccache': del ld[i] ld[i:i+1] = shlex.split(mpild) def customize_compiler( compiler, lang=None, mpicc=None, mpicxx=None, mpild=None, ): sysconfig.customize_compiler(compiler) if compiler.compiler_type == 'unix': ld = compiler.linker_exe for envvar in ('LDFLAGS', 'CFLAGS', 'CPPFLAGS'): if envvar in os.environ: ld += shlex.split(os.environ[envvar]) if os.environ.get('SOURCE_DATE_EPOCH') is not None: # Linker tweaks for reproducible build if sys.platform == 'darwin': os.environ['ZERO_AR_DATE'] = 'YES' if compiler.compiler_type == 'msvc': if not compiler.initialized: compiler.initialize() for flags in compiler._ldflags.values(): flags.append('/BREPRO') if compiler.compiler_type == 'unix': # Compiler command overriding if mpicc: fix_compiler_cmd(compiler.compiler, mpicc) if lang in ('c', None): fix_compiler_cmd(compiler.compiler_so, mpicc) if mpicxx: fix_compiler_cmd(compiler.compiler_cxx, mpicxx) if lang == 'c++': fix_compiler_cmd(compiler.compiler_so, mpicxx) if mpild: for ld in [compiler.linker_so, compiler.linker_exe]: fix_linker_cmd(ld, mpild) if compiler.compiler_type == 'cygwin': compiler.set_executables( preprocessor = 'gcc -mcygwin -E', ) if compiler.compiler_type == 'mingw32': compiler.set_executables( preprocessor = 'gcc -mno-cygwin -E', ) if compiler.compiler_type in ('unix', 'cygwin', 'mingw32'): badcxxflags = [ '-Wimplicit', '-Wstrict-prototypes'] for flag in badcxxflags: while flag in compiler.compiler_cxx: compiler.compiler_cxx.remove(flag) if lang == 'c++': while flag in compiler.compiler_so: compiler.compiler_so.remove(flag) if compiler.compiler_type == 'mingw32': # Remove msvcrXX.dll del compiler.dll_libraries[:] # https://bugs.python.org/issue12641 if compiler.gcc_version >= '4.4': for attr in ( 'preprocessor', 'compiler', 'compiler_cxx', 'compiler_so', 'linker_so', 'linker_exe', ): with contextlib.suppress(Exception): getattr(compiler, attr).remove('-mno-cygwin') # Add required define and compiler flags for AMD64 if platform.architecture(None)[0] == '64bit': for attr in ( 'preprocessor', 'compiler', 'compiler_cxx', 'compiler_so', 'linker_so', 'linker_exe', ): getattr(compiler, attr).insert(1, '-DMS_WIN64') getattr(compiler, attr).insert(1, '-m64') # ----------------------------------------------------------------------------- from mpiconfig import Config def configuration(command_obj, verbose=True): config = Config(log) config.setup(command_obj) if verbose: if config.section and config.filename: config.log.info( "MPI configuration: [%s] from '%s'", config.section, ','.join(config.filename), ) config.info() return config def configure_compiler(compiler, config, lang=None): # mpicc = 
config.get('mpicc') mpicxx = config.get('mpicxx') mpild = config.get('mpild') if not mpild and (mpicc or mpicxx): if lang == 'c': mpild = mpicc if lang == 'c++': mpild = mpicxx if not mpild: mpild = mpicc or mpicxx # customize_compiler( compiler, lang, mpicc=mpicc, mpicxx=mpicxx, mpild=mpild, ) # for k, v in config.get('define_macros', []): compiler.define_macro(k, v) for v in config.get('undef_macros', []): compiler.undefine_macro(v) for v in config.get('include_dirs', []): compiler.add_include_dir(v) for v in config.get('libraries', []): compiler.add_library(v) for v in config.get('library_dirs', []): compiler.add_library_dir(v) for v in config.get('runtime_library_dirs', []): compiler.add_runtime_library_dir(v) for v in config.get('extra_objects', []): compiler.add_link_object(v) if compiler.compiler_type in ( 'unix', 'intel', 'cygwin', 'mingw32', ): cc_args = config.get('extra_compile_args', []) ld_args = config.get('extra_link_args', []) compiler.compiler += cc_args compiler.compiler_so += cc_args compiler.compiler_cxx += cc_args compiler.linker_so += ld_args compiler.linker_exe += ld_args return compiler # ----------------------------------------------------------------------------- try: from mpiapigen import Generator except ImportError: class Generator: def parse_file(self, *args): raise NotImplementedError( "You forgot to grab 'mpiapigen.py'") @contextlib.contextmanager def capture_stderr(filename=os.devnull): stream = sys.stderr file_obj = None fno_save = None try: file_obj = open(filename, 'w') fno_save = os.dup(stream.fileno()) os.dup2(file_obj.fileno(), stream.fileno()) yield finally: if file_obj is not None: file_obj.close() if fno_save is not None: os.dup2(fno_save, stream.fileno()) class ConfigureMPI: SRCDIR = 'src' SOURCES = [os.path.join('mpi4py', 'libmpi.pxd')] DESTDIR = os.path.join('src', 'lib-mpi') CONFIG_H = 'pympiconf.h' MISSING_H = 'missing.h' CONFIGTEST_H = """\ /* _configtest.h */ #if !defined(MPIAPI) # define MPIAPI #endif """ def __init__(self, config_cmd): self.generator = Generator() for filename in self.SOURCES: fullname = os.path.join(self.SRCDIR, filename) self.generator.parse_file(fullname) self.config_cmd = config_cmd def run(self): results = [] with open('_configtest.h', 'w') as f: f.write(self.CONFIGTEST_H) for node in self.generator: name = node.name testcode = node.config() confcode = node.missing(guard=False) log.info("checking for '%s'...", name) ok = self.run_test(testcode) if not ok: log.info("**** failed check for '%s'", name) with open('_configtest.h', 'a') as f: f.write(confcode) results.append((name, ok)) try: os.remove('_configtest.h') except OSError: pass return results def gen_test(self, code): body = [ '#include "_configtest.h"', 'int main(int argc, char **argv) {', '\n'.join([' ' + line for line in code.split('\n')]), ' (void)argc; (void)argv;', ' return 0;', '}', ] body = '\n'.join(body) + '\n' return body def run_test(self, code, lang='c'): level = log.set_threshold(log.WARN) log.set_threshold(level) if not self.config_cmd.noisy: level = log.set_threshold(log.WARN) try: body = self.gen_test(code) headers = ['stdlib.h', 'mpi.h'] ok = self.config_cmd.try_link(body, headers=headers, lang=lang) return ok finally: log.set_threshold(level) def dump(self, results): destdir = self.DESTDIR config_h = os.path.join(destdir, self.CONFIG_H) missing_h = os.path.join(destdir, self.MISSING_H) log.info("writing '%s'", config_h) self.generator.dump_config_h(config_h, results) log.info("writing '%s'", missing_h) self.generator.dump_missing_h(missing_h, 
None) # ----------------------------------------------------------------------------- cmd_mpi_opts = [ ('mpild=', None, "MPI linker command, " "overridden by environment variable 'MPILD' " "(defaults to 'mpicc' or 'mpicxx' if any is available)"), ('mpicxx=', None, "MPI C++ compiler command, " "overridden by environment variable 'MPICXX' " "(defaults to 'mpicxx', 'mpiCC', or 'mpic++' if any is available)"), ('mpicc=', None, "MPI C compiler command, " "overridden by environment variables 'MPICC' " "(defaults to 'mpicc' if available)"), ('mpi=', None, "specify a ini-style configuration file and section " "(e.g. --mpi=filename or --mpi=filename:section), " "to look for MPI includes/libraries, " "overridden by environment variable 'MPICFG' " "(defaults to configuration file 'mpi.cfg' and section 'mpi')"), ('configure', None, "exhaustive test for checking missing MPI constants/types/functions"), ] def cmd_get_mpi_options(cmd_opts): optlist = [] for (option, _, _) in cmd_opts: if option[-1] == '=': option = option[:-1] option = option.replace('-','_') optlist.append(option) return optlist def cmd_initialize_mpi_options(cmd): mpiopts = cmd_get_mpi_options(cmd_mpi_opts) for op in mpiopts: setattr(cmd, op, None) def cmd_set_undefined_mpi_options(cmd, basecmd): mpiopts = cmd_get_mpi_options(cmd_mpi_opts) optlist = tuple(zip(mpiopts, mpiopts)) cmd.set_undefined_options(basecmd, *optlist) # ----------------------------------------------------------------------------- try: import setuptools except ImportError: setuptools = None def import_command(cmd): from importlib import import_module if setuptools: try: return import_module('setuptools.command.' + cmd) except ImportError: pass return import_module('distutils.command.' + cmd) if setuptools: from setuptools import Distribution as cls_Distribution from setuptools import Extension as cls_Extension from setuptools import Command else: from distutils.core import Distribution as cls_Distribution from distutils.core import Extension as cls_Extension from distutils.core import Command cmd_config = import_command('config') cmd_build = import_command('build') cmd_install = import_command('install') cmd_clean = import_command('clean') cmd_build_ext = import_command('build_ext') cmd_install_lib = import_command('install_lib') cmd_install_data = import_command('install_data') from distutils.errors import DistutilsError from distutils.errors import DistutilsSetupError from distutils.errors import DistutilsPlatformError from distutils.errors import CCompilerError try: from packaging.version import Version except ImportError: try: from setuptools.extern.packaging.version import Version except ImportError: from distutils.version import StrictVersion as Version try: from setuptools.modified import newer_group except ImportError: try: from setuptools.dep_util import newer_group except ImportError: from distutils.dep_util import newer_group # ----------------------------------------------------------------------------- # Distribution class supporting a 'executables' keyword class Distribution(cls_Distribution): def __init__ (self, attrs=None): # support for pkg data self.package_data = {} # PEP 314 self.provides = None self.requires = None self.obsoletes = None # supports 'executables' keyword self.executables = None cls_Distribution.__init__(self, attrs) def has_executables(self): return self.executables and len(self.executables) > 0 def is_pure (self): return (cls_Distribution.is_pure(self) and not self.has_executables()) # Extension class class 
Extension(cls_Extension): def __init__ (self, **kw): optional = kw.pop('optional', None) configure = kw.pop('configure', None) cls_Extension.__init__(self, **kw) self.optional = optional self.configure = configure # Library class class Library(Extension): def __init__ (self, **kw): kind = kw.pop('kind', "static") package = kw.pop('package', None) dest_dir = kw.pop('dest_dir', None) Extension.__init__(self, **kw) self.kind = kind self.package = package self.dest_dir = dest_dir # Executable class class Executable(Extension): def __init__ (self, **kw): package = kw.pop('package', None) dest_dir = kw.pop('dest_dir', None) Extension.__init__(self, **kw) self.package = package self.dest_dir = dest_dir # setup function def setup(**attrs): if setuptools: from setuptools import setup as fcn_setup else: from distutils.core import setup as fcn_setup if 'distclass' not in attrs: attrs['distclass'] = Distribution if 'cmdclass' not in attrs: attrs['cmdclass'] = {} cmdclass = attrs['cmdclass'] for cmd in ( config, build, install, clean, build_src, build_ext, build_exe, install_lib, install_data, install_exe, ): if cmd.__name__ not in cmdclass: cmdclass[cmd.__name__] = cmd return fcn_setup(**attrs) # -------------------------------------------------------------------- def with_coverage(): return bool(os.environ.get('MPI4PY_COVERAGE_PLUGIN')) # -------------------------------------------------------------------- # Cython def cython_req(): confdir = os.path.dirname(__file__) basename = 'requirements-build-cython.txt' with open(os.path.join(confdir, basename)) as f: m = re.search(r'cython\s*>=+\s*(.*)', f.read().strip()) cython_version = m.groups()[0] return cython_version def cython_chk(VERSION, verbose=True): # def warn(message): if not verbose: return ruler, ws, nl = "*"*80, " " ,"\n" pyexe = sys.executable advise = f"$ {pyexe} -m pip install --upgrade cython" def printer(*s): print(*s, file=sys.stderr) printer(ruler, nl) printer(ws, message, nl) printer(ws, ws, advise, nl) printer(ruler) # try: import Cython except ImportError: warn("You need Cython to generate C source files.") return False # CYTHON_VERSION = Cython.__version__ m = re.match(r"(\d+\.\d+(?:\.\d+)?).*", CYTHON_VERSION) if not m: warn(f"Cannot parse Cython version string {CYTHON_VERSION!r}") return False REQUIRED = Version(VERSION) PROVIDED = Version(m.groups()[0]) if PROVIDED < REQUIRED: warn(f"You need Cython >= {VERSION} (you have version {CYTHON_VERSION})") return False # if verbose: log.info("using Cython %s", CYTHON_VERSION) return True def cython_run( source, target=None, depends=(), includes=(), workdir=None, force=False, VERSION="0.0", ): if target is None: target = os.path.splitext(source)[0]+'.c' cwd = os.getcwd() try: if workdir: os.chdir(workdir) alldeps = [source] for dep in depends: alldeps += glob.glob(dep) if not (force or newer_group(alldeps, target, 'newer')): log.debug("skipping '%s' -> '%s' (up-to-date)", source, target) return finally: os.chdir(cwd) require = f'Cython >= {VERSION}' if not cython_chk(VERSION, verbose=False) and setuptools: if sys.modules.get('Cython'): removed = getattr(sys.modules['Cython'], '__version__', '') log.info("removing Cython %s from sys.modules", removed) pkgname = re.compile(r'cython(\.|$)', re.IGNORECASE) for modname in list(sys.modules.keys()): if pkgname.match(modname): del sys.modules[modname] try: install_setup_requires = setuptools._install_setup_requires with warnings.catch_warnings(): category = setuptools.SetuptoolsDeprecationWarning warnings.simplefilter('ignore', category) 
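# cython_req() and cython_chk() above extract the minimum Cython version from
# a requirements line and compare it against the installed Cython. A minimal
# sketch of that parsing and comparison, assuming a hard-coded requirement
# line instead of requirements-build-cython.txt and a plain tuple comparison
# instead of packaging.version, to keep the example dependency-free:
import re

def _required_version(requirement_line):
    m = re.search(r'cython\s*>=\s*(.*)', requirement_line, re.IGNORECASE)
    return m.group(1).strip() if m else None

def _version_tuple(version_string):
    m = re.match(r'(\d+\.\d+(?:\.\d+)?)', version_string)
    return tuple(int(p) for p in m.group(1).split('.')) if m else ()

required = _required_version('Cython >= 3.0')   # hypothetical requirement line
provided = '3.0.12'                             # hypothetical installed version
print(_version_tuple(provided) >= _version_tuple(required))  # -> True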
log.info("fetching build requirement '%s'", require) install_setup_requires({'setup_requires': [require]}) except Exception: log.info("failed to fetch build requirement '%s'", require) if not cython_chk(VERSION): raise DistutilsError(f"missing build requirement {require!r}") # log.info("cythonizing '%s' -> '%s'", source, target) from cythonize import cythonize args = [] if with_coverage(): args += ['-X', 'linetrace=True'] if includes: args += [f'-I{incdir}' for incdir in includes] if workdir: args += ['--working', workdir] args += [source] if target: args += ['--output-file', target] err = cythonize(args) if err: raise DistutilsError(f"Cython failure: {source!r} -> {target!r}") # ----------------------------------------------------------------------------- # A minimalistic MPI program :-) ConfigTest = """\ int main(int argc, char **argv) { int ierr; (void)argc; (void)argv; ierr = MPI_Init(&argc, &argv); if (ierr) return -1; ierr = MPI_Finalize(); if (ierr) return -1; return 0; } """ class config(cmd_config.config): user_options = cmd_config.config.user_options + cmd_mpi_opts def initialize_options(self): cmd_config.config.initialize_options(self) cmd_initialize_mpi_options(self) self.noisy = 0 def finalize_options(self): cmd_config.config.finalize_options(self) if not self.noisy: self.dump_source = 0 def _clean(self, *a, **kw): if sys.platform.startswith('win'): for fn in ('_configtest.exe.manifest', ): if os.path.exists(fn): self.temp_files.append(fn) cmd_config.config._clean(self, *a, **kw) def check_header( self, header, headers=None, include_dirs=None, ): if headers is None: headers = [] log.info("checking for header '%s' ...", header) body = "int main(int n, char**v) { (void)n; (void)v; return 0; }" ok = self.try_compile(body, [*headers, header], include_dirs) log.info('success!' 
if ok else 'failure.') return ok def check_macro( self, macro, headers=None, include_dirs=None, ): log.info("checking for macro '%s' ...", macro) body = [ f"#ifndef {macro}", f"#error macro '{macro}' not defined", r"#endif", r"int main(int n, char**v) { (void)n; (void)v; return 0; }" ] body = "\n".join(body) + "\n" ok = self.try_compile(body, headers, include_dirs) return ok def check_library( self, library, library_dirs=None, headers=None, include_dirs=None, other_libraries=(), lang="c", ): if sys.platform == "darwin": self.compiler.linker_exe.append('-flat_namespace') self.compiler.linker_exe.append('-undefined') self.compiler.linker_exe.append('suppress') log.info("checking for library '%s' ...", library) body = "int main(int n, char**v) { (void)n; (void)v; return 0; }" ok = self.try_link( body, headers, include_dirs, [library, *other_libraries], library_dirs, lang=lang, ) if sys.platform == "darwin": self.compiler.linker_exe.remove('-flat_namespace') self.compiler.linker_exe.remove('-undefined') self.compiler.linker_exe.remove('suppress') return ok def check_function( self, function, headers=None, include_dirs=None, libraries=None, library_dirs=None, decl=0, call=0, lang="c", ): log.info("checking for function '%s' ...", function) body = [] if decl: if call: proto = f"int {function} (void);" else: proto = f"int {function};" if lang == "c": proto = "\n".join([ "#ifdef __cplusplus", "extern {}".format('"C"'), "#endif", proto ]) body.append(proto) body.append( r"int main (int n, char**v) {") if call: body.append(f" (void){function}();") else: body.append(f" {function};") body.append( r" (void)n; (void)v;") body.append( r" return 0;") body.append( r"}") body = "\n".join(body) + "\n" ok = self.try_link( body, headers, include_dirs, libraries, library_dirs, lang=lang, ) return ok def check_symbol( self, symbol, type="int", headers=None, include_dirs=None, libraries=None, library_dirs=None, decl=0, lang="c", ): log.info("checking for symbol '%s' ...", symbol) body = [] if decl: body.append(f"{type} {symbol};") body.append(r"int main (int n, char**v) {") body.append(f" {type} s; s = {symbol}; (void)s;") body.append(r" (void)n; (void)v;") body.append(r" return 0;") body.append(r"}") body = "\n".join(body) + "\n" ok = self.try_link( body, headers, include_dirs, libraries, library_dirs, lang=lang, ) return ok def check_function_call( self, function, args='', headers=None, include_dirs=None, libraries=None, library_dirs=None, lang="c", ): log.info("checking for function '%s' ...", function) body = [] body.append(r"int main (int n, char**v) {") body.append(f" (void){function}({args});") body.append(r" (void)n; (void)v;") body.append(r" return 0;") body.append(r"}") body = "\n".join(body) + "\n" ok = self.try_link( body, headers, include_dirs, libraries, library_dirs, lang=lang, ) return ok def run(self): config = configuration(self, verbose=True) # test MPI C compiler self.compiler = getattr(self.compiler, 'compiler_type', self.compiler) self._check_compiler() configure_compiler(self.compiler, config, lang='c') self.try_link(ConfigTest, headers=['mpi.h'], lang='c') # test MPI C++ compiler self.compiler = getattr(self.compiler, 'compiler_type', self.compiler) self._check_compiler() configure_compiler(self.compiler, config, lang='c++') self.try_link(ConfigTest, headers=['mpi.h'], lang='c++') def configure_dl(ext, config_cmd): log.info("checking for dlopen() availability ...") dlfcn = config_cmd.check_header('dlfcn.h') libdl = config_cmd.check_library('dl') libs = ['dl'] if libdl else None dlopen = 
config_cmd.check_function( 'dlopen', libraries=libs, decl=1, call=1, ) if dlfcn: ext.define_macros += [('HAVE_DLFCN_H', 1)] if dlopen: ext.define_macros += [('HAVE_DLOPEN', 1)] def configure_mpi(ext, config_cmd): from textwrap import dedent headers = ['stdlib.h', 'mpi.h'] # log.info("checking for MPI compile and link ...") ConfigTest = dedent("""\ int main(int argc, char **argv) { (void)MPI_Init(&argc, &argv); (void)MPI_Finalize(); return 0; } """) errmsg = [ "Cannot {} MPI programs. Check your configuration!!!", "Installing mpi4py requires a working MPI implementation.", ] if sys.platform == 'linux': errmsg += [ "If you are running on a supercomputer or cluster, check with", "the system administrator or refer to the system user guide.", "Otherwise, if you are running on a laptop or desktop computer,", "your may be missing the MPICH or Open MPI development package:", "* On Fedora/RHEL systems, run:", " $ sudo dnf install mpich-devel # for MPICH", " $ sudo dnf install openmpi-devel # for Open MPI", "* On Debian/Ubuntu systems, run:", " $ sudo apt install libmpich-dev # for MPICH", " $ sudo apt install libopenmpi-dev # for Open MPI", ] if sys.platform == 'darwin': errmsg += [ "Install MPICH or Open MPI with Homebrew or MacPorts:" " $ brew install mpich|openmpi # Homebrew", " $ port install mpich|openmpi # MacPorts", ] if sys.platform == 'win32': errmsg += [ "Please install *Intel MPI* or *Microsoft MPI*." ] ok = config_cmd.try_compile(ConfigTest, headers=headers) if not ok: message = "\n".join(errmsg).format("compile") raise DistutilsPlatformError(message) ok = config_cmd.try_link(ConfigTest, headers=headers) if not ok: message = errmsg[0].format("link") raise DistutilsPlatformError(message) # impls = ("OPEN_MPI", "MSMPI_VER") tests = [f"defined({macro})" for macro in impls] tests += ["(defined(MPICH_NAME)&&(MPICH_NAME>=3))"] tests += ["(defined(MPICH_NAME)&&(MPICH_NAME==2))"] tests = "||".join(tests) ConfigTestAPI = dedent(f"""\ #if !({tests}) #error "Unknown MPI implementation" #endif """) ConfigTestABI = dedent("""\ #if !(defined(MPI_ABI_VERSION)&&(MPI_ABI_VERSION>=1)) #error "MPI ABI not supported" #endif """) with capture_stderr(): log.info("checking for MPI ABI support ...") mpiabi = config_cmd.try_compile(ConfigTestABI, headers=headers) config = os.environ.get('MPI4PY_BUILD_CONFIGURE') or None config = getattr(config_cmd, 'configure', None) or config if not mpiabi and not config: with capture_stderr(): ok = config_cmd.try_compile(ConfigTestAPI, headers=headers) config = not ok if config: guard = "HAVE_PYMPICONF_H" with capture_stderr(): ok = config_cmd.check_macro(guard) config = not ok if config: log.info("checking for missing MPI functions/symbols ...") configure = ConfigureMPI(config_cmd) with capture_stderr(): results = configure.run() configure.dump(results) ext.define_macros += [(guard, 1)] elif not mpiabi: log.info("checking for missing MPI functions/symbols ...") for function, arglist in ( ('MPI_Type_create_f90_integer', '0,(MPI_Datatype*)0'), ('MPI_Type_create_f90_real', '0,0,(MPI_Datatype*)0'), ('MPI_Type_create_f90_complex', '0,0,(MPI_Datatype*)0'), ('MPI_Status_c2f', '(MPI_Status*)0,(MPI_Fint*)0'), ('MPI_Status_f2c', '(MPI_Fint*)0,(MPI_Status*)0'), ): with capture_stderr(): ok = config_cmd.check_function_call( function, arglist, headers=headers, ) if not ok: macro = 'PyMPI_MISSING_' + function ext.define_macros += [(macro, 1)] # if not mpiabi and os.name == 'posix': configure_dl(ext, config_cmd) def configure_pyexe(exe, _config_cmd): if sys.platform.startswith('win'): 
return if (sys.platform == 'darwin' and ('Anaconda' in sys.version or 'Continuum Analytics' in sys.version)): py_version = sysconfig.get_python_version() py_abiflags = getattr(sys, 'abiflags', '') exe.libraries += ['python' + py_version + py_abiflags] return # pyver = sys.version_info[:2] cfg_vars = sysconfig.get_config_vars() libraries = [] library_dirs = [] runtime_dirs = [] link_args = [] py_enable_shared = cfg_vars.get('Py_ENABLE_SHARED') if pyver >= (3, 8) or not py_enable_shared: py_version = sysconfig.get_python_version() py_abiflags = getattr(sys, 'abiflags', '') libraries = ['python' + py_version + py_abiflags] if hasattr(sys, 'pypy_version_info'): py_tag = py_version[0].replace('2', '') libraries = [f'pypy{py_tag}-c'] if sys.platform == 'darwin': fwkdir = cfg_vars.get('PYTHONFRAMEWORKDIR') if (fwkdir and fwkdir != 'no-framework' and fwkdir in cfg_vars.get('LINKFORSHARED', '')): del libraries[:] # libdir = shlex.split(cfg_vars.get('LIBDIR', '')) libpl = shlex.split(cfg_vars.get('LIBPL', '')) if py_enable_shared: library_dirs += libdir if sys.exec_prefix != '/usr': runtime_dirs += libdir else: library_dirs += libdir library_dirs += libpl for var in ('LIBS', 'MODLIBS', 'SYSLIBS', 'LDLAST'): link_args += shlex.split(cfg_vars.get(var, '')) # exe.libraries += libraries exe.library_dirs += library_dirs exe.runtime_library_dirs += runtime_dirs exe.extra_link_args += link_args class build(cmd_build.build): user_options = cmd_build.build.user_options + cmd_mpi_opts boolean_options = cmd_build.build.boolean_options user_options += [( 'inplace', 'i', "ignore build-lib and put compiled extensions into the source " "directory alongside your pure Python modules", )] boolean_options += ['inplace'] def initialize_options(self): cmd_build.build.initialize_options(self) cmd_initialize_mpi_options(self) self.inplace = None def finalize_options(self): cmd_build.build.finalize_options(self) config_cmd = self.get_finalized_command('config') if isinstance(config_cmd, config): cmd_set_undefined_mpi_options(self, 'config') if self.inplace is None: self.inplace = False def has_executables (self): return self.distribution.has_executables() sub_commands = [ ('build_src', lambda *_: True), *cmd_build.build.sub_commands, ('build_exe', has_executables), ] # XXX disable build_exe subcommand !!! 
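# The distutils/setuptools sub_commands protocol pairs a command name with a
# predicate; Command.get_sub_commands() evaluates each predicate against the
# command instance and keeps only the names for which it holds. The
# `del sub_commands[-1]` statement just below then drops the
# ('build_exe', has_executables) entry appended above, so build_exe is never
# auto-run. A tiny self-contained sketch of that selection logic; the command
# names and predicates here are made up for illustration:
class _FakeBuild:
    has_exe = False
    sub_commands = [
        ('build_src', lambda *_: True),            # unconditional
        ('build_exe', lambda self: self.has_exe),  # only if predicate holds
    ]

    def get_sub_commands(self):
        # same shape as distutils.cmd.Command.get_sub_commands()
        return [name for name, pred in self.sub_commands
                if pred is None or pred(self)]

print(_FakeBuild().get_sub_commands())  # -> ['build_src']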
del sub_commands[-1] class build_src(Command): description = "build C sources from Cython files" user_options = [ ('force', 'f', "forcibly build everything (ignore file timestamps)"), ] boolean_options = ['force'] def initialize_options(self): self.force = False def finalize_options(self): self.set_undefined_options('build', ('force', 'force'), ) def run(self): sources = getattr(self, 'sources', []) require = cython_req() for source in sources: cython_run( **source, force=self.force, VERSION=require, ) # Command class to build extension modules class build_ext(cmd_build_ext.build_ext): user_options = cmd_build_ext.build_ext.user_options + cmd_mpi_opts def initialize_options(self): cmd_build_ext.build_ext.initialize_options(self) cmd_initialize_mpi_options(self) self.inplace = None def finalize_options(self): self.set_undefined_options('build', ('inplace', 'inplace')) cmd_build_ext.build_ext.finalize_options(self) build_cmd = self.get_finalized_command('build') if isinstance(build_cmd, build): cmd_set_undefined_mpi_options(self, 'build') def run(self): self.build_sources() cmd_build_ext.build_ext.run(self) def build_sources(self): if self.get_command_name() == 'build_ext': if 'build_src' in self.distribution.cmdclass: self.run_command('build_src') def build_extensions(self): # First, sanity-check the 'extensions' list self.check_extensions_list(self.extensions) # parse configuration file and configure compiler self.config = configuration(self, verbose=True) configure_compiler(self.compiler, self.config) # build extensions for ext in self.extensions: try: self.build_extension(ext) except (DistutilsError, CCompilerError): if not ext.optional: raise e = sys.exc_info()[1] self.warn(f'{e}') exe = isinstance(ext, Executable) knd = 'executable' if exe else 'extension' self.warn(f'building optional {knd} "{ext.name}" failed') def config_extension (self, ext): configure = getattr(ext, 'configure', None) if configure: config_cmd = self.get_finalized_command('config') config_cmd.compiler = self.compiler # fix compiler config_cmd.configure = self.configure configure(ext, config_cmd) if with_coverage(): ext.define_macros += [('CYTHON_TRACE_NOGIL', 1)] def _get_pth_files(self, ext): if ext.name == 'mpi4py.MPI' and sys.platform == 'win32': confdir = os.path.dirname(__file__) topdir = os.path.dirname(confdir) srcdir = os.path.join(topdir, 'src') dstdir = self.build_lib for pthfile in ('mpi.pth', ): source = os.path.join(srcdir, pthfile) target = os.path.join(dstdir, pthfile) if os.path.exists(source): yield (source, target) def build_extension (self, ext): fullname = self.get_ext_fullname(ext.name) filename = os.path.join( self.build_lib, self.get_ext_filename(fullname)) depends = ext.sources + ext.depends if not (self.force or newer_group(depends, filename, 'newer')): log.debug("skipping '%s' extension (up-to-date)", ext.name) return # self.config_extension(ext) cmd_build_ext.build_ext.build_extension(self, ext) # for source, target in self._get_pth_files(ext): log.info("writing %s", target) copy_file( source, target, verbose=False, dry_run=self.dry_run ) def get_outputs(self): outputs = cmd_build_ext.build_ext.get_outputs(self) for ext in self.extensions: for _, target in self._get_pth_files(ext): outputs.append(target) return outputs # Command class to build executables class build_exe(build_ext): description = "build binary executable components" user_options = [ ('build-exe=', None, "build directory for executable components"), *build_ext.user_options, ] def initialize_options (self): 
build_ext.initialize_options(self) self.build_base = None self.build_exe = None self.inplace = None def finalize_options (self): build_ext.finalize_options(self) self.configure = None self.set_undefined_options('build', ('build_base','build_base'), ('build_lib', 'build_exe')) self.set_undefined_options('build_ext', ('inplace', 'inplace')) self.executables = self.distribution.executables # XXX This is a hack self.extensions = self.distribution.executables self.get_ext_filename = self.get_exe_filename self.check_extensions_list = self.check_executables_list self.build_extension = self.build_executable self.copy_extensions_to_source = self.copy_executables_to_source self.build_lib = self.build_exe def get_exe_filename(self, exe_name): exe_ext = sysconfig.get_config_var('EXE') or '' return exe_name + exe_ext def check_executables_list (self, executables): ListType, TupleType = type([]), type(()) if type(executables) is not ListType: raise DistutilsSetupError( "'executables' option must be a list of Executable instances") for exe in executables: if not isinstance(exe, Executable): raise DistutilsSetupError( "'executables' items must be Executable instances") if (exe.sources is None or type(exe.sources) not in (ListType, TupleType)): raise DistutilsSetupError( f"in 'executables' option (executable '{exe.name}'), " "'sources' must be present and must be " "a list of source filenames" ) def get_exe_fullpath(self, exe, build_dir=None): build_dir = build_dir or self.build_exe package_dir = (exe.package or '').split('.') dest_dir = convert_path(exe.dest_dir or '') output_dir = os.path.join(build_dir, *[*package_dir, dest_dir]) exe_filename = self.get_exe_filename(exe.name) return os.path.join(output_dir, exe_filename) def config_executable (self, exe): build_ext.config_extension(self, exe) def build_executable (self, exe): sources = list(exe.sources) depends = list(exe.depends) exe_fullpath = self.get_exe_fullpath(exe) depends = sources + depends if not (self.force or newer_group(depends, exe_fullpath, 'newer')): log.debug("skipping '%s' executable (up-to-date)", exe.name) return self.config_executable(exe) log.info("building '%s' executable", exe.name) # Next, compile the source code to object files. # XXX not honouring 'define_macros' or 'undef_macros' -- the # CCompiler API needs to change to accommodate this, and I # want to do one thing at a time! macros = exe.define_macros[:] for undef in exe.undef_macros: macros.append((undef,)) # Two possible sources for extra compiler arguments: # - 'extra_compile_args' in Extension object # - CFLAGS environment variable (not particularly # elegant, but people seem to expect it and I # guess it's useful) # The environment variable should take precedence, and # any sensible compiler will give precedence to later # command line args. Hence we combine them in order: extra_args = exe.extra_compile_args[:] objects = self.compiler.compile( sources, output_dir=self.build_temp, macros=macros, include_dirs=exe.include_dirs, debug=self.debug, extra_postargs=extra_args, depends=exe.depends) self._built_objects = objects[:] # Now link the object files together into a "shared object" -- # of course, first we have to figure out all the other things # that go into the mix. 
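# The linking step below pulls the interpreter's own link flags out of
# sysconfig (PY_LDFLAGS and LINKFORSHARED) so the bundled-Python executable is
# linked the same way python itself was. A minimal sketch of that query,
# runnable on its own; the printed values depend entirely on the local Python
# build and may well be empty:
import shlex
import sysconfig

_ldflags = sysconfig.get_config_var('PY_LDFLAGS') or ''
_linkshared = sysconfig.get_config_var('LINKFORSHARED') or ''
# the code below rewrites '-Xlinker foo' style flags into '-Wl,foo' form
_linkshared = _linkshared.replace('-Xlinker ', '-Wl,')
print(shlex.split(_ldflags) + shlex.split(_linkshared))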
if exe.extra_objects: objects.extend(exe.extra_objects) extra_args = exe.extra_link_args[:] # Get special linker flags for building a executable with # bundled Python library, also fix location of needed # python.exp file on AIX ldflags = sysconfig.get_config_var('PY_LDFLAGS') or '' linkshared = sysconfig.get_config_var('LINKFORSHARED') or '' linkshared = linkshared.replace('-Xlinker ', '-Wl,') if sys.platform == 'darwin': # fix wrong framework paths fwkprefix = sysconfig.get_config_var('PYTHONFRAMEWORKPREFIX') fwkdir = sysconfig.get_config_var('PYTHONFRAMEWORKDIR') if fwkprefix and fwkdir and fwkdir != 'no-framework': for flag in shlex.split(linkshared): if flag.startswith(fwkdir): fwkpath = os.path.join(fwkprefix, flag) linkshared = linkshared.replace(flag, fwkpath) if sys.platform.startswith('aix'): python_lib = sysconfig.get_python_lib(standard_lib=1) python_exp = os.path.join(python_lib, 'config', 'python.exp') linkshared = linkshared.replace('Modules/python.exp', python_exp) # Detect target language, if not provided language = exe.language or self.compiler.detect_language(sources) self.compiler.link( self.compiler.EXECUTABLE, objects, exe_fullpath, output_dir=None, libraries=self.get_libraries(exe), library_dirs=exe.library_dirs, runtime_library_dirs=exe.runtime_library_dirs, extra_preargs=shlex.split(ldflags) + shlex.split(linkshared), extra_postargs=extra_args, debug=self.debug, target_lang=language) def copy_executables_to_source(self): build_py = self.get_finalized_command('build_py') root_dir = build_py.get_package_dir('') for exe in self.executables: src = self.get_exe_fullpath(exe) dest = self.get_exe_fullpath(exe, root_dir) self.mkpath(os.path.dirname(dest)) copy_file( src, dest, verbose=self.verbose, dry_run=self.dry_run ) def get_outputs (self): outputs = [] for exe in self.executables: outputs.append(self.get_exe_fullpath(exe)) return outputs class install(cmd_install.install): def initialize_options(self): with warnings.catch_warnings(): if setuptools: category = setuptools.SetuptoolsDeprecationWarning warnings.simplefilter('ignore', category) cmd_install.install.initialize_options(self) self.old_and_unmanageable = True def run(self): cmd_install.install.run(self) def has_lib (self): return (cmd_install.install.has_lib(self) and self.has_exe()) def has_exe (self): return self.distribution.has_executables() sub_commands = [ *cmd_install.install.sub_commands, ('install_exe', has_exe), ] # XXX disable install_exe subcommand !!! 
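# As with the build command, the `del sub_commands[-1]` just below removes the
# ('install_exe', has_exe) entry, so install_exe must be invoked explicitly.
# When it does run, install_exe.install() further below rewrites executable
# names of the form "python-<suffix>" to carry the interpreter version. A tiny
# sketch of that rename; the file name is a made-up example and the POSIX-only
# check of the original is omitted:
import sys

def _versioned_exe_name(exe_filename):
    x, y = sys.version_info[:2]
    if exe_filename.startswith('python-'):
        return exe_filename.replace('python-', f'python{x}.{y}-')
    return exe_filename

print(_versioned_exe_name('python-foo'))  # e.g. 'python3.12-foo' on Python 3.12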
del sub_commands[-1] class install_lib(cmd_install_lib.install_lib): def get_outputs(self): outputs = cmd_install_lib.install_lib.get_outputs(self) for (build_cmd, build_dir) in ( ('build_exe', 'build_exe'), ): cmd_obj = self.get_finalized_command(build_cmd) build_files = cmd_obj.get_outputs() exe_outputs = self._mutate_outputs( self.distribution.has_executables(), build_cmd, build_dir, self.install_dir, ) for src, dest in zip(build_files, exe_outputs): if os.path.exists(src): outputs.append(dest) return outputs class install_data(cmd_install_data.install_data): def finalize_options (self): self.set_undefined_options('install', ('install_lib', 'install_dir'), ('root', 'root'), ('force', 'force'), ) class install_exe(cmd_install_lib.install_lib): description = "install binary executable components" user_options = [ ('install-dir=', 'd', "directory to install to"), ('build-dir=','b', "build directory (where to install from)"), ('force', 'f', "force installation (overwrite existing files)"), ('skip-build', None, "skip the build steps"), ] boolean_options = ['force', 'skip-build'] negative_opt = { } def initialize_options (self): self.install_dir = None self.build_dir = None self.force = 0 self.skip_build = None def finalize_options (self): self.set_undefined_options('build_exe', ('build_exe', 'build_dir')) self.set_undefined_options('install', ('force', 'force'), ('skip_build', 'skip_build'), ('install_scripts', 'install_dir')) def run(self): self.build() self.install() def build (self): if not self.skip_build: if self.distribution.has_executables(): self.run_command('build_exe') def install (self): self.outfiles = [] if self.distribution.has_executables(): build_exe = self.get_finalized_command('build_exe') for exe in build_exe.executables: exe_fullpath = build_exe.get_exe_fullpath(exe) exe_filename = os.path.basename(exe_fullpath) if exe_filename.startswith("python-") and os.name == 'posix': x, y = sys.version_info[:2] install_name = exe_filename.replace( "python-", f"python{x}.{y}-") link = None else: install_name = exe_filename link = None source = exe_fullpath target = os.path.join(self.install_dir, install_name) self.mkpath(self.install_dir) out, done = self.copy_file(source, target, link=link) self.outfiles.append(out) def get_outputs (self): return self.outfiles def get_inputs (self): inputs = [] if self.distribution.has_executables(): build_exe = self.get_finalized_command('build_exe') inputs.extend(build_exe.get_outputs()) return inputs class clean(cmd_clean.clean): description = "clean up temporary files from 'build' command" user_options = \ cmd_clean.clean.user_options[:2] + [ ('build-exe=', None, "build directory for executable components " "(default: 'build_exe.build-exe')"), ] + cmd_clean.clean.user_options[2:] def initialize_options(self): cmd_clean.clean.initialize_options(self) self.build_exe = None def finalize_options(self): cmd_clean.clean.finalize_options(self) self.set_undefined_options('build_exe', ('build_exe', 'build_exe')) def run(self): from distutils.dir_util import remove_tree # remove the build/temp. 
directory # (unless it's already gone) if os.path.exists(self.build_temp): remove_tree(self.build_temp, dry_run=self.dry_run) else: log.debug("'%s' does not exist -- can't clean it", self.build_temp) if self.all: # remove build directories for directory in ( self.build_lib, self.build_exe, self.build_scripts, self.bdist_base, ): if os.path.exists(directory): remove_tree(directory, dry_run=self.dry_run) else: log.debug("'%s' does not exist -- can't clean it", directory) # just for the heck of it, try to remove the base build directory: # we might have emptied it right now, but if not we don't care if not self.dry_run: try: os.rmdir(self.build_base) log.info("removing '%s'", self.build_base) except OSError: pass if self.all: # remove the .egg_info directory try: egg_info = self.get_finalized_command('egg_info').egg_info if os.path.exists(egg_info): remove_tree(egg_info, dry_run=self.dry_run) else: log.debug("'%s' does not exist -- can't clean it", egg_info) except DistutilsError: pass # ----------------------------------------------------------------------------- if setuptools: with contextlib.suppress(Exception): from setuptools.command import egg_info as mod_egg_info _FileList = mod_egg_info.FileList class FileList(_FileList): def process_template_line(self, line): level = log.set_threshold(log.ERROR) try: _FileList.process_template_line(self, line) finally: log.set_threshold(level) mod_egg_info.FileList = FileList # ----------------------------------------------------------------------------- # Support for Reproducible Builds # https://reproducible-builds.org/docs/source-date-epoch/ timestamp = os.environ.get('SOURCE_DATE_EPOCH') if timestamp is not None: import distutils.archive_util as archive_util import stat import tarfile import time timestamp = float(max(int(timestamp), 0)) class Time: @staticmethod def time(): return timestamp @staticmethod def localtime(_=None): return time.localtime(timestamp) class TarInfoMode: def __get__(self, obj, objtype=None): return obj._mode def __set__(self, obj, stmd): ifmt = stat.S_IFMT(stmd) mode = stat.S_IMODE(stmd) & 0o7755 obj._mode = ifmt | mode class TarInfoAttr: def __init__(self, value): self.value = value def __get__(self, obj, objtype=None): return self.value def __set__(self, obj, value): pass class TarInfo(tarfile.TarInfo): mode = TarInfoMode() mtime = TarInfoAttr(timestamp) uid = TarInfoAttr(0) gid = TarInfoAttr(0) uname = TarInfoAttr('') gname = TarInfoAttr('') def make_tarball(*args, **kwargs): tarinfo_orig = tarfile.TarFile.tarinfo try: tarfile.time = Time() tarfile.TarFile.tarinfo = TarInfo return archive_util.make_tarball(*args, **kwargs) finally: tarfile.time = time tarfile.TarFile.tarinfo = tarinfo_orig archive_util.ARCHIVE_FORMATS['gztar'] = ( make_tarball, *archive_util.ARCHIVE_FORMATS['gztar'][1:], ) # ----------------------------------------------------------------------------- mpi4py-4.0.3/conf/mpistubgen.py000066400000000000000000000322561475341043600164330ustar00rootroot00000000000000import os import inspect import textwrap def is_cyfunction(obj): return type(obj).__name__ == 'cython_function_or_method' def is_function(obj): return ( inspect.isbuiltin(obj) or is_cyfunction(obj) or type(obj) is type(ord) ) def is_method(obj): return ( inspect.ismethoddescriptor(obj) or inspect.ismethod(obj) or is_cyfunction(obj) or type(obj) in ( type(str.index), type(str.__add__), type(str.__new__), ) ) def is_classmethod(obj): return ( inspect.isbuiltin(obj) or type(obj).__name__ in ( 'classmethod', 'classmethod_descriptor', ) ) def 
is_staticmethod(obj): return ( type(obj).__name__ in ( 'staticmethod', ) ) def is_datadescr(obj): return inspect.isdatadescriptor(obj) and not hasattr(obj, 'fget') def is_property(obj): return inspect.isdatadescriptor(obj) and hasattr(obj, 'fget') def is_class(obj): return inspect.isclass(obj) or type(obj) is type(int) class Lines(list): INDENT = " " * 4 level = 0 @property def add(self): return self @add.setter def add(self, lines): if lines is None: return if isinstance(lines, str): lines = textwrap.dedent(lines).strip().split('\n') indent = self.INDENT * self.level for line in lines: self.append(indent + line) def signature(obj): doc = obj.__doc__ sig = doc.partition('\n')[0] return sig or None def docstring(obj): doc = obj.__doc__ doc = doc.partition('\n')[2] doc = textwrap.dedent(doc).strip() doc = f'"""{doc}"""' doc = textwrap.indent(doc, Lines.INDENT) return doc def visit_constant(constant): name, value = constant return f"{name}: Final[{type(value).__name__}] = ..." def visit_function(function): sig = signature(function) return f"def {sig}: ..." def visit_method(method): sig = signature(method) return f"def {sig}: ..." def visit_datadescr(datadescr): sig = signature(datadescr) return f"{sig}" def visit_property(prop, name=None): sig = signature(prop.fget) pname = name or prop.fget.__name__ ptype = sig.rsplit('->', 1)[-1].strip() return f"{pname}: {ptype}" def visit_constructor(cls, name='__init__', args=None): init = (name == '__init__') argname = cls.__name__.lower() argtype = cls.__name__ initarg = args or f"{argname}: {argtype} | None = None" selfarg = 'self' if init else 'cls' rettype = 'None' if init else 'Self' arglist = f"{selfarg}, {initarg}" sig = f"{name}({arglist}) -> {rettype}" return f"def {sig}: ..." def visit_class(cls, done=None): skip = { '__doc__', '__dict__', '__module__', '__weakref__', '__pyx_vtable__', '__lt__', '__le__', '__ge__', '__gt__', '__str__', '__repr__', } special = { '__len__': ("self", "int", None), '__bool__': ("self", "bool", None), '__hash__': ("self", "int", None), '__int__': ("self", "int", None), '__index__': ("self", "int", None), '__eq__': ("self", "__other: object", "bool", None), '__ne__': ("self", "__other: object", "bool", None), '__buffer__': ("self", "__flags: int", "memoryview", (3, 12)), } constructor = ( '__new__', '__init__', ) override = OVERRIDE.get(cls.__name__, {}) done = set() if done is None else done lines = Lines() try: class sub(cls): pass final = False except TypeError: final = True if final: lines.add = "@final" base = cls.__base__ if base is object: lines.add = f"class {cls.__name__}:" else: lines.add = f"class {cls.__name__}({base.__name__}):" lines.level += 1 for name in constructor: if name in done: continue if name in override: done.add(name) lines.add = override[name] continue if name in cls.__dict__: done.add(name) lines.add = visit_constructor(cls, name) continue if '__hash__' in cls.__dict__: if cls.__hash__ is None: done.add('__hash__') dct = cls.__dict__ keys = list(dct.keys()) for name in keys: if name in done: continue if name in skip: continue if name in override: done.add(name) lines.add = override[name] continue if name in special: done.add(name) *args, retv, py = special[name] sig = f"{name}({', '.join(args)}) -> {retv}" if py is not None: lines.add = f"if sys.version_info >= {py}:" lines.level += 1 lines.add = f"def {sig}: ..." 
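# Illustrative note (sketch, not exhaustive): for the '__buffer__' entry in
# the `special` table above, the stub lines emitted at this point would look
# roughly like
#
#     if sys.version_info >= (3, 12):
#         def __buffer__(self, __flags: int) -> memoryview: ...
#
# with the extra indentation level undone right below once the optional
# version guard has been closed.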
if py is not None: lines.level -= 1 continue attr = getattr(cls, name) if is_method(attr): done.add(name) if name == attr.__name__: obj = dct[name] if is_classmethod(obj): lines.add = "@classmethod" elif is_staticmethod(obj): lines.add = "@staticmethod" lines.add = visit_method(attr) elif True: lines.add = f"{name} = {attr.__name__}" continue if is_datadescr(attr): done.add(name) lines.add = visit_datadescr(attr) continue if is_property(attr): done.add(name) lines.add = visit_property(attr, name) continue leftovers = [name for name in keys if name not in done and name not in skip] if leftovers: raise RuntimeError(f"leftovers: {leftovers}") lines.level -= 1 return lines def visit_module(module, done=None): skip = { '__doc__', '__name__', '__loader__', '__spec__', '__file__', '__package__', '__builtins__', } done = set() if done is None else done lines = Lines() keys = list(module.__dict__.keys()) keys.sort(key=lambda name: name.startswith("_")) constants = [ (name, getattr(module, name)) for name in keys if all(( name not in done and name not in skip, isinstance(getattr(module, name), int), )) ] for name, value in constants: done.add(name) if name in OVERRIDE: lines.add = OVERRIDE[name] else: lines.add = visit_constant((name, value)) if constants: lines.add = "" for name in keys: if name in done or name in skip: continue value = getattr(module, name) if is_class(value): done.add(name) lines.add = visit_class(value) lines.add = "" aliases = [ (k, getattr(module, k)) for k in keys if all(( k not in done and k not in skip, getattr(module, k) is value, )) ] for aliasname, target in aliases: done.add(aliasname) lines.add = f"{aliasname} = {target.__name__}" if aliases: lines.add = "" instances = [ (k, getattr(module, k)) for k in keys if all(( k not in done and k not in skip, type(getattr(module, k)) is value, )) ] for attrname, attrvalue in instances: done.add(attrname) lines.add = visit_constant((attrname, attrvalue)) if instances: lines.add = "" continue if is_function(value): done.add(name) if name == value.__name__: lines.add = visit_function(value) else: lines.add = f"{name} = {value.__name__}" continue lines.add = "" for name in keys: if name in done or name in skip: continue value = getattr(module, name) done.add(name) if name in OVERRIDE: lines.add = OVERRIDE[name] else: lines.add = visit_constant((name, value)) leftovers = [name for name in keys if name not in done and name not in skip] if leftovers: raise RuntimeError(f"leftovers: {leftovers}") return lines IMPORTS = """ import sys from threading import Lock from typing import ( Any, AnyStr, Final, Literal, NoReturn, final, overload, ) if sys.version_info >= (3, 9): from collections.abc import ( Callable, Hashable, Iterable, Iterator, Sequence, Mapping, ) else: from typing import ( Callable, Hashable, Iterable, Iterator, Sequence, Mapping, ) if sys.version_info >= (3, 11): from typing import Self else: from typing_extensions import Self from os import PathLike """ OVERRIDE = { 'Exception': { '__new__': "def __new__(cls, ierr: int = SUCCESS) -> Self: ...", "__lt__": "def __lt__(self, __other: int) -> bool: ...", "__le__": "def __le__(self, __other: int) -> bool: ...", "__gt__": "def __gt__(self, __other: int) -> bool: ...", "__ge__": "def __ge__(self, __other: int) -> bool: ...", }, 'Info': { '__iter__': "def __iter__(self) -> Iterator[str]: ...", '__getitem__': "def __getitem__(self, __item: str) -> str: ...", '__setitem__': "def __setitem__(self, __item: str, __value: str) -> None: ...", '__delitem__': "def __delitem__(self, __item: 
str) -> None: ...", '__contains__': "def __contains__(self, __value: str) -> bool: ...", }, 'Op': { '__call__': "def __call__(self, x: Any, y: Any) -> Any: ...", }, 'buffer': { '__new__': """ @overload def __new__(cls) -> Self: ... @overload def __new__(cls, __buf: Buffer) -> Self: ... """, '__getitem__': """ @overload def __getitem__(self, __item: int) -> int: ... @overload def __getitem__(self, __item: slice) -> buffer: ... """, '__setitem__': """ @overload def __setitem__(self, __item: int, __value: int) -> None: ... @overload def __setitem__(self, __item: slice, __value: Buffer) -> None: ... """, '__delitem__': None, }, 'Pickle': { '__new__': None, '__init__': """ @overload def __init__(self, dumps: Callable[[Any, int], bytes], loads: Callable[[Buffer], Any], protocol: int | None = None, threshold: int | None = None, ) -> None: ... @overload def __init__(self, dumps: Callable[[Any], bytes] | None = None, loads: Callable[[Buffer], Any] | None = None, ) -> None: ... """, }, '__pyx_capi__': "__pyx_capi__: Final[dict[str, Any]] = ...", '_typedict': "_typedict: Final[dict[str, Datatype]] = ...", '_typedict_c': "_typedict_c: Final[dict[str, Datatype]] = ...", '_typedict_f': "_typedict_f: Final[dict[str, Datatype]] = ...", '_keyval_registry': None, } OVERRIDE.update({ subtype: { '__new__': "def __new__(cls) -> Self: ...", } for subtype in ( 'BottomType', 'InPlaceType', 'BufferAutomaticType', ) }) OVERRIDE.update({ subtype: { '__new__': str.format(""" def __new__(cls, {}: {} | None = None) -> Self: ... """, basetype.lower(), basetype) } for basetype, subtype in ( ('Comm', 'Comm'), ('Comm', 'Intracomm'), ('Comm', 'Topocomm'), ('Comm', 'Cartcomm'), ('Comm', 'Graphcomm'), ('Comm', 'Distgraphcomm'), ('Comm', 'Intercomm'), ('Request', 'Request'), ('Request', 'Prequest'), ('Request', 'Grequest'), ) }) OVERRIDE.update({ # python/mypy#15717 'Group': { 'Intersect': """ @classmethod # python/mypy#15717 def Intersect(cls, group1: Group, group2: Group) -> Self: ... 
""" } }) TYPING = """ from .typing import ( # noqa: E402 Buffer, Bottom, InPlace, BufSpec, BufSpecB, BufSpecV, BufSpecW, TargetSpec, ) """ def visit_mpi4py_MPI(): from mpi4py import MPI as module lines = Lines() lines.add = IMPORTS lines.add = "" lines.add = visit_module(module) lines.add = "" lines.add = TYPING return lines def generate(filename): dirname = os.path.dirname(filename) os.makedirs(dirname, exist_ok=True) with open(filename, 'w') as f: script = os.path.basename(__file__) print(f"# Generated with `python conf/{script}`", file=f) for line in visit_mpi4py_MPI(): print(line, file=f) OUTDIR = os.path.join('src', 'mpi4py') if __name__ == '__main__': generate(os.path.join(OUTDIR, 'MPI.pyi')) mpi4py-4.0.3/conf/mpiuni/000077500000000000000000000000001475341043600151755ustar00rootroot00000000000000mpi4py-4.0.3/conf/mpiuni/mpi.h000066400000000000000000000014071475341043600161350ustar00rootroot00000000000000/* Author: Lisandro Dalcin */ /* Contact: dalcinl@gmail.com */ #ifndef PyMPI_MPIUNI_H #define PyMPI_MPIUNI_H #include #undef PETSC_HAVE_HIP #undef PETSC_HAVE_CUDA #undef PETSC_HAVE_FORTRAN #undef PETSC_HAVE_I_MPI_NUMVERSION #undef PETSC_HAVE_MVAPICH_NUMVERSION #undef PETSC_HAVE_MVAPICH2_NUMVERSION #undef PETSC_HAVE_MPICH_NUMVERSION #undef PETSC_HAVE_OMPI_MAJOR_VERSION #undef PETSC_HAVE_MSMPI_VERSION #undef PETSC_HAVE_MPI_PROCESS_SHARED_MEMORY #include #include #define PETSCSYS_H #define PETSCIMPL_H #define PETSCDEVICE_CUPM_H #include #include #include static int PETSC_COMM_WORLD = MPI_COMM_NULL; #include <../src/sys/mpiuni/mpi.c> #include <../src/sys/mpiuni/mpitime.c> #endif mpi4py-4.0.3/conf/nompi/000077500000000000000000000000001475341043600150165ustar00rootroot00000000000000mpi4py-4.0.3/conf/nompi/mpi.h000066400000000000000000000007011475341043600157520ustar00rootroot00000000000000/* Author: Lisandro Dalcin */ /* Contact: dalcinl@gmail.com */ #ifndef PyMPI_NOMPI_H #define PyMPI_NOMPI_H #define MPI_Init(a,b) ((void)a,(void)b,0) #define MPI_Finalize() (0) #define MPI_Initialized(a) ((*(a)=1),0) #define MPI_Finalized(a) ((*(a)=1),0) #define MPI_COMM_WORLD ((void*)0) #define MPI_Comm_size(a,b) ((void)(a),(*(b)=1),0) #define MPI_Comm_rank(a,b) ((void)(a),(*(b)=0),0) #define MPI_Abort(a,b) ((void)(a),(void)(b),0) #endif mpi4py-4.0.3/conf/nompi/pympiconf.h000066400000000000000000001057371475341043600172100ustar00rootroot00000000000000#ifndef PyMPI_PYMPICONF_H #define PyMPI_PYMPICONF_H #undef PyMPI_HAVE_MPI_Aint #undef PyMPI_HAVE_MPI_Offset #undef PyMPI_HAVE_MPI_Count #undef PyMPI_HAVE_MPI_Status #undef PyMPI_HAVE_MPI_Datatype #undef PyMPI_HAVE_MPI_Request #undef PyMPI_HAVE_MPI_Message #undef PyMPI_HAVE_MPI_Op #undef PyMPI_HAVE_MPI_Group #undef PyMPI_HAVE_MPI_Info #undef PyMPI_HAVE_MPI_Errhandler #undef PyMPI_HAVE_MPI_Session #undef PyMPI_HAVE_MPI_Comm #undef PyMPI_HAVE_MPI_Win #undef PyMPI_HAVE_MPI_File #undef PyMPI_HAVE_MPI_UNDEFINED #undef PyMPI_HAVE_MPI_ANY_SOURCE #undef PyMPI_HAVE_MPI_ANY_TAG #undef PyMPI_HAVE_MPI_PROC_NULL #undef PyMPI_HAVE_MPI_ROOT #undef PyMPI_HAVE_MPI_IDENT #undef PyMPI_HAVE_MPI_CONGRUENT #undef PyMPI_HAVE_MPI_SIMILAR #undef PyMPI_HAVE_MPI_UNEQUAL #undef PyMPI_HAVE_MPI_BOTTOM #undef PyMPI_HAVE_MPI_IN_PLACE #undef PyMPI_HAVE_MPI_KEYVAL_INVALID #undef PyMPI_HAVE_MPI_MAX_OBJECT_NAME #undef PyMPI_HAVE_MPI_DATATYPE_NULL #undef PyMPI_HAVE_MPI_PACKED #undef PyMPI_HAVE_MPI_BYTE #undef PyMPI_HAVE_MPI_AINT #undef PyMPI_HAVE_MPI_OFFSET #undef PyMPI_HAVE_MPI_COUNT #undef PyMPI_HAVE_MPI_CHAR #undef PyMPI_HAVE_MPI_WCHAR #undef PyMPI_HAVE_MPI_SIGNED_CHAR #undef 
PyMPI_HAVE_MPI_SHORT #undef PyMPI_HAVE_MPI_INT #undef PyMPI_HAVE_MPI_LONG #undef PyMPI_HAVE_MPI_LONG_LONG #undef PyMPI_HAVE_MPI_LONG_LONG_INT #undef PyMPI_HAVE_MPI_UNSIGNED_CHAR #undef PyMPI_HAVE_MPI_UNSIGNED_SHORT #undef PyMPI_HAVE_MPI_UNSIGNED #undef PyMPI_HAVE_MPI_UNSIGNED_LONG #undef PyMPI_HAVE_MPI_UNSIGNED_LONG_LONG #undef PyMPI_HAVE_MPI_FLOAT #undef PyMPI_HAVE_MPI_DOUBLE #undef PyMPI_HAVE_MPI_LONG_DOUBLE #undef PyMPI_HAVE_MPI_C_BOOL #undef PyMPI_HAVE_MPI_INT8_T #undef PyMPI_HAVE_MPI_INT16_T #undef PyMPI_HAVE_MPI_INT32_T #undef PyMPI_HAVE_MPI_INT64_T #undef PyMPI_HAVE_MPI_UINT8_T #undef PyMPI_HAVE_MPI_UINT16_T #undef PyMPI_HAVE_MPI_UINT32_T #undef PyMPI_HAVE_MPI_UINT64_T #undef PyMPI_HAVE_MPI_C_COMPLEX #undef PyMPI_HAVE_MPI_C_FLOAT_COMPLEX #undef PyMPI_HAVE_MPI_C_DOUBLE_COMPLEX #undef PyMPI_HAVE_MPI_C_LONG_DOUBLE_COMPLEX #undef PyMPI_HAVE_MPI_CXX_BOOL #undef PyMPI_HAVE_MPI_CXX_FLOAT_COMPLEX #undef PyMPI_HAVE_MPI_CXX_DOUBLE_COMPLEX #undef PyMPI_HAVE_MPI_CXX_LONG_DOUBLE_COMPLEX #undef PyMPI_HAVE_MPI_SHORT_INT #undef PyMPI_HAVE_MPI_2INT #undef PyMPI_HAVE_MPI_LONG_INT #undef PyMPI_HAVE_MPI_FLOAT_INT #undef PyMPI_HAVE_MPI_DOUBLE_INT #undef PyMPI_HAVE_MPI_LONG_DOUBLE_INT #undef PyMPI_HAVE_MPI_CHARACTER #undef PyMPI_HAVE_MPI_LOGICAL #undef PyMPI_HAVE_MPI_INTEGER #undef PyMPI_HAVE_MPI_REAL #undef PyMPI_HAVE_MPI_DOUBLE_PRECISION #undef PyMPI_HAVE_MPI_COMPLEX #undef PyMPI_HAVE_MPI_DOUBLE_COMPLEX #undef PyMPI_HAVE_MPI_LOGICAL1 #undef PyMPI_HAVE_MPI_LOGICAL2 #undef PyMPI_HAVE_MPI_LOGICAL4 #undef PyMPI_HAVE_MPI_LOGICAL8 #undef PyMPI_HAVE_MPI_INTEGER1 #undef PyMPI_HAVE_MPI_INTEGER2 #undef PyMPI_HAVE_MPI_INTEGER4 #undef PyMPI_HAVE_MPI_INTEGER8 #undef PyMPI_HAVE_MPI_INTEGER16 #undef PyMPI_HAVE_MPI_REAL2 #undef PyMPI_HAVE_MPI_REAL4 #undef PyMPI_HAVE_MPI_REAL8 #undef PyMPI_HAVE_MPI_REAL16 #undef PyMPI_HAVE_MPI_COMPLEX4 #undef PyMPI_HAVE_MPI_COMPLEX8 #undef PyMPI_HAVE_MPI_COMPLEX16 #undef PyMPI_HAVE_MPI_COMPLEX32 #undef PyMPI_HAVE_MPI_Get_address #undef PyMPI_HAVE_MPI_Aint_add #undef PyMPI_HAVE_MPI_Aint_diff #undef PyMPI_HAVE_MPI_Type_dup #undef PyMPI_HAVE_MPI_Type_contiguous #undef PyMPI_HAVE_MPI_Type_vector #undef PyMPI_HAVE_MPI_Type_indexed #undef PyMPI_HAVE_MPI_Type_create_indexed_block #undef PyMPI_HAVE_MPI_ORDER_C #undef PyMPI_HAVE_MPI_ORDER_FORTRAN #undef PyMPI_HAVE_MPI_Type_create_subarray #undef PyMPI_HAVE_MPI_DISTRIBUTE_NONE #undef PyMPI_HAVE_MPI_DISTRIBUTE_BLOCK #undef PyMPI_HAVE_MPI_DISTRIBUTE_CYCLIC #undef PyMPI_HAVE_MPI_DISTRIBUTE_DFLT_DARG #undef PyMPI_HAVE_MPI_Type_create_darray #undef PyMPI_HAVE_MPI_Type_create_hvector #undef PyMPI_HAVE_MPI_Type_create_hindexed #undef PyMPI_HAVE_MPI_Type_create_hindexed_block #undef PyMPI_HAVE_MPI_Type_create_struct #undef PyMPI_HAVE_MPI_Type_create_resized #undef PyMPI_HAVE_MPI_Type_size #undef PyMPI_HAVE_MPI_Type_get_extent #undef PyMPI_HAVE_MPI_Type_get_true_extent #undef PyMPI_HAVE_MPI_Type_size_x #undef PyMPI_HAVE_MPI_Type_get_extent_x #undef PyMPI_HAVE_MPI_Type_get_true_extent_x #undef PyMPI_HAVE_MPI_Type_create_f90_integer #undef PyMPI_HAVE_MPI_Type_create_f90_real #undef PyMPI_HAVE_MPI_Type_create_f90_complex #undef PyMPI_HAVE_MPI_TYPECLASS_INTEGER #undef PyMPI_HAVE_MPI_TYPECLASS_REAL #undef PyMPI_HAVE_MPI_TYPECLASS_COMPLEX #undef PyMPI_HAVE_MPI_Type_match_size #undef PyMPI_HAVE_MPI_Type_get_value_index #undef PyMPI_HAVE_MPI_Type_commit #undef PyMPI_HAVE_MPI_Type_free #undef PyMPI_HAVE_MPI_COMBINER_NAMED #undef PyMPI_HAVE_MPI_COMBINER_DUP #undef PyMPI_HAVE_MPI_COMBINER_CONTIGUOUS #undef PyMPI_HAVE_MPI_COMBINER_VECTOR #undef 
PyMPI_HAVE_MPI_COMBINER_HVECTOR #undef PyMPI_HAVE_MPI_COMBINER_INDEXED #undef PyMPI_HAVE_MPI_COMBINER_HINDEXED #undef PyMPI_HAVE_MPI_COMBINER_INDEXED_BLOCK #undef PyMPI_HAVE_MPI_COMBINER_HINDEXED_BLOCK #undef PyMPI_HAVE_MPI_COMBINER_STRUCT #undef PyMPI_HAVE_MPI_COMBINER_SUBARRAY #undef PyMPI_HAVE_MPI_COMBINER_DARRAY #undef PyMPI_HAVE_MPI_COMBINER_F90_REAL #undef PyMPI_HAVE_MPI_COMBINER_F90_COMPLEX #undef PyMPI_HAVE_MPI_COMBINER_F90_INTEGER #undef PyMPI_HAVE_MPI_COMBINER_RESIZED #undef PyMPI_HAVE_MPI_COMBINER_VALUE_INDEX #undef PyMPI_HAVE_MPI_Type_get_envelope #undef PyMPI_HAVE_MPI_Type_get_contents #undef PyMPI_HAVE_MPI_Pack #undef PyMPI_HAVE_MPI_Unpack #undef PyMPI_HAVE_MPI_Pack_size #undef PyMPI_HAVE_MPI_Pack_external #undef PyMPI_HAVE_MPI_Unpack_external #undef PyMPI_HAVE_MPI_Pack_external_size #undef PyMPI_HAVE_MPI_Type_get_name #undef PyMPI_HAVE_MPI_Type_set_name #undef PyMPI_HAVE_MPI_Type_get_attr #undef PyMPI_HAVE_MPI_Type_set_attr #undef PyMPI_HAVE_MPI_Type_delete_attr #undef PyMPI_HAVE_MPI_Type_copy_attr_function #undef PyMPI_HAVE_MPI_Type_delete_attr_function #undef PyMPI_HAVE_MPI_TYPE_NULL_COPY_FN #undef PyMPI_HAVE_MPI_TYPE_DUP_FN #undef PyMPI_HAVE_MPI_TYPE_NULL_DELETE_FN #undef PyMPI_HAVE_MPI_Type_create_keyval #undef PyMPI_HAVE_MPI_Type_free_keyval #undef PyMPI_HAVE_MPI_Type_contiguous_c #undef PyMPI_HAVE_MPI_Type_vector_c #undef PyMPI_HAVE_MPI_Type_indexed_c #undef PyMPI_HAVE_MPI_Type_create_indexed_block_c #undef PyMPI_HAVE_MPI_Type_create_subarray_c #undef PyMPI_HAVE_MPI_Type_create_darray_c #undef PyMPI_HAVE_MPI_Type_create_hvector_c #undef PyMPI_HAVE_MPI_Type_create_hindexed_c #undef PyMPI_HAVE_MPI_Type_create_hindexed_block_c #undef PyMPI_HAVE_MPI_Type_create_struct_c #undef PyMPI_HAVE_MPI_Type_create_resized_c #undef PyMPI_HAVE_MPI_Type_size_c #undef PyMPI_HAVE_MPI_Type_get_extent_c #undef PyMPI_HAVE_MPI_Type_get_true_extent_c #undef PyMPI_HAVE_MPI_Type_get_envelope_c #undef PyMPI_HAVE_MPI_Type_get_contents_c #undef PyMPI_HAVE_MPI_Pack_c #undef PyMPI_HAVE_MPI_Unpack_c #undef PyMPI_HAVE_MPI_Pack_size_c #undef PyMPI_HAVE_MPI_Pack_external_c #undef PyMPI_HAVE_MPI_Unpack_external_c #undef PyMPI_HAVE_MPI_Pack_external_size_c #undef PyMPI_HAVE_MPI_STATUS_IGNORE #undef PyMPI_HAVE_MPI_STATUSES_IGNORE #undef PyMPI_HAVE_MPI_Get_count #undef PyMPI_HAVE_MPI_Get_elements #undef PyMPI_HAVE_MPI_Status_set_elements #undef PyMPI_HAVE_MPI_Get_elements_x #undef PyMPI_HAVE_MPI_Status_set_elements_x #undef PyMPI_HAVE_MPI_Test_cancelled #undef PyMPI_HAVE_MPI_Status_set_cancelled #undef PyMPI_HAVE_MPI_Get_count_c #undef PyMPI_HAVE_MPI_Get_elements_c #undef PyMPI_HAVE_MPI_Status_set_elements_c #undef PyMPI_HAVE_MPI_Status_get_source #undef PyMPI_HAVE_MPI_Status_set_source #undef PyMPI_HAVE_MPI_Status_get_tag #undef PyMPI_HAVE_MPI_Status_set_tag #undef PyMPI_HAVE_MPI_Status_get_error #undef PyMPI_HAVE_MPI_Status_set_error #undef PyMPI_HAVE_MPI_REQUEST_NULL #undef PyMPI_HAVE_MPI_Wait #undef PyMPI_HAVE_MPI_Test #undef PyMPI_HAVE_MPI_Request_get_status #undef PyMPI_HAVE_MPI_Waitany #undef PyMPI_HAVE_MPI_Testany #undef PyMPI_HAVE_MPI_Request_get_status_any #undef PyMPI_HAVE_MPI_Waitall #undef PyMPI_HAVE_MPI_Testall #undef PyMPI_HAVE_MPI_Request_get_status_all #undef PyMPI_HAVE_MPI_Waitsome #undef PyMPI_HAVE_MPI_Testsome #undef PyMPI_HAVE_MPI_Request_get_status_some #undef PyMPI_HAVE_MPI_Cancel #undef PyMPI_HAVE_MPI_Request_free #undef PyMPI_HAVE_MPI_Start #undef PyMPI_HAVE_MPI_Startall #undef PyMPI_HAVE_MPI_Pready #undef PyMPI_HAVE_MPI_Pready_range #undef PyMPI_HAVE_MPI_Pready_list #undef 
PyMPI_HAVE_MPI_Parrived #undef PyMPI_HAVE_MPI_Grequest_cancel_function #undef PyMPI_HAVE_MPI_Grequest_free_function #undef PyMPI_HAVE_MPI_Grequest_query_function #undef PyMPI_HAVE_MPI_Grequest_start #undef PyMPI_HAVE_MPI_Grequest_complete #undef PyMPI_HAVE_MPI_OP_NULL #undef PyMPI_HAVE_MPI_MAX #undef PyMPI_HAVE_MPI_MIN #undef PyMPI_HAVE_MPI_SUM #undef PyMPI_HAVE_MPI_PROD #undef PyMPI_HAVE_MPI_LAND #undef PyMPI_HAVE_MPI_BAND #undef PyMPI_HAVE_MPI_LOR #undef PyMPI_HAVE_MPI_BOR #undef PyMPI_HAVE_MPI_LXOR #undef PyMPI_HAVE_MPI_BXOR #undef PyMPI_HAVE_MPI_MAXLOC #undef PyMPI_HAVE_MPI_MINLOC #undef PyMPI_HAVE_MPI_REPLACE #undef PyMPI_HAVE_MPI_NO_OP #undef PyMPI_HAVE_MPI_Op_free #undef PyMPI_HAVE_MPI_User_function #undef PyMPI_HAVE_MPI_Op_create #undef PyMPI_HAVE_MPI_Op_commutative #undef PyMPI_HAVE_MPI_User_function_c #undef PyMPI_HAVE_MPI_Op_create_c #undef PyMPI_HAVE_MPI_GROUP_NULL #undef PyMPI_HAVE_MPI_GROUP_EMPTY #undef PyMPI_HAVE_MPI_Group_free #undef PyMPI_HAVE_MPI_Group_size #undef PyMPI_HAVE_MPI_Group_rank #undef PyMPI_HAVE_MPI_Group_translate_ranks #undef PyMPI_HAVE_MPI_Group_compare #undef PyMPI_HAVE_MPI_Group_union #undef PyMPI_HAVE_MPI_Group_intersection #undef PyMPI_HAVE_MPI_Group_difference #undef PyMPI_HAVE_MPI_Group_incl #undef PyMPI_HAVE_MPI_Group_excl #undef PyMPI_HAVE_MPI_Group_range_incl #undef PyMPI_HAVE_MPI_Group_range_excl #undef PyMPI_HAVE_MPI_INFO_NULL #undef PyMPI_HAVE_MPI_INFO_ENV #undef PyMPI_HAVE_MPI_Info_free #undef PyMPI_HAVE_MPI_Info_create #undef PyMPI_HAVE_MPI_Info_dup #undef PyMPI_HAVE_MPI_Info_create_env #undef PyMPI_HAVE_MPI_MAX_INFO_KEY #undef PyMPI_HAVE_MPI_MAX_INFO_VAL #undef PyMPI_HAVE_MPI_Info_get_string #undef PyMPI_HAVE_MPI_Info_set #undef PyMPI_HAVE_MPI_Info_delete #undef PyMPI_HAVE_MPI_Info_get_nkeys #undef PyMPI_HAVE_MPI_Info_get_nthkey #undef PyMPI_HAVE_MPI_ERRHANDLER_NULL #undef PyMPI_HAVE_MPI_ERRORS_RETURN #undef PyMPI_HAVE_MPI_ERRORS_ABORT #undef PyMPI_HAVE_MPI_ERRORS_ARE_FATAL #undef PyMPI_HAVE_MPI_Errhandler_free #undef PyMPI_HAVE_MPI_SESSION_NULL #undef PyMPI_HAVE_MPI_MAX_PSET_NAME_LEN #undef PyMPI_HAVE_MPI_Session_init #undef PyMPI_HAVE_MPI_Session_finalize #undef PyMPI_HAVE_MPI_Session_get_num_psets #undef PyMPI_HAVE_MPI_Session_get_nth_pset #undef PyMPI_HAVE_MPI_Session_get_info #undef PyMPI_HAVE_MPI_Session_get_pset_info #undef PyMPI_HAVE_MPI_Group_from_session_pset #undef PyMPI_HAVE_MPI_Session_errhandler_function #undef PyMPI_HAVE_MPI_Session_create_errhandler #undef PyMPI_HAVE_MPI_Session_get_errhandler #undef PyMPI_HAVE_MPI_Session_set_errhandler #undef PyMPI_HAVE_MPI_Session_call_errhandler #undef PyMPI_HAVE_MPI_COMM_NULL #undef PyMPI_HAVE_MPI_COMM_SELF #define PyMPI_HAVE_MPI_COMM_WORLD 1 #undef PyMPI_HAVE_MPI_Comm_free #undef PyMPI_HAVE_MPI_Comm_group #define PyMPI_HAVE_MPI_Comm_size 1 #define PyMPI_HAVE_MPI_Comm_rank 1 #undef PyMPI_HAVE_MPI_Comm_compare #undef PyMPI_HAVE_MPI_Topo_test #undef PyMPI_HAVE_MPI_Comm_test_inter #define PyMPI_HAVE_MPI_Abort 1 #undef PyMPI_HAVE_MPI_BSEND_OVERHEAD #undef PyMPI_HAVE_MPI_BUFFER_AUTOMATIC #undef PyMPI_HAVE_MPI_Buffer_attach #undef PyMPI_HAVE_MPI_Buffer_detach #undef PyMPI_HAVE_MPI_Buffer_flush #undef PyMPI_HAVE_MPI_Buffer_iflush #undef PyMPI_HAVE_MPI_Comm_attach_buffer #undef PyMPI_HAVE_MPI_Comm_detach_buffer #undef PyMPI_HAVE_MPI_Comm_flush_buffer #undef PyMPI_HAVE_MPI_Comm_iflush_buffer #undef PyMPI_HAVE_MPI_Session_attach_buffer #undef PyMPI_HAVE_MPI_Session_detach_buffer #undef PyMPI_HAVE_MPI_Session_flush_buffer #undef PyMPI_HAVE_MPI_Session_iflush_buffer #undef PyMPI_HAVE_MPI_Send #undef 
PyMPI_HAVE_MPI_Recv #undef PyMPI_HAVE_MPI_Sendrecv #undef PyMPI_HAVE_MPI_Sendrecv_replace #undef PyMPI_HAVE_MPI_Bsend #undef PyMPI_HAVE_MPI_Ssend #undef PyMPI_HAVE_MPI_Rsend #undef PyMPI_HAVE_MPI_Isend #undef PyMPI_HAVE_MPI_Irecv #undef PyMPI_HAVE_MPI_Isendrecv #undef PyMPI_HAVE_MPI_Isendrecv_replace #undef PyMPI_HAVE_MPI_Ibsend #undef PyMPI_HAVE_MPI_Issend #undef PyMPI_HAVE_MPI_Irsend #undef PyMPI_HAVE_MPI_Send_init #undef PyMPI_HAVE_MPI_Bsend_init #undef PyMPI_HAVE_MPI_Ssend_init #undef PyMPI_HAVE_MPI_Rsend_init #undef PyMPI_HAVE_MPI_Recv_init #undef PyMPI_HAVE_MPI_Psend_init #undef PyMPI_HAVE_MPI_Precv_init #undef PyMPI_HAVE_MPI_Probe #undef PyMPI_HAVE_MPI_Iprobe #undef PyMPI_HAVE_MPI_MESSAGE_NULL #undef PyMPI_HAVE_MPI_MESSAGE_NO_PROC #undef PyMPI_HAVE_MPI_Mprobe #undef PyMPI_HAVE_MPI_Improbe #undef PyMPI_HAVE_MPI_Mrecv #undef PyMPI_HAVE_MPI_Imrecv #undef PyMPI_HAVE_MPI_Barrier #undef PyMPI_HAVE_MPI_Bcast #undef PyMPI_HAVE_MPI_Gather #undef PyMPI_HAVE_MPI_Gatherv #undef PyMPI_HAVE_MPI_Scatter #undef PyMPI_HAVE_MPI_Scatterv #undef PyMPI_HAVE_MPI_Allgather #undef PyMPI_HAVE_MPI_Allgatherv #undef PyMPI_HAVE_MPI_Alltoall #undef PyMPI_HAVE_MPI_Alltoallv #undef PyMPI_HAVE_MPI_Alltoallw #undef PyMPI_HAVE_MPI_Reduce_local #undef PyMPI_HAVE_MPI_Reduce #undef PyMPI_HAVE_MPI_Allreduce #undef PyMPI_HAVE_MPI_Reduce_scatter_block #undef PyMPI_HAVE_MPI_Reduce_scatter #undef PyMPI_HAVE_MPI_Scan #undef PyMPI_HAVE_MPI_Exscan #undef PyMPI_HAVE_MPI_Neighbor_allgather #undef PyMPI_HAVE_MPI_Neighbor_allgatherv #undef PyMPI_HAVE_MPI_Neighbor_alltoall #undef PyMPI_HAVE_MPI_Neighbor_alltoallv #undef PyMPI_HAVE_MPI_Neighbor_alltoallw #undef PyMPI_HAVE_MPI_Ibarrier #undef PyMPI_HAVE_MPI_Ibcast #undef PyMPI_HAVE_MPI_Igather #undef PyMPI_HAVE_MPI_Igatherv #undef PyMPI_HAVE_MPI_Iscatter #undef PyMPI_HAVE_MPI_Iscatterv #undef PyMPI_HAVE_MPI_Iallgather #undef PyMPI_HAVE_MPI_Iallgatherv #undef PyMPI_HAVE_MPI_Ialltoall #undef PyMPI_HAVE_MPI_Ialltoallv #undef PyMPI_HAVE_MPI_Ialltoallw #undef PyMPI_HAVE_MPI_Ireduce #undef PyMPI_HAVE_MPI_Iallreduce #undef PyMPI_HAVE_MPI_Ireduce_scatter_block #undef PyMPI_HAVE_MPI_Ireduce_scatter #undef PyMPI_HAVE_MPI_Iscan #undef PyMPI_HAVE_MPI_Iexscan #undef PyMPI_HAVE_MPI_Ineighbor_allgather #undef PyMPI_HAVE_MPI_Ineighbor_allgatherv #undef PyMPI_HAVE_MPI_Ineighbor_alltoall #undef PyMPI_HAVE_MPI_Ineighbor_alltoallv #undef PyMPI_HAVE_MPI_Ineighbor_alltoallw #undef PyMPI_HAVE_MPI_Barrier_init #undef PyMPI_HAVE_MPI_Bcast_init #undef PyMPI_HAVE_MPI_Gather_init #undef PyMPI_HAVE_MPI_Gatherv_init #undef PyMPI_HAVE_MPI_Scatter_init #undef PyMPI_HAVE_MPI_Scatterv_init #undef PyMPI_HAVE_MPI_Allgather_init #undef PyMPI_HAVE_MPI_Allgatherv_init #undef PyMPI_HAVE_MPI_Alltoall_init #undef PyMPI_HAVE_MPI_Alltoallv_init #undef PyMPI_HAVE_MPI_Alltoallw_init #undef PyMPI_HAVE_MPI_Reduce_init #undef PyMPI_HAVE_MPI_Allreduce_init #undef PyMPI_HAVE_MPI_Reduce_scatter_block_init #undef PyMPI_HAVE_MPI_Reduce_scatter_init #undef PyMPI_HAVE_MPI_Scan_init #undef PyMPI_HAVE_MPI_Exscan_init #undef PyMPI_HAVE_MPI_Neighbor_allgather_init #undef PyMPI_HAVE_MPI_Neighbor_allgatherv_init #undef PyMPI_HAVE_MPI_Neighbor_alltoall_init #undef PyMPI_HAVE_MPI_Neighbor_alltoallv_init #undef PyMPI_HAVE_MPI_Neighbor_alltoallw_init #undef PyMPI_HAVE_MPI_Comm_dup #undef PyMPI_HAVE_MPI_Comm_dup_with_info #undef PyMPI_HAVE_MPI_Comm_idup #undef PyMPI_HAVE_MPI_Comm_idup_with_info #undef PyMPI_HAVE_MPI_Comm_create #undef PyMPI_HAVE_MPI_Comm_create_group #undef PyMPI_HAVE_MPI_MAX_STRINGTAG_LEN #undef 
PyMPI_HAVE_MPI_Comm_create_from_group #undef PyMPI_HAVE_MPI_Comm_split #undef PyMPI_HAVE_MPI_COMM_TYPE_SHARED #undef PyMPI_HAVE_MPI_COMM_TYPE_HW_GUIDED #undef PyMPI_HAVE_MPI_COMM_TYPE_HW_UNGUIDED #undef PyMPI_HAVE_MPI_COMM_TYPE_RESOURCE_GUIDED #undef PyMPI_HAVE_MPI_Comm_split_type #undef PyMPI_HAVE_MPI_Comm_set_info #undef PyMPI_HAVE_MPI_Comm_get_info #undef PyMPI_HAVE_MPI_CART #undef PyMPI_HAVE_MPI_Cart_create #undef PyMPI_HAVE_MPI_Cartdim_get #undef PyMPI_HAVE_MPI_Cart_get #undef PyMPI_HAVE_MPI_Cart_rank #undef PyMPI_HAVE_MPI_Cart_coords #undef PyMPI_HAVE_MPI_Cart_shift #undef PyMPI_HAVE_MPI_Cart_sub #undef PyMPI_HAVE_MPI_Cart_map #undef PyMPI_HAVE_MPI_Dims_create #undef PyMPI_HAVE_MPI_GRAPH #undef PyMPI_HAVE_MPI_Graph_create #undef PyMPI_HAVE_MPI_Graphdims_get #undef PyMPI_HAVE_MPI_Graph_get #undef PyMPI_HAVE_MPI_Graph_map #undef PyMPI_HAVE_MPI_Graph_neighbors_count #undef PyMPI_HAVE_MPI_Graph_neighbors #undef PyMPI_HAVE_MPI_DIST_GRAPH #undef PyMPI_HAVE_MPI_UNWEIGHTED #undef PyMPI_HAVE_MPI_WEIGHTS_EMPTY #undef PyMPI_HAVE_MPI_Dist_graph_create_adjacent #undef PyMPI_HAVE_MPI_Dist_graph_create #undef PyMPI_HAVE_MPI_Dist_graph_neighbors_count #undef PyMPI_HAVE_MPI_Dist_graph_neighbors #undef PyMPI_HAVE_MPI_Intercomm_create #undef PyMPI_HAVE_MPI_Intercomm_create_from_groups #undef PyMPI_HAVE_MPI_Comm_remote_group #undef PyMPI_HAVE_MPI_Comm_remote_size #undef PyMPI_HAVE_MPI_Intercomm_merge #undef PyMPI_HAVE_MPI_MAX_PORT_NAME #undef PyMPI_HAVE_MPI_Open_port #undef PyMPI_HAVE_MPI_Close_port #undef PyMPI_HAVE_MPI_Publish_name #undef PyMPI_HAVE_MPI_Unpublish_name #undef PyMPI_HAVE_MPI_Lookup_name #undef PyMPI_HAVE_MPI_Comm_accept #undef PyMPI_HAVE_MPI_Comm_connect #undef PyMPI_HAVE_MPI_Comm_join #undef PyMPI_HAVE_MPI_Comm_disconnect #undef PyMPI_HAVE_MPI_ARGV_NULL #undef PyMPI_HAVE_MPI_ARGVS_NULL #undef PyMPI_HAVE_MPI_ERRCODES_IGNORE #undef PyMPI_HAVE_MPI_Comm_spawn #undef PyMPI_HAVE_MPI_Comm_spawn_multiple #undef PyMPI_HAVE_MPI_Comm_get_parent #undef PyMPI_HAVE_MPI_Comm_get_name #undef PyMPI_HAVE_MPI_Comm_set_name #undef PyMPI_HAVE_MPI_TAG_UB #undef PyMPI_HAVE_MPI_IO #undef PyMPI_HAVE_MPI_WTIME_IS_GLOBAL #undef PyMPI_HAVE_MPI_UNIVERSE_SIZE #undef PyMPI_HAVE_MPI_APPNUM #undef PyMPI_HAVE_MPI_LASTUSEDCODE #undef PyMPI_HAVE_MPI_Comm_get_attr #undef PyMPI_HAVE_MPI_Comm_set_attr #undef PyMPI_HAVE_MPI_Comm_delete_attr #undef PyMPI_HAVE_MPI_Comm_copy_attr_function #undef PyMPI_HAVE_MPI_Comm_delete_attr_function #undef PyMPI_HAVE_MPI_COMM_DUP_FN #undef PyMPI_HAVE_MPI_COMM_NULL_COPY_FN #undef PyMPI_HAVE_MPI_COMM_NULL_DELETE_FN #undef PyMPI_HAVE_MPI_Comm_create_keyval #undef PyMPI_HAVE_MPI_Comm_free_keyval #undef PyMPI_HAVE_MPI_Comm_errhandler_fn #undef PyMPI_HAVE_MPI_Comm_errhandler_function #undef PyMPI_HAVE_MPI_Comm_create_errhandler #undef PyMPI_HAVE_MPI_Comm_get_errhandler #undef PyMPI_HAVE_MPI_Comm_set_errhandler #undef PyMPI_HAVE_MPI_Comm_call_errhandler #undef PyMPI_HAVE_MPI_Buffer_attach_c #undef PyMPI_HAVE_MPI_Buffer_detach_c #undef PyMPI_HAVE_MPI_Comm_attach_buffer_c #undef PyMPI_HAVE_MPI_Comm_detach_buffer_c #undef PyMPI_HAVE_MPI_Session_attach_buffer_c #undef PyMPI_HAVE_MPI_Session_detach_buffer_c #undef PyMPI_HAVE_MPI_Send_c #undef PyMPI_HAVE_MPI_Recv_c #undef PyMPI_HAVE_MPI_Sendrecv_c #undef PyMPI_HAVE_MPI_Sendrecv_replace_c #undef PyMPI_HAVE_MPI_Bsend_c #undef PyMPI_HAVE_MPI_Ssend_c #undef PyMPI_HAVE_MPI_Rsend_c #undef PyMPI_HAVE_MPI_Isend_c #undef PyMPI_HAVE_MPI_Irecv_c #undef PyMPI_HAVE_MPI_Isendrecv_c #undef PyMPI_HAVE_MPI_Isendrecv_replace_c #undef PyMPI_HAVE_MPI_Ibsend_c #undef 
PyMPI_HAVE_MPI_Issend_c #undef PyMPI_HAVE_MPI_Irsend_c #undef PyMPI_HAVE_MPI_Send_init_c #undef PyMPI_HAVE_MPI_Recv_init_c #undef PyMPI_HAVE_MPI_Bsend_init_c #undef PyMPI_HAVE_MPI_Ssend_init_c #undef PyMPI_HAVE_MPI_Rsend_init_c #undef PyMPI_HAVE_MPI_Mrecv_c #undef PyMPI_HAVE_MPI_Imrecv_c #undef PyMPI_HAVE_MPI_Bcast_c #undef PyMPI_HAVE_MPI_Gather_c #undef PyMPI_HAVE_MPI_Gatherv_c #undef PyMPI_HAVE_MPI_Scatter_c #undef PyMPI_HAVE_MPI_Scatterv_c #undef PyMPI_HAVE_MPI_Allgather_c #undef PyMPI_HAVE_MPI_Allgatherv_c #undef PyMPI_HAVE_MPI_Alltoall_c #undef PyMPI_HAVE_MPI_Alltoallv_c #undef PyMPI_HAVE_MPI_Alltoallw_c #undef PyMPI_HAVE_MPI_Reduce_local_c #undef PyMPI_HAVE_MPI_Reduce_c #undef PyMPI_HAVE_MPI_Allreduce_c #undef PyMPI_HAVE_MPI_Reduce_scatter_block_c #undef PyMPI_HAVE_MPI_Reduce_scatter_c #undef PyMPI_HAVE_MPI_Scan_c #undef PyMPI_HAVE_MPI_Exscan_c #undef PyMPI_HAVE_MPI_Neighbor_allgather_c #undef PyMPI_HAVE_MPI_Neighbor_allgatherv_c #undef PyMPI_HAVE_MPI_Neighbor_alltoall_c #undef PyMPI_HAVE_MPI_Neighbor_alltoallv_c #undef PyMPI_HAVE_MPI_Neighbor_alltoallw_c #undef PyMPI_HAVE_MPI_Ibcast_c #undef PyMPI_HAVE_MPI_Igather_c #undef PyMPI_HAVE_MPI_Igatherv_c #undef PyMPI_HAVE_MPI_Iscatter_c #undef PyMPI_HAVE_MPI_Iscatterv_c #undef PyMPI_HAVE_MPI_Iallgather_c #undef PyMPI_HAVE_MPI_Iallgatherv_c #undef PyMPI_HAVE_MPI_Ialltoall_c #undef PyMPI_HAVE_MPI_Ialltoallv_c #undef PyMPI_HAVE_MPI_Ialltoallw_c #undef PyMPI_HAVE_MPI_Ireduce_c #undef PyMPI_HAVE_MPI_Iallreduce_c #undef PyMPI_HAVE_MPI_Ireduce_scatter_block_c #undef PyMPI_HAVE_MPI_Ireduce_scatter_c #undef PyMPI_HAVE_MPI_Iscan_c #undef PyMPI_HAVE_MPI_Iexscan_c #undef PyMPI_HAVE_MPI_Ineighbor_allgather_c #undef PyMPI_HAVE_MPI_Ineighbor_allgatherv_c #undef PyMPI_HAVE_MPI_Ineighbor_alltoall_c #undef PyMPI_HAVE_MPI_Ineighbor_alltoallv_c #undef PyMPI_HAVE_MPI_Ineighbor_alltoallw_c #undef PyMPI_HAVE_MPI_Bcast_init_c #undef PyMPI_HAVE_MPI_Gather_init_c #undef PyMPI_HAVE_MPI_Gatherv_init_c #undef PyMPI_HAVE_MPI_Scatter_init_c #undef PyMPI_HAVE_MPI_Scatterv_init_c #undef PyMPI_HAVE_MPI_Allgather_init_c #undef PyMPI_HAVE_MPI_Allgatherv_init_c #undef PyMPI_HAVE_MPI_Alltoall_init_c #undef PyMPI_HAVE_MPI_Alltoallv_init_c #undef PyMPI_HAVE_MPI_Alltoallw_init_c #undef PyMPI_HAVE_MPI_Reduce_init_c #undef PyMPI_HAVE_MPI_Allreduce_init_c #undef PyMPI_HAVE_MPI_Reduce_scatter_block_init_c #undef PyMPI_HAVE_MPI_Reduce_scatter_init_c #undef PyMPI_HAVE_MPI_Scan_init_c #undef PyMPI_HAVE_MPI_Exscan_init_c #undef PyMPI_HAVE_MPI_Neighbor_allgather_init_c #undef PyMPI_HAVE_MPI_Neighbor_allgatherv_init_c #undef PyMPI_HAVE_MPI_Neighbor_alltoall_init_c #undef PyMPI_HAVE_MPI_Neighbor_alltoallv_init_c #undef PyMPI_HAVE_MPI_Neighbor_alltoallw_init_c #undef PyMPI_HAVE_MPI_WIN_NULL #undef PyMPI_HAVE_MPI_Win_free #undef PyMPI_HAVE_MPI_Win_create #undef PyMPI_HAVE_MPI_Win_allocate #undef PyMPI_HAVE_MPI_Win_allocate_shared #undef PyMPI_HAVE_MPI_Win_shared_query #undef PyMPI_HAVE_MPI_Win_create_dynamic #undef PyMPI_HAVE_MPI_Win_attach #undef PyMPI_HAVE_MPI_Win_detach #undef PyMPI_HAVE_MPI_Win_set_info #undef PyMPI_HAVE_MPI_Win_get_info #undef PyMPI_HAVE_MPI_Win_get_group #undef PyMPI_HAVE_MPI_Get #undef PyMPI_HAVE_MPI_Put #undef PyMPI_HAVE_MPI_Accumulate #undef PyMPI_HAVE_MPI_Get_accumulate #undef PyMPI_HAVE_MPI_Fetch_and_op #undef PyMPI_HAVE_MPI_Compare_and_swap #undef PyMPI_HAVE_MPI_Rget #undef PyMPI_HAVE_MPI_Rput #undef PyMPI_HAVE_MPI_Raccumulate #undef PyMPI_HAVE_MPI_Rget_accumulate #undef PyMPI_HAVE_MPI_MODE_NOCHECK #undef PyMPI_HAVE_MPI_MODE_NOSTORE #undef 
PyMPI_HAVE_MPI_MODE_NOPUT #undef PyMPI_HAVE_MPI_MODE_NOPRECEDE #undef PyMPI_HAVE_MPI_MODE_NOSUCCEED #undef PyMPI_HAVE_MPI_Win_fence #undef PyMPI_HAVE_MPI_Win_post #undef PyMPI_HAVE_MPI_Win_start #undef PyMPI_HAVE_MPI_Win_complete #undef PyMPI_HAVE_MPI_Win_wait #undef PyMPI_HAVE_MPI_Win_test #undef PyMPI_HAVE_MPI_LOCK_EXCLUSIVE #undef PyMPI_HAVE_MPI_LOCK_SHARED #undef PyMPI_HAVE_MPI_Win_lock #undef PyMPI_HAVE_MPI_Win_unlock #undef PyMPI_HAVE_MPI_Win_lock_all #undef PyMPI_HAVE_MPI_Win_unlock_all #undef PyMPI_HAVE_MPI_Win_flush #undef PyMPI_HAVE_MPI_Win_flush_all #undef PyMPI_HAVE_MPI_Win_flush_local #undef PyMPI_HAVE_MPI_Win_flush_local_all #undef PyMPI_HAVE_MPI_Win_sync #undef PyMPI_HAVE_MPI_Win_get_name #undef PyMPI_HAVE_MPI_Win_set_name #undef PyMPI_HAVE_MPI_WIN_BASE #undef PyMPI_HAVE_MPI_WIN_SIZE #undef PyMPI_HAVE_MPI_WIN_DISP_UNIT #undef PyMPI_HAVE_MPI_WIN_CREATE_FLAVOR #undef PyMPI_HAVE_MPI_WIN_MODEL #undef PyMPI_HAVE_MPI_WIN_FLAVOR_CREATE #undef PyMPI_HAVE_MPI_WIN_FLAVOR_ALLOCATE #undef PyMPI_HAVE_MPI_WIN_FLAVOR_DYNAMIC #undef PyMPI_HAVE_MPI_WIN_FLAVOR_SHARED #undef PyMPI_HAVE_MPI_WIN_SEPARATE #undef PyMPI_HAVE_MPI_WIN_UNIFIED #undef PyMPI_HAVE_MPI_Win_get_attr #undef PyMPI_HAVE_MPI_Win_set_attr #undef PyMPI_HAVE_MPI_Win_delete_attr #undef PyMPI_HAVE_MPI_Win_copy_attr_function #undef PyMPI_HAVE_MPI_Win_delete_attr_function #undef PyMPI_HAVE_MPI_WIN_DUP_FN #undef PyMPI_HAVE_MPI_WIN_NULL_COPY_FN #undef PyMPI_HAVE_MPI_WIN_NULL_DELETE_FN #undef PyMPI_HAVE_MPI_Win_create_keyval #undef PyMPI_HAVE_MPI_Win_free_keyval #undef PyMPI_HAVE_MPI_Win_errhandler_fn #undef PyMPI_HAVE_MPI_Win_errhandler_function #undef PyMPI_HAVE_MPI_Win_create_errhandler #undef PyMPI_HAVE_MPI_Win_get_errhandler #undef PyMPI_HAVE_MPI_Win_set_errhandler #undef PyMPI_HAVE_MPI_Win_call_errhandler #undef PyMPI_HAVE_MPI_Win_create_c #undef PyMPI_HAVE_MPI_Win_allocate_c #undef PyMPI_HAVE_MPI_Win_allocate_shared_c #undef PyMPI_HAVE_MPI_Win_shared_query_c #undef PyMPI_HAVE_MPI_Get_c #undef PyMPI_HAVE_MPI_Put_c #undef PyMPI_HAVE_MPI_Accumulate_c #undef PyMPI_HAVE_MPI_Get_accumulate_c #undef PyMPI_HAVE_MPI_Rget_c #undef PyMPI_HAVE_MPI_Rput_c #undef PyMPI_HAVE_MPI_Raccumulate_c #undef PyMPI_HAVE_MPI_Rget_accumulate_c #undef PyMPI_HAVE_MPI_FILE_NULL #undef PyMPI_HAVE_MPI_MODE_RDONLY #undef PyMPI_HAVE_MPI_MODE_RDWR #undef PyMPI_HAVE_MPI_MODE_WRONLY #undef PyMPI_HAVE_MPI_MODE_CREATE #undef PyMPI_HAVE_MPI_MODE_EXCL #undef PyMPI_HAVE_MPI_MODE_DELETE_ON_CLOSE #undef PyMPI_HAVE_MPI_MODE_UNIQUE_OPEN #undef PyMPI_HAVE_MPI_MODE_APPEND #undef PyMPI_HAVE_MPI_MODE_SEQUENTIAL #undef PyMPI_HAVE_MPI_File_open #undef PyMPI_HAVE_MPI_File_close #undef PyMPI_HAVE_MPI_File_delete #undef PyMPI_HAVE_MPI_File_set_size #undef PyMPI_HAVE_MPI_File_preallocate #undef PyMPI_HAVE_MPI_File_get_size #undef PyMPI_HAVE_MPI_File_get_group #undef PyMPI_HAVE_MPI_File_get_amode #undef PyMPI_HAVE_MPI_File_set_info #undef PyMPI_HAVE_MPI_File_get_info #undef PyMPI_HAVE_MPI_File_get_view #undef PyMPI_HAVE_MPI_File_set_view #undef PyMPI_HAVE_MPI_File_read_at #undef PyMPI_HAVE_MPI_File_read_at_all #undef PyMPI_HAVE_MPI_File_write_at #undef PyMPI_HAVE_MPI_File_write_at_all #undef PyMPI_HAVE_MPI_File_iread_at #undef PyMPI_HAVE_MPI_File_iread_at_all #undef PyMPI_HAVE_MPI_File_iwrite_at #undef PyMPI_HAVE_MPI_File_iwrite_at_all #undef PyMPI_HAVE_MPI_SEEK_SET #undef PyMPI_HAVE_MPI_SEEK_CUR #undef PyMPI_HAVE_MPI_SEEK_END #undef PyMPI_HAVE_MPI_DISPLACEMENT_CURRENT #undef PyMPI_HAVE_MPI_File_seek #undef PyMPI_HAVE_MPI_File_get_position #undef PyMPI_HAVE_MPI_File_get_byte_offset 
#undef PyMPI_HAVE_MPI_File_read #undef PyMPI_HAVE_MPI_File_read_all #undef PyMPI_HAVE_MPI_File_write #undef PyMPI_HAVE_MPI_File_write_all #undef PyMPI_HAVE_MPI_File_iread #undef PyMPI_HAVE_MPI_File_iread_all #undef PyMPI_HAVE_MPI_File_iwrite #undef PyMPI_HAVE_MPI_File_iwrite_all #undef PyMPI_HAVE_MPI_File_read_shared #undef PyMPI_HAVE_MPI_File_write_shared #undef PyMPI_HAVE_MPI_File_iread_shared #undef PyMPI_HAVE_MPI_File_iwrite_shared #undef PyMPI_HAVE_MPI_File_read_ordered #undef PyMPI_HAVE_MPI_File_write_ordered #undef PyMPI_HAVE_MPI_File_seek_shared #undef PyMPI_HAVE_MPI_File_get_position_shared #undef PyMPI_HAVE_MPI_File_read_at_all_begin #undef PyMPI_HAVE_MPI_File_read_at_all_end #undef PyMPI_HAVE_MPI_File_write_at_all_begin #undef PyMPI_HAVE_MPI_File_write_at_all_end #undef PyMPI_HAVE_MPI_File_read_all_begin #undef PyMPI_HAVE_MPI_File_read_all_end #undef PyMPI_HAVE_MPI_File_write_all_begin #undef PyMPI_HAVE_MPI_File_write_all_end #undef PyMPI_HAVE_MPI_File_read_ordered_begin #undef PyMPI_HAVE_MPI_File_read_ordered_end #undef PyMPI_HAVE_MPI_File_write_ordered_begin #undef PyMPI_HAVE_MPI_File_write_ordered_end #undef PyMPI_HAVE_MPI_File_get_type_extent #undef PyMPI_HAVE_MPI_File_set_atomicity #undef PyMPI_HAVE_MPI_File_get_atomicity #undef PyMPI_HAVE_MPI_File_sync #undef PyMPI_HAVE_MPI_File_errhandler_fn #undef PyMPI_HAVE_MPI_File_errhandler_function #undef PyMPI_HAVE_MPI_File_create_errhandler #undef PyMPI_HAVE_MPI_File_get_errhandler #undef PyMPI_HAVE_MPI_File_set_errhandler #undef PyMPI_HAVE_MPI_File_call_errhandler #undef PyMPI_HAVE_MPI_Datarep_conversion_function #undef PyMPI_HAVE_MPI_Datarep_extent_function #undef PyMPI_HAVE_MPI_CONVERSION_FN_NULL #undef PyMPI_HAVE_MPI_MAX_DATAREP_STRING #undef PyMPI_HAVE_MPI_Register_datarep #undef PyMPI_HAVE_MPI_File_read_at_c #undef PyMPI_HAVE_MPI_File_read_at_all_c #undef PyMPI_HAVE_MPI_File_write_at_c #undef PyMPI_HAVE_MPI_File_write_at_all_c #undef PyMPI_HAVE_MPI_File_iread_at_c #undef PyMPI_HAVE_MPI_File_iread_at_all_c #undef PyMPI_HAVE_MPI_File_iwrite_at_c #undef PyMPI_HAVE_MPI_File_iwrite_at_all_c #undef PyMPI_HAVE_MPI_File_read_c #undef PyMPI_HAVE_MPI_File_read_all_c #undef PyMPI_HAVE_MPI_File_write_c #undef PyMPI_HAVE_MPI_File_write_all_c #undef PyMPI_HAVE_MPI_File_iread_c #undef PyMPI_HAVE_MPI_File_iread_all_c #undef PyMPI_HAVE_MPI_File_iwrite_c #undef PyMPI_HAVE_MPI_File_iwrite_all_c #undef PyMPI_HAVE_MPI_File_read_shared_c #undef PyMPI_HAVE_MPI_File_write_shared_c #undef PyMPI_HAVE_MPI_File_iread_shared_c #undef PyMPI_HAVE_MPI_File_iwrite_shared_c #undef PyMPI_HAVE_MPI_File_read_ordered_c #undef PyMPI_HAVE_MPI_File_write_ordered_c #undef PyMPI_HAVE_MPI_File_read_at_all_begin_c #undef PyMPI_HAVE_MPI_File_write_at_all_begin_c #undef PyMPI_HAVE_MPI_File_read_all_begin_c #undef PyMPI_HAVE_MPI_File_write_all_begin_c #undef PyMPI_HAVE_MPI_File_read_ordered_begin_c #undef PyMPI_HAVE_MPI_File_write_ordered_begin_c #undef PyMPI_HAVE_MPI_File_get_type_extent_c #undef PyMPI_HAVE_MPI_Datarep_conversion_function_c #undef PyMPI_HAVE_MPI_CONVERSION_FN_NULL_C #undef PyMPI_HAVE_MPI_Register_datarep_c #undef PyMPI_HAVE_MPI_MAX_ERROR_STRING #undef PyMPI_HAVE_MPI_Error_class #undef PyMPI_HAVE_MPI_Error_string #undef PyMPI_HAVE_MPI_Add_error_class #undef PyMPI_HAVE_MPI_Remove_error_class #undef PyMPI_HAVE_MPI_Add_error_code #undef PyMPI_HAVE_MPI_Remove_error_code #undef PyMPI_HAVE_MPI_Add_error_string #undef PyMPI_HAVE_MPI_Remove_error_string #undef PyMPI_HAVE_MPI_SUCCESS #undef PyMPI_HAVE_MPI_ERR_LASTCODE #undef PyMPI_HAVE_MPI_ERR_TYPE #undef 
PyMPI_HAVE_MPI_ERR_REQUEST #undef PyMPI_HAVE_MPI_ERR_OP #undef PyMPI_HAVE_MPI_ERR_GROUP #undef PyMPI_HAVE_MPI_ERR_INFO #undef PyMPI_HAVE_MPI_ERR_ERRHANDLER #undef PyMPI_HAVE_MPI_ERR_SESSION #undef PyMPI_HAVE_MPI_ERR_COMM #undef PyMPI_HAVE_MPI_ERR_WIN #undef PyMPI_HAVE_MPI_ERR_FILE #undef PyMPI_HAVE_MPI_ERR_BUFFER #undef PyMPI_HAVE_MPI_ERR_COUNT #undef PyMPI_HAVE_MPI_ERR_TAG #undef PyMPI_HAVE_MPI_ERR_RANK #undef PyMPI_HAVE_MPI_ERR_ROOT #undef PyMPI_HAVE_MPI_ERR_TRUNCATE #undef PyMPI_HAVE_MPI_ERR_IN_STATUS #undef PyMPI_HAVE_MPI_ERR_PENDING #undef PyMPI_HAVE_MPI_ERR_TOPOLOGY #undef PyMPI_HAVE_MPI_ERR_DIMS #undef PyMPI_HAVE_MPI_ERR_ARG #undef PyMPI_HAVE_MPI_ERR_OTHER #undef PyMPI_HAVE_MPI_ERR_UNKNOWN #undef PyMPI_HAVE_MPI_ERR_INTERN #undef PyMPI_HAVE_MPI_ERR_KEYVAL #undef PyMPI_HAVE_MPI_ERR_NO_MEM #undef PyMPI_HAVE_MPI_ERR_INFO_KEY #undef PyMPI_HAVE_MPI_ERR_INFO_VALUE #undef PyMPI_HAVE_MPI_ERR_INFO_NOKEY #undef PyMPI_HAVE_MPI_ERR_SPAWN #undef PyMPI_HAVE_MPI_ERR_PORT #undef PyMPI_HAVE_MPI_ERR_SERVICE #undef PyMPI_HAVE_MPI_ERR_NAME #undef PyMPI_HAVE_MPI_ERR_PROC_ABORTED #undef PyMPI_HAVE_MPI_ERR_BASE #undef PyMPI_HAVE_MPI_ERR_SIZE #undef PyMPI_HAVE_MPI_ERR_DISP #undef PyMPI_HAVE_MPI_ERR_ASSERT #undef PyMPI_HAVE_MPI_ERR_LOCKTYPE #undef PyMPI_HAVE_MPI_ERR_RMA_CONFLICT #undef PyMPI_HAVE_MPI_ERR_RMA_SYNC #undef PyMPI_HAVE_MPI_ERR_RMA_RANGE #undef PyMPI_HAVE_MPI_ERR_RMA_ATTACH #undef PyMPI_HAVE_MPI_ERR_RMA_SHARED #undef PyMPI_HAVE_MPI_ERR_RMA_FLAVOR #undef PyMPI_HAVE_MPI_ERR_BAD_FILE #undef PyMPI_HAVE_MPI_ERR_NO_SUCH_FILE #undef PyMPI_HAVE_MPI_ERR_FILE_EXISTS #undef PyMPI_HAVE_MPI_ERR_FILE_IN_USE #undef PyMPI_HAVE_MPI_ERR_AMODE #undef PyMPI_HAVE_MPI_ERR_ACCESS #undef PyMPI_HAVE_MPI_ERR_READ_ONLY #undef PyMPI_HAVE_MPI_ERR_NO_SPACE #undef PyMPI_HAVE_MPI_ERR_QUOTA #undef PyMPI_HAVE_MPI_ERR_UNSUPPORTED_OPERATION #undef PyMPI_HAVE_MPI_ERR_NOT_SAME #undef PyMPI_HAVE_MPI_ERR_IO #undef PyMPI_HAVE_MPI_ERR_UNSUPPORTED_DATAREP #undef PyMPI_HAVE_MPI_ERR_CONVERSION #undef PyMPI_HAVE_MPI_ERR_DUP_DATAREP #undef PyMPI_HAVE_MPI_ERR_VALUE_TOO_LARGE #undef PyMPI_HAVE_MPI_Alloc_mem #undef PyMPI_HAVE_MPI_Free_mem #define PyMPI_HAVE_MPI_Init 1 #define PyMPI_HAVE_MPI_Finalize 1 #define PyMPI_HAVE_MPI_Initialized 1 #define PyMPI_HAVE_MPI_Finalized 1 #undef PyMPI_HAVE_MPI_THREAD_SINGLE #undef PyMPI_HAVE_MPI_THREAD_FUNNELED #undef PyMPI_HAVE_MPI_THREAD_SERIALIZED #undef PyMPI_HAVE_MPI_THREAD_MULTIPLE #undef PyMPI_HAVE_MPI_Init_thread #undef PyMPI_HAVE_MPI_Query_thread #undef PyMPI_HAVE_MPI_Is_thread_main #undef PyMPI_HAVE_MPI_VERSION #undef PyMPI_HAVE_MPI_SUBVERSION #undef PyMPI_HAVE_MPI_Get_version #undef PyMPI_HAVE_MPI_MAX_LIBRARY_VERSION_STRING #undef PyMPI_HAVE_MPI_Get_library_version #undef PyMPI_HAVE_MPI_MAX_PROCESSOR_NAME #undef PyMPI_HAVE_MPI_Get_processor_name #undef PyMPI_HAVE_MPI_Get_hw_resource_info #undef PyMPI_HAVE_MPI_Wtime #undef PyMPI_HAVE_MPI_Wtick #undef PyMPI_HAVE_MPI_Pcontrol #undef PyMPI_HAVE_MPI_Fint #undef PyMPI_HAVE_MPI_F_SOURCE #undef PyMPI_HAVE_MPI_F_TAG #undef PyMPI_HAVE_MPI_F_ERROR #undef PyMPI_HAVE_MPI_F_STATUS_SIZE #undef PyMPI_HAVE_MPI_F_STATUS_IGNORE #undef PyMPI_HAVE_MPI_F_STATUSES_IGNORE #undef PyMPI_HAVE_MPI_Status_c2f #undef PyMPI_HAVE_MPI_Status_f2c #undef PyMPI_HAVE_MPI_Type_c2f #undef PyMPI_HAVE_MPI_Request_c2f #undef PyMPI_HAVE_MPI_Message_c2f #undef PyMPI_HAVE_MPI_Op_c2f #undef PyMPI_HAVE_MPI_Group_c2f #undef PyMPI_HAVE_MPI_Info_c2f #undef PyMPI_HAVE_MPI_Session_c2f #undef PyMPI_HAVE_MPI_Comm_c2f #undef PyMPI_HAVE_MPI_Win_c2f #undef PyMPI_HAVE_MPI_File_c2f #undef 
PyMPI_HAVE_MPI_Errhandler_c2f #undef PyMPI_HAVE_MPI_Type_f2c #undef PyMPI_HAVE_MPI_Request_f2c #undef PyMPI_HAVE_MPI_Message_f2c #undef PyMPI_HAVE_MPI_Op_f2c #undef PyMPI_HAVE_MPI_Group_f2c #undef PyMPI_HAVE_MPI_Info_f2c #undef PyMPI_HAVE_MPI_Session_f2c #undef PyMPI_HAVE_MPI_Comm_f2c #undef PyMPI_HAVE_MPI_Win_f2c #undef PyMPI_HAVE_MPI_File_f2c #undef PyMPI_HAVE_MPI_Errhandler_f2c #undef PyMPI_HAVE_MPI_HOST #undef PyMPI_HAVE_MPI_Info_get #undef PyMPI_HAVE_MPI_Info_get_valuelen #undef PyMPI_HAVE_MPI_Attr_get #undef PyMPI_HAVE_MPI_Attr_put #undef PyMPI_HAVE_MPI_Attr_delete #undef PyMPI_HAVE_MPI_Copy_function #undef PyMPI_HAVE_MPI_Delete_function #undef PyMPI_HAVE_MPI_DUP_FN #undef PyMPI_HAVE_MPI_NULL_COPY_FN #undef PyMPI_HAVE_MPI_NULL_DELETE_FN #undef PyMPI_HAVE_MPI_Keyval_create #undef PyMPI_HAVE_MPI_Keyval_free #undef PyMPI_HAVE_MPI_Errhandler_get #undef PyMPI_HAVE_MPI_Errhandler_set #undef PyMPI_HAVE_MPI_Handler_function #undef PyMPI_HAVE_MPI_Errhandler_create #undef PyMPI_HAVE_MPI_Address #undef PyMPI_HAVE_MPI_UB #undef PyMPI_HAVE_MPI_LB #undef PyMPI_HAVE_MPI_Type_lb #undef PyMPI_HAVE_MPI_Type_ub #undef PyMPI_HAVE_MPI_Type_extent #undef PyMPI_HAVE_MPI_Type_hvector #undef PyMPI_HAVE_MPI_Type_hindexed #undef PyMPI_HAVE_MPI_Type_struct #undef PyMPI_HAVE_MPI_COMBINER_HVECTOR_INTEGER #undef PyMPI_HAVE_MPI_COMBINER_HINDEXED_INTEGER #undef PyMPI_HAVE_MPI_COMBINER_STRUCT_INTEGER #undef PyMPI_HAVE_MPI_ERR_REVOKED #undef PyMPI_HAVE_MPI_ERR_PROC_FAILED #undef PyMPI_HAVE_MPI_ERR_PROC_FAILED_PENDING #undef PyMPI_HAVE_MPI_Comm_revoke #undef PyMPI_HAVE_MPI_Comm_is_revoked #undef PyMPI_HAVE_MPI_Comm_get_failed #undef PyMPI_HAVE_MPI_Comm_ack_failed #undef PyMPI_HAVE_MPI_Comm_agree #undef PyMPI_HAVE_MPI_Comm_iagree #undef PyMPI_HAVE_MPI_Comm_shrink #undef PyMPI_HAVE_MPI_Comm_ishrink #endif /* !PyMPI_PYMPICONF_H */ mpi4py-4.0.3/conf/requirements-build-cython.txt000066400000000000000000000000201475341043600215470ustar00rootroot00000000000000cython >= 3.0.0 mpi4py-4.0.3/conf/requirements-build-mesonpy.txt000066400000000000000000000000151475341043600217410ustar00rootroot00000000000000meson-python mpi4py-4.0.3/conf/requirements-build-skbuild.txt000066400000000000000000000000351475341043600217060ustar00rootroot00000000000000scikit-build-core setuptools mpi4py-4.0.3/conf/requirements-docs.txt000066400000000000000000000001051475341043600201020ustar00rootroot00000000000000sphinx == 8.1.3 sphinx-copybutton == 0.5.2 sphinx-rtd-theme == 3.0.2 mpi4py-4.0.3/conf/requirements-lint.txt000066400000000000000000000002441475341043600201240ustar00rootroot00000000000000codespell cython cython-lint flake8 flake8-assertive flake8-bandit flake8-bugbear flake8-docstrings flake8-unused-arguments flake8-use-fstring pylint ruff yamllint mpi4py-4.0.3/conf/requirements-test.txt000066400000000000000000000004001475341043600201270ustar00rootroot00000000000000pickle5 ; implementation_name == 'cpython' and python_version < '3.8' numpy ; implementation_name == 'cpython' or (sys_platform == 'linux' and python_version < '3.10') cffi ; implementation_name == 'cpython' pyyaml ; implementation_name == 'cpython' mpi4py-4.0.3/conf/requirements-type.txt000066400000000000000000000000241475341043600201330ustar00rootroot00000000000000mypy >= 1.0.0 numpy mpi4py-4.0.3/demo/000077500000000000000000000000001475341043600136735ustar00rootroot00000000000000mpi4py-4.0.3/demo/README.txt000066400000000000000000000003451475341043600153730ustar00rootroot00000000000000Issuing at the command line:: $ mpiexec -n 5 python helloworld.py will launch a five-process run 
of the Python interpreter and execute the test script ``helloworld.py``, a parallelized version of the *Hello World!* program. mpi4py-4.0.3/demo/check-mpiexec/000077500000000000000000000000001475341043600164005ustar00rootroot00000000000000mpi4py-4.0.3/demo/check-mpiexec/makefile000066400000000000000000000001541475341043600201000ustar00rootroot00000000000000include ../config.mk .PHONY: default build test clean default: test build: test: $(SHELL) run.sh clean: mpi4py-4.0.3/demo/check-mpiexec/run.sh000077500000000000000000000022351475341043600175450ustar00rootroot00000000000000#/bin/sh set -eu ENV=${ENV:-env} PYTHON=${PYTHON:-python${py:-}} MPIEXEC=${MPIEXEC:-mpiexec} badenv='' check='MPI4PY_CHECK_MPIEXEC' warn='PYTHONWARNINGS=always' pycmd='from mpi4py import MPI' set -x if command -v mpichversion > /dev/null; then badenv='OMPI_COMM_WORLD_SIZE=1' fi if command -v impi_info > /dev/null; then badenv='OMPI_COMM_WORLD_SIZE=1' fi if command -v ompi_info > /dev/null; then badenv='PMI_SIZE=1 HYDI_CONTROL_FD=dummy' fi if command -v $MPIEXEC > /dev/null; then $ENV $badenv PYTHONWARNINGS=error $MPIEXEC -n 1 $PYTHON -c "$pycmd" $ENV $badenv PYTHONWARNINGS=error $MPIEXEC -n 2 $PYTHON -c "$pycmd" fi $ENV $check=yes $warn $PYTHON -c "$pycmd;print()" 2>&1 | grep -q "" $ENV $check=OFF $warn $PYTHON -c "$pycmd;print()" 2>&1 | grep -q "" $ENV $check=foo $warn $PYTHON -c "$pycmd;print()" 2>&1 | grep -q "$check" $ENV $badenv $warn $PYTHON -c "$pycmd;print()" 2>&1 | grep -q "${badenv% *}" $ENV $badenv $check=1 $warn $PYTHON -c "$pycmd;print()" 2>&1 | grep -q "${badenv% *}" $ENV $badenv $check=0 $warn $PYTHON -c "$pycmd;print()" 2>&1 | grep -q "" $ENV $badenv $check= $warn $PYTHON -c "$pycmd;print()" 2>&1 | grep -q "" mpi4py-4.0.3/demo/compute-pi/000077500000000000000000000000001475341043600157555ustar00rootroot00000000000000mpi4py-4.0.3/demo/compute-pi/README.txt000066400000000000000000000000631475341043600174520ustar00rootroot00000000000000Different approaches for computing PI in parallel. mpi4py-4.0.3/demo/compute-pi/cpi-cco.py000066400000000000000000000023651475341043600176520ustar00rootroot00000000000000#!/usr/bin/env python """ Parallel PI computation using Collective Communication Operations (CCO) within Python objects exposing memory buffers (requires NumPy). usage:: $ mpiexec -n python cpi-buf.py """ from mpi4py import MPI from math import pi as PI from numpy import array def get_n(): prompt = "Enter the number of intervals: (0 quits) " try: n = int(input(prompt)) if n < 0: n = 0 except: n = 0 return n def comp_pi(n, myrank=0, nprocs=1): h = 1.0 / n s = 0.0 for i in range(myrank + 1, n + 1, nprocs): x = h * (i - 0.5) s += 4.0 / (1.0 + x**2) return s * h def prn_pi(pi, PI): message = "pi is approximately %.16f, error is %.16f" print (message % (pi, abs(pi - PI))) comm = MPI.COMM_WORLD nprocs = comm.Get_size() myrank = comm.Get_rank() n = array(0, dtype=int) pi = array(0, dtype=float) mypi = array(0, dtype=float) while True: if myrank == 0: _n = get_n() n.fill(_n) comm.Bcast([n, MPI.INT], root=0) if n == 0: break _mypi = comp_pi(n, myrank, nprocs) mypi.fill(_mypi) comm.Reduce([mypi, MPI.DOUBLE], [pi, MPI.DOUBLE], op=MPI.SUM, root=0) if myrank == 0: prn_pi(pi, PI) mpi4py-4.0.3/demo/compute-pi/cpi-dpm.py000066400000000000000000000107071475341043600176650ustar00rootroot00000000000000#!/usr/bin/env python """ Parallel PI computation using Dynamic Process Management (DPM) within Python objects exposing memory buffers (requires NumPy). 
usage: + parent/child model:: $ mpiexec -n 1 python cpi-dpm.py [nchilds] + client/server model:: $ [xterm -e] mpiexec -n python cpi-dpm.py server [-v] & $ [xterm -e] mpiexec -n 1 python cpi-dpm.py client [-v] """ import sys from mpi4py import MPI import numpy as N def get_n(): prompt = "Enter the number of intervals: (0 quits) " try: n = int(input(prompt)) if n < 0: n = 0 except: n = 0 return n def view(pi, np=None, wt=None): from math import pi as PI prn = sys.stdout.write if pi is not None: prn("computed pi is: %.16f\n" % pi) prn("absolute error: %.16f\n" % abs(pi - PI)) if np is not None: prn("computing units: %d processes\n" % np) if wt is not None: prn("wall clock time: %g seconds\n" % wt) sys.stdout.flush() def comp_pi(n, comm, root=0): nprocs = comm.Get_size() myrank = comm.Get_rank() n = N.array(n, 'i') comm.Bcast([n, MPI.INT], root=root) if n == 0: return 0.0 h = 1.0 / n; s = 0.0; for i in range(myrank, n, nprocs): x = h * (i + 0.5); s += 4.0 / (1.0 + x**2); mypi = s * h mypi = N.array(mypi, 'd') pi = N.array(0, 'd') comm.Reduce([mypi, MPI.DOUBLE], [pi, MPI.DOUBLE], root=root, op=MPI.SUM) return pi def master(icomm): n = get_n() wt = MPI.Wtime() n = N.array(n, 'i') icomm.Send([n, MPI.INT], dest=0) pi = N.array(0, 'd') icomm.Recv([pi, MPI.DOUBLE], source=0) wt = MPI.Wtime() - wt if n == 0: return np = icomm.Get_remote_size() view(pi, np, wt) def worker(icomm): myrank = icomm.Get_rank() if myrank == 0: source = dest = 0 else: source = dest = MPI.PROC_NULL n = N.array(0, 'i') icomm.Recv([n, MPI.INT], source=source) pi = comp_pi(n, comm=MPI.COMM_WORLD, root=0) pi = N.array(pi, 'd') icomm.Send([pi, MPI.DOUBLE], dest=dest) # Parent/Child def main_parent(nprocs=1): assert nprocs > 0 assert MPI.COMM_WORLD.Get_size() == 1 icomm = MPI.COMM_WORLD.Spawn(command=sys.executable, args=[__file__, 'child'], maxprocs=nprocs) master(icomm) icomm.Disconnect() def main_child(): icomm = MPI.Comm.Get_parent() assert icomm != MPI.COMM_NULL worker(icomm) icomm.Disconnect() # Client/Server def main_server(COMM): nprocs = COMM.Get_size() myrank = COMM.Get_rank() service, port, info = None, None, MPI.INFO_NULL if myrank == 0: port = MPI.Open_port(info) log(COMM, "open port '%s'", port) service = 'cpi' MPI.Publish_name(service, port, info) log(COMM, "service '%s' published.", service) else: port = '' log(COMM, "waiting for client connection ...") icomm = COMM.Accept(port, info, root=0) log(COMM, "client connection accepted.") worker(icomm) log(COMM, "disconnecting from client ...") icomm.Disconnect() log(COMM, "client disconnected.") if myrank == 0: MPI.Unpublish_name(service, port, info) log(COMM, "service '%s' unpublished", port) MPI.Close_port(port) log(COMM, "closed port '%s' ", port) def main_client(COMM): assert COMM.Get_size() == 1 service, info = 'cpi', MPI.INFO_NULL port = MPI.Lookup_name(service, info) log(COMM, "service '%s' found in port '%s'.", service, port) log(COMM, "connecting to server ...") icomm = COMM.Connect(port, info, root=0) log(COMM, "server connected.") master(icomm) log(COMM, "disconnecting from server ...") icomm.Disconnect() log(COMM, "server disconnected.") def main(): assert len(sys.argv) <= 2 if 'server' in sys.argv: main_server(MPI.COMM_WORLD) elif 'client' in sys.argv: main_client(MPI.COMM_WORLD) elif 'child' in sys.argv: main_child() else: try: nchilds = int(sys.argv[1]) except: nchilds = 2 main_parent(nchilds) VERBOSE = False def log(COMM, fmt, *args): if not VERBOSE: return if COMM.rank != 0: return sys.stdout.write(fmt % args) sys.stdout.write('\n') sys.stdout.flush() if 
__name__ == '__main__': if '-v' in sys.argv: VERBOSE = True sys.argv.remove('-v') main() mpi4py-4.0.3/demo/compute-pi/cpi-rma.py000066400000000000000000000031041475341043600176550ustar00rootroot00000000000000#!/usr/bin/env python """ Parallel PI computation using Remote Memory Access (RMA) within Python objects exposing memory buffers (requires NumPy). usage:: $ mpiexec -n python cpi-rma.py """ from mpi4py import MPI from math import pi as PI from numpy import array def get_n(): prompt = "Enter the number of intervals: (0 quits) " try: n = int(input(prompt)); if n < 0: n = 0 except: n = 0 return n def comp_pi(n, myrank=0, nprocs=1): h = 1.0 / n; s = 0.0; for i in range(myrank + 1, n + 1, nprocs): x = h * (i - 0.5); s += 4.0 / (1.0 + x**2); return s * h def prn_pi(pi, PI): message = "pi is approximately %.16f, error is %.16f" print (message % (pi, abs(pi - PI))) nprocs = MPI.COMM_WORLD.Get_size() myrank = MPI.COMM_WORLD.Get_rank() n = array(0, dtype=int) pi = array(0, dtype=float) mypi = array(0, dtype=float) if myrank == 0: win_n = MPI.Win.Create(n, comm=MPI.COMM_WORLD) win_pi = MPI.Win.Create(pi, comm=MPI.COMM_WORLD) else: win_n = MPI.Win.Create(None, comm=MPI.COMM_WORLD) win_pi = MPI.Win.Create(None, comm=MPI.COMM_WORLD) while True: if myrank == 0: _n = get_n() n.fill(_n) pi.fill(0.0) win_n.Fence() if myrank != 0: win_n.Get([n, MPI.INT], 0) win_n.Fence() if n == 0: break _mypi = comp_pi(n, myrank, nprocs) mypi.fill(_mypi) win_pi.Fence() win_pi.Accumulate([mypi, MPI.DOUBLE], 0, op=MPI.SUM) win_pi.Fence() if myrank == 0: prn_pi(pi, PI) win_n.Free() win_pi.Free() mpi4py-4.0.3/demo/compute-pi/makefile000066400000000000000000000004301475341043600174520ustar00rootroot00000000000000include ../config.mk .PHONY: default build test clean default: test build: test: echo 100 | $(MPIEXEC) $(NP_FLAG) $(NP) $(PYTHON) cpi-cco.py echo 100 | $(MPIEXEC) $(NP_FLAG) $(NP) $(PYTHON) cpi-rma.py echo 100 | $(MPIEXEC) $(NP_FLAG) 1 $(PYTHON) cpi-dpm.py $(NP) clean: mpi4py-4.0.3/demo/config.mk000066400000000000000000000022361475341043600154740ustar00rootroot00000000000000dir := $(abspath $(dir $(lastword $(MAKEFILE_LIST)))) PYTHON = python$(py) PYTHON_CONFIG = $(PYTHON) $(dir)/python-config PYBIND11_CONFIG = pybind11-config PYTHON_INCLUDE = $(shell $(PYTHON_CONFIG) --includes) PYBIND11_INCLUDE = $(shell $(PYBIND11_CONFIG) --includes) MPI4PY_INCLUDE = -I$(shell $(PYTHON) -m mpi4py --prefix)/include PYCCFLAGS = $(shell $(PYTHON_CONFIG) --cflags) PYLDFLAGS = $(shell $(PYTHON_CONFIG) --ldflags) CC_FLAGS = $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) $(PYCCFLAGS) $(PYLDFLAGS) CXX_FLAGS = $(CPPFLAGS) $(CXXFLAGS) $(LDFLAGS) $(PYCCFLAGS) $(PYLDFLAGS) FC_FLAGS = $(CPPFLAGS) $(FCFLAGS) $(LDFLAGS) $(PYLDFLAGS) CC_SHARED = -fPIC LD_SHARED = -shared EXT_SUFFIX = $(shell ${PYTHON_CONFIG} --extension-suffix) LIB_SUFFIX = $(suffix $(EXT_SUFFIX)) CYTHON = cython F2PY = f2py SWIG = swig MPICC = mpicc MPICXX = mpicxx MPIFORT = mpifort CC_FLAGS_SHARED = $(CC_SHARED) $(LD_SHARED) $(CC_FLAGS) CXX_FLAGS_SHARED = $(CC_SHARED) $(LD_SHARED) $(CXX_FLAGS) FC_FLAGS_SHARED = $(CC_SHARED) $(LD_SHARED) $(FC_FLAGS) MPIEXEC = mpiexec NP_FLAG = -n NP = 5 MPIEXEC_RUNCMD = $(MPIEXEC) $(MPIEXEC_FLAGS) $(NP_FLAG) $(NP) MPIEXEC_PYTHON = $(MPIEXEC_RUNCMD) $(PYTHON) mpi4py-4.0.3/demo/cuda-aware-mpi/000077500000000000000000000000001475341043600164675ustar00rootroot00000000000000mpi4py-4.0.3/demo/cuda-aware-mpi/use_cupy.py000066400000000000000000000024031475341043600206740ustar00rootroot00000000000000# Demonstrate how to work with Python GPU arrays using CUDA-aware 
MPI. # We choose the CuPy library for simplicity, but any CUDA array which # has the __cuda_array_interface__ attribute defined will work. # # Run this script using the following command: # mpiexec -n 2 python use_cupy.py from mpi4py import MPI import cupy comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() # Allreduce sendbuf = cupy.arange(10, dtype='i') recvbuf = cupy.empty_like(sendbuf) # always make sure the GPU buffer is ready before any MPI operation cupy.cuda.get_current_stream().synchronize() comm.Allreduce(sendbuf, recvbuf) assert cupy.allclose(recvbuf, sendbuf*size) # Bcast if rank == 0: buf = cupy.arange(100, dtype=cupy.complex64) else: buf = cupy.empty(100, dtype=cupy.complex64) cupy.cuda.get_current_stream().synchronize() comm.Bcast(buf) assert cupy.allclose(buf, cupy.arange(100, dtype=cupy.complex64)) # Send-Recv if rank == 0: buf = cupy.arange(20, dtype=cupy.float64) cupy.cuda.get_current_stream().synchronize() comm.Send(buf, dest=1, tag=88) else: buf = cupy.empty(20, dtype=cupy.float64) cupy.cuda.get_current_stream().synchronize() comm.Recv(buf, source=0, tag=88) assert cupy.allclose(buf, cupy.arange(20, dtype=cupy.float64)) mpi4py-4.0.3/demo/cuda-aware-mpi/use_numba.py000066400000000000000000000020611475341043600210160ustar00rootroot00000000000000# Demonstrate how to work with Python GPU arrays using CUDA-aware MPI. # A GPU array is allocated and manipulated through Numba, which is # compliant with the __cuda_array_interface__ standard. # # Run this script using the following command: # mpiexec -n 2 python use_cupy.py from mpi4py import MPI from numba import cuda import numpy @cuda.jit() def add_const(arr, value): x = cuda.grid(1) if x < arr.size: arr[x] += value comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() # Send-Recv if rank == 0: buf = cuda.device_array((20,), dtype='f') buf[:] = range(20) block = 32 grid = (buf.size + block - 1)//block add_const[grid, block](buf, 100) # always make sure the GPU buffer is ready before any MPI operation cuda.default_stream().synchronize() comm.Send(buf, dest=1, tag=77) else: buf = cuda.device_array((20,), dtype='f') cuda.default_stream().synchronize() comm.Recv(buf, source=0, tag=77) buf = buf.copy_to_host() assert numpy.allclose(buf, 100+numpy.arange(20, dtype='f')) mpi4py-4.0.3/demo/cython/000077500000000000000000000000001475341043600151775ustar00rootroot00000000000000mpi4py-4.0.3/demo/cython/helloworld.pyx000066400000000000000000000036641475341043600201250ustar00rootroot00000000000000# cython: language_level=3str cdef extern from "mpi-compat.h": pass # --------- # Python-level module import # (file: mpi4py/MPI.*.so) from mpi4py import MPI # Python-level objects and code size = MPI.COMM_WORLD.Get_size() rank = MPI.COMM_WORLD.Get_rank() pname = MPI.Get_processor_name() hwmess = "Hello, World! I am process %d of %d on %s." 
print (hwmess % (rank, size, pname)) # --------- # Cython-level cimport # this make available mpi4py's Python extension types # (file: mpi4py/MPI.pxd) from mpi4py cimport MPI from mpi4py.MPI cimport Intracomm as IntracommType from mpi4py.libmpi cimport MPI_Comm_size from mpi4py.libmpi cimport MPI_Comm_rank from mpi4py.libmpi cimport MPI_MAX_PROCESSOR_NAME from mpi4py.libmpi cimport MPI_Get_processor_name # C-level cdef, typed, Python objects cdef MPI.Comm WORLD = MPI.COMM_WORLD cdef IntracommType SELF = MPI.COMM_SELF cdef int ierr0 = 0 cdef int size0 = 0 ierr0 = MPI_Comm_size(WORLD.ob_mpi, &size0) cdef int rank0 = 0 ierr0 = MPI_Comm_rank(WORLD.ob_mpi, &rank0) cdef int rlen0 = 0 cdef char pname0[MPI_MAX_PROCESSOR_NAME] ierr0 = MPI_Get_processor_name(pname0, &rlen0) pname0[rlen0] = 0 # just in case ;-) hwmess = "Hello, World! I am process %d of %d on %s." print (hwmess % (rank0, size0, pname0.decode())) # --------- # Cython-level cimport with PXD file # this make available the native MPI C API # with namespace-protection (stuff accessed as mpi.XXX) # (file: mpi4py/libmpi.pxd) from mpi4py cimport libmpi as mpi cdef mpi.MPI_Comm world1 = WORLD.ob_mpi cdef int ierr1 = 0 cdef int size1 = 0 ierr1 = mpi.MPI_Comm_size(mpi.MPI_COMM_WORLD, &size1) cdef int rank1 = 0 ierr1 = mpi.MPI_Comm_rank(mpi.MPI_COMM_WORLD, &rank1) cdef int rlen1 = 0 cdef char pname1[mpi.MPI_MAX_PROCESSOR_NAME] ierr1 = mpi.MPI_Get_processor_name(pname1, &rlen1) pname1[rlen1] = 0 # just in case ;-) hwmess = "Hello, World! I am process %d of %d on %s." print (hwmess % (rank1, size1, pname1.decode())) # --------- mpi4py-4.0.3/demo/cython/makefile000066400000000000000000000006241475341043600167010ustar00rootroot00000000000000include ../config.mk .PHONY: default build test clean default: build test clean MODULE = helloworld SOURCE = $(MODULE).pyx GENSRC = $(MODULE).c TARGET = $(MODULE)$(EXT_SUFFIX) $(GENSRC): $(SOURCE) $(CYTHON) $< $(TARGET): $(GENSRC) $(MPICC) $(CC_FLAGS_SHARED) -o $@ $< src: $(GENSRC) build: $(TARGET) test: build $(PYTHON) -c 'import helloworld' clean: $(RM) -r $(TARGET) __pycache__ $(GENSRC) mpi4py-4.0.3/demo/cython/mpi-compat.h000066400000000000000000000006511475341043600174200ustar00rootroot00000000000000/* Author: Lisandro Dalcin */ /* Contact: dalcinl@gmail.com */ #ifndef MPI_COMPAT_H #define MPI_COMPAT_H #include #if (MPI_VERSION < 3) && !defined(PyMPI_HAVE_MPI_Message) typedef void *PyMPI_MPI_Message; #define MPI_Message PyMPI_MPI_Message #endif #if (MPI_VERSION < 4) && !defined(PyMPI_HAVE_MPI_Session) typedef void *PyMPI_MPI_Session; #define MPI_Session PyMPI_MPI_Session #endif #endif/*MPI_COMPAT_H*/ mpi4py-4.0.3/demo/embedding/000077500000000000000000000000001475341043600156115ustar00rootroot00000000000000mpi4py-4.0.3/demo/embedding/helloworld.c000066400000000000000000000011411475341043600201250ustar00rootroot00000000000000#include #include static const char helloworld[] = \ "from mpi4py import MPI \n" "hwmess = 'Hello, World! I am process %d of %d on %s.' 
\n" "myrank = MPI.COMM_WORLD.Get_rank() \n" "nprocs = MPI.COMM_WORLD.Get_size() \n" "procnm = MPI.Get_processor_name() \n" "print (hwmess % (myrank, nprocs, procnm)) \n" ""; int main(int argc, char *argv[]) { MPI_Init(&argc, &argv); Py_Initialize(); PyRun_SimpleString(helloworld); Py_Finalize(); MPI_Finalize(); return 0; } mpi4py-4.0.3/demo/embedding/makefile000066400000000000000000000006361475341043600173160ustar00rootroot00000000000000include ../config.mk .PHONY: default build test clean default: build test clean PROGRAM = helloworld SOURCE = $(PROGRAM).c TARGET = $(PROGRAM).exe CFLAGS = $(shell $(PYTHON_CONFIG) --cflags) LDFLAGS = $(shell $(PYTHON_CONFIG) --ldflags --embed) $(TARGET): $(SOURCE) $(MPICC) $(CFLAGS) $(LDFLAGS) -o $@ $< build: $(TARGET) test: build $(MPIEXEC_RUNCMD) ./$(TARGET) clean: $(RM) -r $(TARGET) $(TARGET).dSYM mpi4py-4.0.3/demo/futures/000077500000000000000000000000001475341043600153705ustar00rootroot00000000000000mpi4py-4.0.3/demo/futures/makefile000066400000000000000000000025711475341043600170750ustar00rootroot00000000000000include ../config.mk .PHONY: default build test clean default: test .PHONY: run-pool run-crawl run-hello run-primes run-pool: run-cpi run-crawl run-hello run-primes run-cpi: $(MPIEXEC) $(NP_FLAG) 1 $(PYTHON) run_cpi.py run-crawl: $(MPIEXEC) $(NP_FLAG) 1 $(PYTHON) run_crawl.py run-hello: $(MPIEXEC) $(NP_FLAG) 1 $(PYTHON) run_hello.py run-primes: $(MPIEXEC) $(NP_FLAG) 1 $(PYTHON) run_primes.py .PHONY: run-comm run-julia run-mandelbrot run-comm: run-julia run-mandelbrot run-julia: $(MPIEXEC) $(NP_FLAG) 5 $(PYTHON) run_julia.py run-mandelbrot: $(MPIEXEC) $(NP_FLAG) 5 $(PYTHON) run_mandelbrot.py .PHONY: run-cmdline run-cmdline: $(MPIEXEC) $(NP_FLAG) 5 $(PYTHON) -m mpi4py.futures run_cpi.py $(MPIEXEC) $(NP_FLAG) 5 $(PYTHON) -m mpi4py.futures run_crawl.py $(MPIEXEC) $(NP_FLAG) 5 $(PYTHON) -m mpi4py.futures run_hello.py $(MPIEXEC) $(NP_FLAG) 5 $(PYTHON) -m mpi4py.futures run_primes.py $(MPIEXEC) $(NP_FLAG) 5 $(PYTHON) -m mpi4py.futures run_julia.py $(MPIEXEC) $(NP_FLAG) 5 $(PYTHON) -m mpi4py.futures run_mandelbrot.py .PHONY: run-unittest run-unittest: $(MPIEXEC) $(NP_FLAG) 1 $(PYTHON) test_futures.py $(opt) $(MPIEXEC) $(NP_FLAG) 2 $(PYTHON) test_futures.py $(opt) $(MPIEXEC) $(NP_FLAG) 1 $(PYTHON) -m mpi4py.futures test_futures.py $(opt) $(MPIEXEC) $(NP_FLAG) 2 $(PYTHON) -m mpi4py.futures test_futures.py $(opt) build: test: run-pool run-comm run-cmdline clean: mpi4py-4.0.3/demo/futures/perf_crawl.py000066400000000000000000000052601475341043600200710ustar00rootroot00000000000000""" Compare the speed of downloading URLs sequentially vs. using futures. 
""" import sys import time import functools try: from urllib.request import urlopen except ImportError: from urllib2 import urlopen try: from concurrent.futures import ThreadPoolExecutor except ImportError: ThreadPoolExecutor = lambda n: None try: from concurrent.futures import ProcessPoolExecutor except ImportError: ProcessPoolExecutor = lambda n: None from mpi4py.futures import MPIPoolExecutor, as_completed URLS = [ 'http://www.google.com/', 'http://www.apple.com/', 'http://www.ibm.com', 'http://www.thisurlprobablydoesnotexist.com', 'http://www.slashdot.org/', 'http://www.python.org/', 'http://www.bing.com/', 'http://www.facebook.com/', 'http://www.yahoo.com/', 'http://www.youtube.com/', 'http://www.blogger.com/', ] def load_url(url, timeout): return urlopen(url, timeout=timeout).read() def download_urls_sequential(urls, timeout=60): url_to_content = {} for url in urls: try: url_to_content[url] = load_url(url, timeout=timeout) except: pass return url_to_content def download_urls_with_executor(executor, urls, timeout=60): if executor is None: return {} try: url_to_content = {} future_to_url = { executor.submit(load_url, url, timeout): url for url in urls } for future in as_completed(future_to_url): try: url_to_content[future_to_url[future]] = future.result() except: pass return url_to_content finally: executor.shutdown() def main(): for meth, fn in [('sequential', functools.partial(download_urls_sequential, URLS)), ('threads', functools.partial(download_urls_with_executor, ThreadPoolExecutor(10), URLS)), ('processes', functools.partial(download_urls_with_executor, ProcessPoolExecutor(10), URLS)), ('mpi4py', functools.partial(download_urls_with_executor, MPIPoolExecutor(10), URLS))]: sys.stdout.write('%s: ' % meth.ljust(11)) sys.stdout.flush() start = time.time() url_map = fn() elapsed = time.time() - start sys.stdout.write('%5.2f seconds (%2d of %d downloaded)\n' % (elapsed, len(url_map), len(URLS))) sys.stdout.flush() if __name__ == '__main__': main() mpi4py-4.0.3/demo/futures/perf_primes.py000066400000000000000000000036371475341043600202660ustar00rootroot00000000000000""" Compare the speed of primes sequentially vs. using futures. 
""" import sys import time import math try: from concurrent.futures import ThreadPoolExecutor except ImportError: ThreadPoolExecutor = None try: from concurrent.futures import ProcessPoolExecutor except ImportError: ProcessPoolExecutor = None from mpi4py.futures import MPIPoolExecutor PRIMES = [ 112272535095293, 112582705942171, 112272535095293, 115280095190773, 115797848077099, 117450548693743, 993960000099397, ] def is_prime(n): if n % 2 == 0: return False sqrt_n = int(math.floor(math.sqrt(n))) for i in range(3, sqrt_n + 1, 2): if n % i == 0: return False return True def sequential(): return list(map(is_prime, PRIMES)) def with_thread_pool_executor(): if not ThreadPoolExecutor: return None with ThreadPoolExecutor(4) as executor: return list(executor.map(is_prime, PRIMES)) def with_process_pool_executor(): if not ProcessPoolExecutor: return None with ProcessPoolExecutor(4) as executor: return list(executor.map(is_prime, PRIMES)) def with_mpi_pool_executor(): with MPIPoolExecutor(4) as executor: return list(executor.map(is_prime, PRIMES)) def main(): for name, fn in [('sequential', sequential), ('threads', with_thread_pool_executor), ('processes', with_process_pool_executor), ('mpi4py', with_mpi_pool_executor)]: sys.stdout.write('%s: ' % name.ljust(11)) sys.stdout.flush() start = time.time() result = fn() if result is None: sys.stdout.write(' not available\n') elif result != [True] * len(PRIMES): sys.stdout.write(' failed\n') else: sys.stdout.write('%5.2f seconds\n' % (time.time() - start)) sys.stdout.flush() if __name__ == '__main__': main() mpi4py-4.0.3/demo/futures/run_cpi.py000066400000000000000000000016361475341043600174070ustar00rootroot00000000000000import math import sys from mpi4py.futures import MPIPoolExecutor, wait from mpi4py.futures import get_comm_workers def compute_pi(n): comm = get_comm_workers() comm.barrier() n = comm.bcast(n, root=0) h = 1.0 / n s = 0.0 for i in range(comm.rank + 1, n + 1, comm.size): x = h * (i - 0.5) s += 4.0 / (1.0 + x**2) pi = comm.allreduce(s * h) return pi def main(): try: n = int(sys.argv[1]) except IndexError: n = 256 try: P = int(sys.argv[2]) except IndexError: P = 5 with MPIPoolExecutor(P) as executor: P = executor.num_workers fs = [executor.submit(compute_pi, n) for _ in range(P)] wait(fs) pi = fs[0].result() print( f"pi: {pi:.15f}, error: {abs(pi - math.pi):.3e}", f"({n:d} intervals, {P:d} workers)", ) if __name__ == '__main__': main() mpi4py-4.0.3/demo/futures/run_crawl.py000066400000000000000000000013551475341043600177420ustar00rootroot00000000000000from urllib.request import urlopen from mpi4py.futures import MPIPoolExecutor URLS = [ 'http://www.google.com/', 'http://www.apple.com/', 'http://www.ibm.com/', 'http://www.slashdot.org/', 'http://www.python.org/', 'http://www.bing.com/', 'http://www.facebook.com/', 'http://www.yahoo.com/', 'http://www.youtube.com/', 'http://www.blogger.com/', ] def load_url(url): return url, urlopen(url).read() def test_crawl(): with MPIPoolExecutor(10) as executor: for url, content in executor.map(load_url, URLS, timeout=10, unordered=True): print('%-25s: %6.2f KiB' % (url, len(content)/(1 << 10))) if __name__ == '__main__': test_crawl() mpi4py-4.0.3/demo/futures/run_hello.py000066400000000000000000000016751475341043600177420ustar00rootroot00000000000000from mpi4py import MPI from mpi4py.futures import MPIPoolExecutor, wait from mpi4py.futures import get_comm_workers def helloworld(): comm = get_comm_workers() comm.Barrier() size = comm.Get_size() rank = comm.Get_rank() name = MPI.Get_processor_name() greet 
= f"Hello, World! I am worker {rank} of {size} on {name}." sbuf = bytearray(128) rbuf = bytearray(128) dest = (rank + 1) % size source = (rank - 1) % size rbuf[:len(greet)] = greet.encode() for _ in range(size): sbuf, rbuf = rbuf, sbuf comm.Sendrecv( sbuf, dest, 42, rbuf, source, 42, ) return bytes(rbuf).decode() if __name__ == '__main__': executor = MPIPoolExecutor(5) futures = [] for _ in range(executor.num_workers): f = executor.submit(helloworld) futures.append(f) wait(futures) for f in futures: print(f.result()) executor.shutdown() mpi4py-4.0.3/demo/futures/run_julia.py000066400000000000000000000023441475341043600177350ustar00rootroot00000000000000import sys import time from mpi4py.futures import MPICommExecutor x0 = -2.0 x1 = +2.0 y0 = -1.5 y1 = +1.5 w = 1600 h = 1200 dx = (x1 - x0) / w dy = (y1 - y0) / h def julia(x, y): c = complex(0, 0.65) z = complex(x, y) n = 255 while abs(z) < 3 and n > 1: z = z**2 + c n -= 1 return n def julia_line(k): line = bytearray(w) y = y1 - k * dy for j in range(w): x = x0 + j * dx line[j] = julia(x, y) return line def plot(image): import warnings warnings.simplefilter('ignore', UserWarning) try: from matplotlib import pyplot as plt except ImportError: return plt.figure() plt.imshow(image, aspect='equal', cmap='cubehelix') plt.axis('off') try: plt.draw() plt.pause(2) except: pass def test_julia(): with MPICommExecutor() as executor: if executor is None: return # worker process tic = time.time() image = list(executor.map(julia_line, range(h), chunksize=10)) toc = time.time() print("%s Set %dx%d in %.2f seconds." % ('Julia', w, h, toc-tic)) if len(sys.argv) > 1 and sys.argv[1] == '-plot': plot(image) if __name__ == '__main__': test_julia() mpi4py-4.0.3/demo/futures/run_mandelbrot.py000066400000000000000000000024141475341043600207560ustar00rootroot00000000000000import sys import time from mpi4py.futures import MPICommExecutor x0 = -2.0 x1 = +1.0 y0 = -1.0 y1 = +1.0 w = 750 h = 500 dx = (x1 - x0) / w dy = (y1 - y0) / h def mandelbrot(x, y, maxit=255): c = complex(x, y) z = complex(0, 0) n = 255 while abs(z) < 2 and n > 1: z = z**2 + c n -= 1 return n def mandelbrot_line(k): line = bytearray(w) y = y1 - k * dy for j in range(w): x = x0 + j * dx line[j] = mandelbrot(x, y) return line def plot(image): import warnings warnings.simplefilter('ignore', UserWarning) try: from matplotlib import pyplot as plt except ImportError: return plt.figure() plt.imshow(image, aspect='equal', cmap='spectral') plt.axis('off') try: plt.draw() plt.pause(2) except: pass def test_mandelbrot(): with MPICommExecutor() as executor: if executor is None: return # worker process tic = time.time() image = list(executor.map(mandelbrot_line, range(h), chunksize=10)) toc = time.time() print("%s Set %dx%d in %.2f seconds." 
% ('Mandelbrot', w, h, toc-tic)) if len(sys.argv) > 1 and sys.argv[1] == '-plot': plot(image) if __name__ == '__main__': test_mandelbrot() mpi4py-4.0.3/demo/futures/run_primes.py000066400000000000000000000012201475341043600201200ustar00rootroot00000000000000import math from mpi4py.futures import MPIPoolExecutor PRIMES = [ 112272535095293, 112582705942171, 112272535095293, 115280095190773, 115797848077099, 117450548693743, 993960000099397, ] def is_prime(n): if n % 2 == 0: return False sqrt_n = int(math.floor(math.sqrt(n))) for i in range(3, sqrt_n + 1, 2): if n % i == 0: return False return True def test_primes(): with MPIPoolExecutor(4) as executor: for number, prime in zip(PRIMES, executor.map(is_prime, PRIMES)): print('%d is prime: %s' % (number, prime)) if __name__ == '__main__': test_primes() mpi4py-4.0.3/demo/futures/test_futures.py000066400000000000000000001627761475341043600205210ustar00rootroot00000000000000import os import sys import time import random import warnings import functools import threading import unittest from mpi4py import MPI from mpi4py import futures from concurrent.futures._base import ( PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED ) SHARED_POOL = futures._core.SharedPool is not None WORLD_SIZE = MPI.COMM_WORLD.Get_size() def create_future(state=PENDING, exception=None, result=None): f = futures.Future() f._state = state f._exception = exception f._result = result return f PENDING_FUTURE = create_future(state=PENDING) RUNNING_FUTURE = create_future(state=RUNNING) CANCELLED_FUTURE = create_future(state=CANCELLED) CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED) EXCEPTION_FUTURE = create_future(state=FINISHED, exception=OSError()) SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42) def mul(x, y): return x * y def sleep_and_raise(t): time.sleep(t) raise Exception('this is an exception') def check_global_var(x): return global_var == x def check_run_name(name): return __name__ == name def check_comm_workers(): comm = futures.get_comm_workers() return comm.Get_size() def sys_flags_get(name): return getattr(sys.flags, name) class ExecutorMixin: worker_count = 2 def setUp(self): self.t1 = time.time() try: self.executor = self.executor_type(max_workers=self.worker_count) except NotImplementedError as exc: self.skipTest(str(exc)) self._prime_executor() def tearDown(self): self.executor.shutdown(wait=True) dt = time.time() - self.t1 self.assertLess(dt, 60, 'synchronization issue: test lasted too long') def _prime_executor(self): # Make sure that the executor is ready to do work before running the # tests. This should reduce the probability of timeouts in the tests. 
futures = [self.executor.submit(time.sleep, 0) for _ in range(self.worker_count)] for f in futures: f.result() class ProcessPoolMixin(ExecutorMixin): executor_type = futures.MPIPoolExecutor if 'coverage' in sys.modules: executor_type = staticmethod(functools.partial( executor_type, python_args='-m coverage run'.split(), )) @unittest.skipIf(not SHARED_POOL, 'not-shared-pool') class SharedPoolInitTest(unittest.TestCase): executor_type = futures.MPIPoolExecutor def test_initializer_0(self): executor = self.executor_type( initializer=time.sleep, initargs=(0,), ) executor.bootup() executor.submit(time.sleep, 0).result() executor.shutdown() def test_initializer_1(self): for _ in range(2): executor = self.executor_type( initializer=sleep_and_raise, initargs=(0.2,), ) executor.submit(time.sleep, 0).cancel() future = executor.submit(time.sleep, 0) with self.assertRaises(futures.BrokenExecutor): executor.submit(time.sleep, 0).result() with self.assertRaises(futures.BrokenExecutor): future.result() with self.assertRaises(futures.BrokenExecutor): executor.submit(time.sleep, 0) def test_initializer_2(self): executor = self.executor_type( initializer=time.sleep, initargs=(0,), ) executor.bootup() executor.submit(time.sleep, 0).result() executor.shutdown() def test_initializer_3(self): executor = self.executor_type() executor.submit(time.sleep, 0).result() executor.shutdown() def test_initializer_4(self): def test(tid): with self.executor_type( initializer=time.sleep, initargs=(random.random()/100,), ) as executor: futures.as_completed([ executor.submit(time.sleep, random.random()/100) for _ in range(executor.num_workers + tid) ]) ts = [threading.Thread(target=test, args=(i,)) for i in range(5)] for t in ts: t.start() for t in ts: t.join() class ProcessPoolInitTest(ProcessPoolMixin, unittest.TestCase): def setUp(self): pass def tearDown(self): pass def _prime_executor(self): pass def test_init(self): self.executor_type() def test_init_args(self): self.executor_type(1) def test_init_kwargs(self): executor = self.executor_type( python_exe=sys.executable, max_workers=None, mpi_info=dict(soft="0:1"), globals=None, main=False, path=[], wdir=os.getcwd(), env={}, use_pkl5=None, backoff=0.001, ) futures = [executor.submit(time.sleep, 0) for _ in range(self.worker_count)] for f in futures: f.result() executor.shutdown() def test_init_pyargs(self): executor_type = futures.MPIPoolExecutor executor = executor_type(python_args=['-B', '-Wi']) executor.submit(time.sleep, 0).result() executor.shutdown() @unittest.skipIf(SHARED_POOL, 'shared-pool') def test_init_sys_flags(self): executor_type = futures.MPIPoolExecutor sys_flags = [ ('debug', '-d', True), ('optimize', '-O', 1), ('optimize', '-OO', 2), ('dont_write_bytecode', '-B', True), ] if sys.version_info >= (3, 7): sys_flags.extend([ ('dev_mode', '-Xdev', True), ('utf8_mode', '-Xutf8', True), ]) if sys.version_info >= (3, 11): sys_flags.extend([ ('safe_path', '-P', True), ]) for (name, flag, value) in sys_flags: if not isinstance(value, bool): if isinstance(value, int): value += getattr(sys.flags, name) with executor_type(python_args=[flag]) as executor: result = executor.submit(sys_flags_get, name).result() if isinstance(value, bool): result = bool(result) self.assertEqual(value, result, f"sys.flags.{name}") @unittest.skipIf(SHARED_POOL, 'shared-pool') def test_init_globals(self): executor = self.executor_type(globals=dict(global_var=42)) future1 = executor.submit(check_global_var, 42) future2 = executor.submit(check_global_var, 24) self.assertTrue(future1.result()) 
self.assertFalse(future2.result()) executor.shutdown() @unittest.skipIf(SHARED_POOL and WORLD_SIZE == 1, 'shared-pool') def test_run_name(self): executor = self.executor_type() run_name = futures._core.MAIN_RUN_NAME future = executor.submit(check_run_name, run_name) self.assertTrue(future.result()) @unittest.skipIf(SHARED_POOL and WORLD_SIZE > 2, 'shared-pool') def test_max_workers(self): executor = self.executor_type(max_workers=1) self.assertEqual(executor.num_workers, 1) self.assertEqual(executor.num_workers, executor._max_workers) executor.shutdown() self.assertEqual(executor.num_workers, 0) self.assertEqual(executor.num_workers, executor._max_workers) @unittest.skipIf(SHARED_POOL and WORLD_SIZE > 2, 'shared-pool') def test_max_workers_environ(self): save = os.environ.get('MPI4PY_FUTURES_MAX_WORKERS') os.environ['MPI4PY_FUTURES_MAX_WORKERS'] = '1' try: executor = self.executor_type() executor.submit(time.sleep, 0).result() executor.shutdown() executor = self.executor_type() self.assertEqual(executor.num_workers, 1) executor.shutdown() self.assertEqual(executor.num_workers, 0) finally: del os.environ['MPI4PY_FUTURES_MAX_WORKERS'] if save is not None: os.environ['MPI4PY_FUTURES_MAX_WORKERS'] = save def test_max_workers_negative(self): for number in (0, -1): with self.assertRaises(ValueError): self.executor_type(max_workers=number) def test_get_comm_workers(self): executor = self.executor_type() num_workers = executor.submit(check_comm_workers).result() self.assertTrue(executor.num_workers, num_workers) self.assertRaises(RuntimeError, check_comm_workers) @unittest.skipIf(SHARED_POOL, 'shared-pool') def test_use_pkl5_kwarg(self): executor = self.executor_type(use_pkl5=True) executor.submit(time.sleep, 0).result() executor.shutdown() @unittest.skipIf(SHARED_POOL, 'shared-pool') def test_use_pkl5_environ(self): save = os.environ.get('MPI4PY_FUTURES_USE_PKL5') try: for value in ('false', 'true'): os.environ['MPI4PY_FUTURES_USE_PKL5'] = value executor = self.executor_type() executor.submit(time.sleep, 0).result() executor.shutdown() with warnings.catch_warnings(record=True) as wlist: warnings.simplefilter('always') os.environ['MPI4PY_FUTURES_USE_PKL5'] = 'foobar' executor = self.executor_type() executor.submit(time.sleep, 0).result() executor.shutdown() self.assertTrue(wlist) msg = wlist[0].message self.assertIsInstance(msg, RuntimeWarning) self.assertIn('foobar', msg.args[0]) finally: del os.environ['MPI4PY_FUTURES_USE_PKL5'] if save is not None: os.environ['MPI4PY_FUTURES_USE_PKL5'] = save def test_initializer(self): executor = self.executor_type( initializer=time.sleep, initargs=(0,), ) executor.submit(time.sleep, 0).result() def test_initializer_bad(self): with self.assertRaises(TypeError): self.executor_type(initializer=123) def test_initializer_error(self): executor = self.executor_type( initializer=sleep_and_raise, initargs=(0.2,), ) executor.submit(time.sleep, 0).cancel() future = executor.submit(time.sleep, 0) with self.assertRaises(futures.BrokenExecutor): executor.submit(time.sleep, 0).result() with self.assertRaises(futures.BrokenExecutor): future.result() with self.assertRaises(futures.BrokenExecutor): executor.submit(time.sleep, 0) self.assertEqual(executor.num_workers, 0) def test_initializer_error_del(self): executor = self.executor_type( initializer=sleep_and_raise, initargs=(0.2,), ) executor.bootup() del executor def test_initializer_error_del_nowait(self): executor = self.executor_type( initializer=sleep_and_raise, initargs=(1.2,), ) executor.bootup(wait=False) 
executor.shutdown(wait=False) del executor class ProcessPoolBootupTest(ProcessPoolMixin, unittest.TestCase): def _prime_executor(self): pass def test_bootup(self): executor = self.executor_type(1) executor.bootup() executor.bootup() executor.shutdown() with self.assertRaises(RuntimeError): executor.bootup() def test_bootup_wait(self): executor = self.executor_type(1) executor.bootup(wait=True) executor.bootup(wait=True) executor.shutdown(wait=True) with self.assertRaises(RuntimeError): executor.bootup(wait=True) def test_bootup_nowait(self): executor = self.executor_type(1) executor.bootup(wait=False) executor.bootup(wait=False) executor.shutdown(wait=False) with self.assertRaises(RuntimeError): executor.bootup(wait=False) executor.shutdown(wait=True) def test_bootup_nowait_wait(self): executor = self.executor_type(1) executor.bootup(wait=False) executor.bootup(wait=True) executor.shutdown() with self.assertRaises(RuntimeError): executor.bootup() def test_bootup_shutdown_nowait(self): executor = self.executor_type(1) executor.bootup(wait=False) executor.shutdown(wait=False) worker = executor._pool del executor worker.join() class ExecutorShutdownTestMixin: def test_run_after_shutdown(self): self.executor.shutdown() with self.assertRaises(RuntimeError): self.executor.submit(pow, 2, 5) def test_hang_issue12364(self): fs = [self.executor.submit(time.sleep, 0.01) for _ in range(50)] self.executor.shutdown() for f in fs: f.result() class ProcessPoolShutdownTest(ProcessPoolMixin, ExecutorShutdownTestMixin, unittest.TestCase): def _prime_executor(self): pass def test_shutdown(self): executor = self.executor_type(max_workers=1) self.assertIsNone(executor._pool) self.assertFalse(executor._shutdown) executor.submit(mul, 21, 2) executor.submit(mul, 6, 7) executor.submit(mul, 3, 14) self.assertIsNotNone(executor._pool.thread) self.assertFalse(executor._shutdown) executor.shutdown(wait=False) self.assertIsNotNone(executor._pool.thread) self.assertTrue(executor._shutdown) executor.shutdown(wait=True) self.assertIsNone(executor._pool) self.assertTrue(executor._shutdown) def test_submit_shutdown_cancel(self): executor = self.executor_type(max_workers=1) executor.bootup() num_workers = executor.num_workers for _ in range(num_workers*100): executor.submit(time.sleep, 0.1) fut = executor.submit(time.sleep, 0) executor.shutdown(wait=False, cancel_futures=False) self.assertFalse(fut.cancelled()) executor.shutdown(wait=True, cancel_futures=True) self.assertTrue(fut.cancelled()) def test_submit_shutdown_cancel_wait(self): executor = self.executor_type(max_workers=1) executor.bootup() num_workers = executor.num_workers fut1 = executor.submit(time.sleep, 0.1) for _ in range(num_workers*100): executor.submit(time.sleep, 0.1) fut2 = executor.submit(time.sleep, 0) fut3 = executor.submit(time.sleep, 0) time.sleep(0.2) executor.shutdown(wait=False, cancel_futures=True) done, not_done = futures.wait({fut1, fut2, fut3}) self.assertEqual(len(not_done), 0) self.assertFalse(fut1.cancelled()) self.assertTrue(fut2.cancelled()) self.assertTrue(fut3.cancelled()) executor.shutdown(wait=True, cancel_futures=True) def test_shutdown_cancel(self): executor = self.executor_type(max_workers=1) executor.bootup() executor._pool.cancel() executor.shutdown(wait=False, cancel_futures=False) executor.shutdown(wait=False, cancel_futures=False) executor.shutdown(wait=False, cancel_futures=True) executor.shutdown(wait=False, cancel_futures=True) executor.shutdown(wait=True, cancel_futures=True) executor.shutdown(wait=True, cancel_futures=True) 
def test_init_bootup_shutdown(self): executor = self.executor_type(max_workers=1) self.assertIsNone(executor._pool) self.assertFalse(executor._shutdown) executor.bootup() self.assertTrue(executor._pool.event.is_set()) self.assertFalse(executor._shutdown) executor.shutdown() self.assertIsNone(executor._pool) self.assertTrue(executor._shutdown) def test_context_manager_shutdown(self): with self.executor_type(max_workers=1) as e: self.assertEqual(list(e.map(abs, range(-5, 5))), [5, 4, 3, 2, 1, 0, 1, 2, 3, 4]) threads = [e._pool.thread] queues = [e._pool.queue] events = [e._pool.event] for t in threads: t.join() for q in queues: with self.assertRaises(LookupError): q.pop() for e in events: self.assertTrue(e.is_set()) def test_del_shutdown(self): executor = self.executor_type(max_workers=1) list(executor.map(abs, range(-5, 5))) threads = [executor._pool.thread] queues = [executor._pool.queue] events = [executor._pool.event] if hasattr(sys, 'pypy_version_info'): executor.shutdown(False) else: del executor for t in threads: t.join() for q in queues: with self.assertRaises(LookupError): q.pop() for e in events: self.assertTrue(e.is_set()) class WaitTestMixin: def test_first_completed(self): future1 = self.executor.submit(mul, 21, 2) future2 = self.executor.submit(time.sleep, 0.25) done, not_done = futures.wait( [CANCELLED_FUTURE, future1, future2], return_when=futures.FIRST_COMPLETED) self.assertEqual({future1}, done) self.assertEqual({CANCELLED_FUTURE, future2}, not_done) def test_first_completed_some_already_completed(self): future1 = self.executor.submit(time.sleep, 0.5) finished, pending = futures.wait( [CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1], return_when=futures.FIRST_COMPLETED) self.assertEqual({ CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE}, finished) self.assertEqual({future1}, pending) def test_first_exception(self): future1 = self.executor.submit(mul, 2, 21) future2 = self.executor.submit(sleep_and_raise, 0.25) future3 = self.executor.submit(time.sleep, 0.5) finished, pending = futures.wait( [future1, future2, future3], return_when=futures.FIRST_EXCEPTION) self.assertEqual({future1, future2}, finished) self.assertEqual({future3}, pending) def test_first_exception_some_already_complete(self): future1 = self.executor.submit(divmod, 21, 0) future2 = self.executor.submit(time.sleep, 0.5) finished, pending = futures.wait( [SUCCESSFUL_FUTURE, CANCELLED_FUTURE, CANCELLED_AND_NOTIFIED_FUTURE, future1, future2], return_when=futures.FIRST_EXCEPTION) self.assertEqual({ SUCCESSFUL_FUTURE, CANCELLED_AND_NOTIFIED_FUTURE, future1}, finished) self.assertEqual({ CANCELLED_FUTURE, future2}, pending) def test_first_exception_one_already_failed(self): future1 = self.executor.submit(time.sleep, 0.25) finished, pending = futures.wait( [EXCEPTION_FUTURE, future1], return_when=futures.FIRST_EXCEPTION) self.assertEqual({EXCEPTION_FUTURE}, finished) self.assertEqual({future1}, pending) def test_all_completed(self): future1 = self.executor.submit(divmod, 2, 0) future2 = self.executor.submit(mul, 2, 21) finished, pending = futures.wait( [SUCCESSFUL_FUTURE, CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, future1, future2], return_when=futures.ALL_COMPLETED) self.assertEqual({ SUCCESSFUL_FUTURE, CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, future1, future2}, finished) self.assertEqual(set(), pending) def test_timeout(self): future1 = self.executor.submit(mul, 6, 7) future2 = self.executor.submit(time.sleep, 0.75) finished, pending = futures.wait( [CANCELLED_AND_NOTIFIED_FUTURE, 
EXCEPTION_FUTURE, SUCCESSFUL_FUTURE, future1, future2], timeout=0.5, return_when=futures.ALL_COMPLETED) self.assertEqual({ CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE, future1}, finished) self.assertEqual({future2}, pending) class ProcessPoolWaitTest(ProcessPoolMixin, WaitTestMixin, unittest.TestCase): pass class AsCompletedTestMixin: def test_no_timeout(self): future1 = self.executor.submit(mul, 2, 21) future2 = self.executor.submit(mul, 7, 6) completed = set(futures.as_completed( [CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE, future1, future2])) self.assertEqual({ CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE, future1, future2}, completed) def test_zero_timeout(self): future1 = self.executor.submit(time.sleep, 0.5) completed_futures = set() try: for future in futures.as_completed( [CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE, future1], timeout=0): completed_futures.add(future) except futures.TimeoutError: pass self.assertEqual({ CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE}, completed_futures) def test_nonzero_timeout(self): future1 = self.executor.submit(time.sleep, 0.0) future2 = self.executor.submit(time.sleep, 0.5) completed_futures = set() try: for future in futures.as_completed( [CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE, future1], timeout=0.2): completed_futures.add(future) except futures.TimeoutError: pass self.assertEqual({ CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE, future1}, completed_futures) def test_duplicate_futures(self): # Issue 20367. Duplicate futures should not raise exceptions or give # duplicate responses. future1 = self.executor.submit(time.sleep, 0.1) completed = [f for f in futures.as_completed([future1, future1])] self.assertEqual(len(completed), 1) class ProcessPoolAsCompletedTest(ProcessPoolMixin, AsCompletedTestMixin, unittest.TestCase): pass class ExecutorTestMixin: def test_submit(self): future = self.executor.submit(pow, 2, 8) self.assertEqual(256, future.result()) def test_submit_keyword(self): future = self.executor.submit(mul, 2, y=8) self.assertEqual(16, future.result()) future = self.executor.submit(mul, x=2, y=8) self.assertEqual(16, future.result()) def test_submit_cancel(self): fs = [] num_workers = self.executor.num_workers for _ in range(num_workers*100): f = self.executor.submit(time.sleep, 0.1) fs.append(f) future = self.executor.submit(time.sleep, 0) future.cancel() self.assertTrue(future.cancelled()) for f in fs: f.cancel() def test_map(self): self.assertEqual( list(self.executor.map(pow, range(10), range(10))), list(map(pow, range(10), range(10)))) def test_starmap(self): sequence = [(a,a) for a in range(10)] self.assertEqual( list(self.executor.starmap(pow, sequence)), list(map(pow, range(10), range(10)))) self.assertEqual( list(self.executor.starmap(pow, iter(sequence))), list(map(pow, range(10), range(10)))) def test_map_exception(self): i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5]) self.assertEqual(next(i), (0, 1)) self.assertEqual(next(i), (0, 1)) with self.assertRaises(ZeroDivisionError): next(i) def test_map_timeout(self): results = [] try: for i in self.executor.map(time.sleep, [0, 0, 1], timeout=0.25): results.append(i) except futures.TimeoutError: pass else: self.fail('expected TimeoutError') self.assertEqual([None, None], results) def test_map_timeout_one(self): results = [] for i in self.executor.map(time.sleep, [0, 0, 0], timeout=1): results.append(i) 
self.assertEqual([None, None, None], results) class ProcessPoolExecutorTest(ProcessPoolMixin, ExecutorTestMixin, unittest.TestCase): def test_map_chunksize(self): ref = list(map(pow, range(40), range(40))) self.assertEqual( list(self.executor.map(pow, range(40), range(40), chunksize=6)), ref) self.assertEqual( list(self.executor.map(pow, range(40), range(40), chunksize=50)), ref) self.assertEqual( list(self.executor.map(pow, range(40), range(40), chunksize=40)), ref) with self.assertRaises(ValueError): list(self.executor.map(pow, range(40), range(40), chunksize=-1)) def test_starmap_chunksize(self): ref = list(map(pow, range(40), range(40))) sequence = [(a, a) for a in range(40)] self.assertEqual( list(self.executor.starmap(pow, sequence, chunksize=6)), ref) self.assertEqual( list(self.executor.starmap(pow, sequence, chunksize=50)), ref) self.assertEqual( list(self.executor.starmap(pow, sequence, chunksize=40)), ref) self.assertEqual( list(self.executor.starmap(pow, iter(sequence), chunksize=6)), ref) self.assertEqual( list(self.executor.starmap(pow, iter(sequence), chunksize=50)), ref) self.assertEqual( list(self.executor.starmap(pow, iter(sequence), chunksize=40)), ref) with self.assertRaises(ValueError): list(self.executor.starmap(pow, sequence, chunksize=-1)) def test_map_unordered(self): map_unordered = functools.partial(self.executor.map, unordered=True) self.assertEqual( set(map_unordered(pow, range(10), range(10))), set(map(pow, range(10), range(10)))) def test_map_unordered_timeout(self): map_unordered = functools.partial(self.executor.map, unordered=True) num_workers = self.executor.num_workers results = [] try: args = [1] + [0]*(num_workers-1) for i in map_unordered(time.sleep, args, timeout=0.25): results.append(i) except futures.TimeoutError: pass else: self.fail('expected TimeoutError') self.assertEqual([None]*(num_workers-1), results) def test_map_unordered_timeout_one(self): map_unordered = functools.partial(self.executor.map, unordered=True) results = [] for i in map_unordered(time.sleep, [0, 0, 0], timeout=1): results.append(i) self.assertEqual([None, None, None], results) def test_map_unordered_exception(self): map_unordered = functools.partial(self.executor.map, unordered=True) i = map_unordered(divmod, [1, 1, 1, 1], [2, 3, 0, 5]) try: self.assertEqual(next(i), (0, 1)) except ZeroDivisionError: return def test_map_unordered_chunksize(self): map_unordered = functools.partial(self.executor.map, unordered=True) ref = set(map(pow, range(40), range(40))) self.assertEqual( set(map_unordered(pow, range(40), range(40), chunksize=6)), ref) self.assertEqual( set(map_unordered(pow, range(40), range(40), chunksize=50)), ref) self.assertEqual( set(map_unordered(pow, range(40), range(40), chunksize=40)), ref) with self.assertRaises(ValueError): set(map_unordered(pow, range(40), range(40), chunksize=-1)) class ProcessPoolSubmitTest(unittest.TestCase): @unittest.skipIf(MPI.get_vendor()[0] == 'Microsoft MPI', 'msmpi') def test_multiple_executors(self): executor1 = futures.MPIPoolExecutor(1).bootup(wait=True) executor2 = futures.MPIPoolExecutor(1).bootup(wait=True) executor3 = futures.MPIPoolExecutor(1).bootup(wait=True) fs1 = [executor1.submit(abs, i) for i in range(100, 200)] fs2 = [executor2.submit(abs, i) for i in range(200, 300)] fs3 = [executor3.submit(abs, i) for i in range(300, 400)] futures.wait(fs3+fs2+fs1) for i, f in enumerate(fs1): self.assertEqual(f.result(), i + 100) for i, f in enumerate(fs2): self.assertEqual(f.result(), i + 200) for i, f in enumerate(fs3): 
self.assertEqual(f.result(), i + 300) executor1 = executor2 = executor3 = None def test_mpi_serialized_support(self): futures._core.setup_mpi_threads() serialized = futures._core.serialized lock_save = serialized.lock try: if lock_save is None: serialized.lock = threading.Lock() executor = futures.MPIPoolExecutor(1).bootup() executor.submit(abs, 0).result() executor.shutdown() serialized.lock = lock_save else: serialized.lock = None with lock_save: executor = futures.MPIPoolExecutor(1).bootup() executor.submit(abs, 0).result() executor.shutdown() serialized.lock = lock_save finally: serialized.lock = lock_save def test_shared_executors(self): if not SHARED_POOL: return executors = [futures.MPIPoolExecutor() for _ in range(16)] fs = [] for i in range(128): fs.extend( e.submit(abs, i*16+j) for j, e in enumerate(executors) ) self.assertEqual(sorted(f.result() for f in fs), list(range(16*128))) world_size = MPI.COMM_WORLD.Get_size() num_workers = max(1, world_size - 1) for e in executors: self.assertEqual(e.num_workers, num_workers) del e, executors def inout(arg): return arg class GoodPickle: def __init__(self, value=0): self.value = value self.pickled = False self.unpickled = False def __getstate__(self): self.pickled = True return (self.value,) def __setstate__(self, state): self.unpickled = True self.value = state[0] class BadPickle: def __init__(self): self.pickled = False def __getstate__(self): self.pickled = True 1/0 def __setstate__(self, state): pass class BadUnpickle: def __init__(self): self.pickled = False def __getstate__(self): self.pickled = True return (None,) def __setstate__(self, state): if state[0] is not None: raise ValueError 1/0 @unittest.skipIf(SHARED_POOL and WORLD_SIZE == 1, 'shared-pool') class ProcessPoolPickleTest(unittest.TestCase): def setUp(self): self.executor = futures.MPIPoolExecutor(1) def tearDown(self): self.executor.shutdown() def test_good_pickle(self): o = GoodPickle(42) r = self.executor.submit(inout, o).result() self.assertEqual(o.value, r.value) self.assertTrue(o.pickled) self.assertTrue(r.unpickled) r = self.executor.submit(GoodPickle, 77).result() self.assertEqual(r.value, 77) self.assertTrue(r.unpickled) def test_bad_pickle(self): o = BadPickle() self.assertFalse(o.pickled) f = self.executor.submit(inout, o) with self.assertRaises(ZeroDivisionError): f.result() self.assertTrue(o.pickled) f = self.executor.submit(BadPickle) with self.assertRaises(ZeroDivisionError): f.result() f = self.executor.submit(abs, 42) self.assertEqual(f.result(), 42) def test_bad_unpickle(self): o = BadUnpickle() self.assertFalse(o.pickled) f = self.executor.submit(inout, o) with self.assertRaises(ZeroDivisionError): f.result() self.assertTrue(o.pickled) f = self.executor.submit(BadUnpickle) with self.assertRaises(ZeroDivisionError): f.result() f = self.executor.submit(abs, 42) self.assertEqual(f.result(), 42) def test_exc_pickle(self): o = BadPickle() f = self.executor.submit(inout, o) exc = f.exception() self.assertIsInstance(exc, ZeroDivisionError) cause = exc.__cause__ self.assertIsNone(cause) def test_exc_unpickle(self): o = BadUnpickle() f = self.executor.submit(inout, o) exc = f.exception() self.assertIsInstance(exc, ZeroDivisionError) cause = exc.__cause__ self.assertIsInstance(cause, futures._core.RemoteTraceback) class MPICommExecutorTest(unittest.TestCase): MPICommExecutor = futures.MPICommExecutor def test_default(self): with self.MPICommExecutor() as executor: if executor is not None: executor.bootup() future1 = executor.submit(time.sleep, 0) future2 = 
executor.submit(time.sleep, 0) executor.shutdown() self.assertIsNone(future1.result()) self.assertIsNone(future2.result()) def test_self(self): with self.MPICommExecutor(MPI.COMM_SELF) as executor: future = executor.submit(time.sleep, 0) self.assertIsNone(future.result()) self.assertIsNone(future.exception()) future = executor.submit(sleep_and_raise, 0) with self.assertRaises(Exception): future.result() self.assertEqual(Exception, type(future.exception())) list(executor.map(time.sleep, [0, 0])) list(executor.map(time.sleep, [0, 0], timeout=1)) iterator = executor.map(time.sleep, [0.2, 0], timeout=0) with self.assertRaises(futures.TimeoutError): list(iterator) def test_args(self): with self.MPICommExecutor(MPI.COMM_SELF) as executor: self.assertIsNotNone(executor) with self.MPICommExecutor(MPI.COMM_SELF, 0) as executor: self.assertIsNotNone(executor) def test_kwargs(self): with self.MPICommExecutor(comm=MPI.COMM_SELF) as executor: self.assertIsNotNone(executor) with self.MPICommExecutor(comm=MPI.COMM_SELF, root=0) as executor: self.assertIsNotNone(executor) @unittest.skipIf(SHARED_POOL, 'shared-pool') def test_arg_root(self): comm = MPI.COMM_WORLD rank = comm.Get_rank() for root in range(comm.Get_size()): with self.MPICommExecutor(comm, root) as executor: if rank == root: self.assertIsNotNone(executor) else: self.assertIsNone(executor) with self.MPICommExecutor(root=root) as executor: if rank == root: self.assertIsNotNone(executor) else: self.assertIsNone(executor) @unittest.skipIf(SHARED_POOL, 'shared-pool') def test_arg_bad_root(self): size = MPI.COMM_WORLD.Get_size() with self.assertRaises(ValueError): self.MPICommExecutor(root=-size) with self.assertRaises(ValueError): self.MPICommExecutor(root=-1) with self.assertRaises(ValueError): self.MPICommExecutor(root=+size) @unittest.skipIf(SHARED_POOL, 'shared-pool') def test_arg_bad_comm(self): if MPI.COMM_WORLD.Get_size() == 1: return intercomm, intracomm = futures._core.comm_split(MPI.COMM_WORLD, 0) try: with self.assertRaises(ValueError): self.MPICommExecutor(intercomm) finally: intercomm.Free() if intracomm: intracomm.Free() def test_with_bad(self): mpicommexecutor = self.MPICommExecutor(MPI.COMM_SELF) with mpicommexecutor as executor: try: with mpicommexecutor: pass except RuntimeError: pass else: self.fail('expected RuntimeError') def test_initializer(self): mpicommexecutor = self.MPICommExecutor( initializer=time.sleep, initargs=(0,), ) with mpicommexecutor as executor: if executor is not None: executor.bootup() del executor with mpicommexecutor as executor: if executor is not None: executor.submit(time.sleep, 0).result() def test_initializer_error(self): mpicommexecutor = self.MPICommExecutor( initializer=sleep_and_raise, initargs=(0.2,), ) with mpicommexecutor as executor: if executor is not None: executor.submit(time.sleep, 0).cancel() future = executor.submit(time.sleep, 0) with self.assertRaises(futures.BrokenExecutor): executor.submit(time.sleep, 0).result() with self.assertRaises(futures.BrokenExecutor): future.result() def test_initializer_error_del(self): mpicommexecutor = self.MPICommExecutor( initializer=sleep_and_raise, initargs=(0.2,), ) with mpicommexecutor as executor: if executor is not None: executor.bootup() del executor def test_initializer_error_del_nowait(self): mpicommexecutor = self.MPICommExecutor( initializer=sleep_and_raise, initargs=(0.2,), ) with mpicommexecutor as executor: if executor is not None: executor.bootup(wait=False) executor.shutdown(wait=False) del executor def test_get_comm_workers(self): for 
comm in (MPI.COMM_SELF, MPI.COMM_WORLD): with self.MPICommExecutor(MPI.COMM_SELF) as executor: num_workers = executor.submit(check_comm_workers).result() self.assertTrue(executor.num_workers, num_workers) self.assertRaises(RuntimeError, check_comm_workers) class ThreadPoolMixin(ExecutorMixin): executor_type = futures.ThreadPoolExecutor class ThreadPoolTest(ThreadPoolMixin, ExecutorTestMixin, ExecutorShutdownTestMixin, unittest.TestCase): pass from mpi4py.futures.aplus import ThenableFuture class ThenTest(unittest.TestCase): assert_ = unittest.TestCase.assertTrue def test_cancel_base(self): base_f = ThenableFuture() new_f = base_f.then() self.assertTrue(base_f is not new_f) self.assertTrue(not base_f.done()) self.assertTrue(not new_f.done()) base_f.cancel() self.assertTrue(base_f.done()) self.assertTrue(new_f.done()) self.assertTrue(base_f.cancelled()) self.assertTrue(new_f.cancelled()) def test_cancel_new(self): base_f = ThenableFuture() new_f = base_f.then() self.assertTrue(base_f is not new_f) self.assertTrue(not base_f.done()) self.assertTrue(not new_f.done()) new_f.cancel() self.assertTrue(not base_f.done()) self.assertTrue(new_f.done()) base_f.set_result(1) self.assertTrue(base_f.done()) self.assertTrue(new_f.cancelled()) def test_then_multiple(self): base_f = ThenableFuture() new_f1 = base_f.then() new_f2 = base_f.then() new_f3 = base_f.then() self.assertTrue(base_f is not new_f1) self.assertTrue(base_f is not new_f2) self.assertTrue(base_f is not new_f3) self.assertTrue(not base_f.done()) self.assertTrue(not new_f1.done()) self.assertTrue(not new_f2.done()) self.assertTrue(not new_f3.done()) base_f.set_result('done') self.assertTrue(base_f.done()) self.assertTrue(new_f1.done()) self.assertTrue(new_f2.done()) self.assertTrue(new_f3.done()) self.assertTrue(not new_f1.exception()) self.assertTrue(not new_f2.exception()) self.assertTrue(not new_f3.exception()) self.assertTrue(new_f1.result() == 'done') self.assertTrue(new_f2.result() == 'done') self.assertTrue(new_f3.result() == 'done') def test_no_callbacks_and_success(self): base_f = ThenableFuture() new_f = base_f.then() self.assertTrue(base_f is not new_f) self.assertTrue(not base_f.done()) self.assertTrue(not new_f.done()) base_f.set_result('done') self.assertTrue(base_f.done()) self.assertTrue(new_f.done()) self.assertTrue(not new_f.exception()) self.assertTrue(new_f.result() == 'done') def test_no_callbacks_and_failure(self): class MyException(Exception): pass base_f = ThenableFuture() new_f = base_f.then() self.assertTrue(base_f is not new_f) self.assertTrue(not base_f.done()) self.assertTrue(not new_f.done()) base_f.set_exception(MyException('sad')) self.assertTrue(base_f.done()) self.assertTrue(new_f.done()) self.assertTrue(new_f.exception()) with self.assertRaises(MyException) as catcher: new_f.result() self.assertTrue(catcher.exception.args[0] == 'sad') def test_success_callback_and_success(self): base_f = ThenableFuture() new_f = base_f.then(lambda result: result + ' manipulated') self.assertTrue(base_f is not new_f) self.assertTrue(not base_f.done()) self.assertTrue(not new_f.done()) base_f.set_result('done') self.assertTrue(base_f.done()) self.assertTrue(new_f.done()) self.assertTrue(not new_f.exception()) self.assertTrue(new_f.result() == 'done manipulated') def test_err_callback_and_failure_repackage(self): class MyException(Exception): pass class MyRepackagedException(Exception): pass class NotMatched(Exception): pass def on_failure(ex): if isinstance(ex, MyException): return MyRepackagedException(ex.args[0] + ' 
repackaged') else: return NotMatched('?') base_f = ThenableFuture() new_f = base_f.then(None, on_failure) self.assertTrue(base_f is not new_f) self.assertTrue(not base_f.done()) self.assertTrue(not new_f.done()) base_f.set_exception(MyException('sad')) self.assertTrue(base_f.done()) self.assertTrue(new_f.done()) self.assertTrue(new_f.exception()) with self.assertRaises(MyRepackagedException) as catcher: new_f.result() self.assertTrue(catcher.exception.args[0] == 'sad repackaged') def test_err_callback_and_failure_raised(self): class MyException(Exception): pass class MyRepackagedException(Exception): pass def raise_something_else(ex): raise MyRepackagedException(ex.args[0] + ' repackaged') base_f = ThenableFuture() new_f = base_f.then(None, raise_something_else) self.assertTrue(base_f is not new_f) self.assertTrue(not base_f.done()) self.assertTrue(not new_f.done()) base_f.set_exception(MyException('sad')) self.assertTrue(base_f.done()) self.assertTrue(new_f.done()) self.assertTrue(new_f.exception()) with self.assertRaises(MyRepackagedException) as catcher: new_f.result() self.assertTrue(catcher.exception.args[0] == 'sad repackaged') def test_err_callback_convert_to_success(self): class MyException(Exception): pass class NotMatched(Exception): pass def on_failure(ex): if isinstance(ex, MyException): return ex.args[0] + ' repackaged' else: return NotMatched('?') base_f = ThenableFuture() new_f = base_f.catch(on_failure) self.assertTrue(base_f is not new_f) self.assertTrue(not base_f.done()) self.assertTrue(not new_f.done()) base_f.set_exception(MyException('sad')) self.assertTrue(base_f.done()) self.assertTrue(new_f.done()) self.assertTrue(not new_f.exception()) self.assertTrue(new_f.result() == 'sad repackaged') def test_err_catch_ignore(self): base_f = ThenableFuture() new_f = base_f.catch() self.assertTrue(base_f is not new_f) self.assertTrue(not base_f.done()) self.assertTrue(not new_f.done()) base_f.set_exception(Exception('sad')) self.assertTrue(base_f.done()) self.assertTrue(new_f.done()) self.assertTrue(new_f.exception() is None) self.assertTrue(new_f.result() is None) def test_success_callback_and_failure_raised(self): class MyException(Exception): pass def raise_something_else(value): raise MyException(value + ' repackaged') base_f = ThenableFuture() new_f = base_f.then(raise_something_else) self.assertTrue(base_f is not new_f) self.assertTrue(not base_f.done()) self.assertTrue(not new_f.done()) base_f.set_result('sad') self.assertTrue(base_f.done()) self.assertTrue(new_f.done()) self.assertTrue(new_f.exception()) with self.assertRaises(MyException) as catcher: new_f.result() assert catcher.exception.args[0] == 'sad repackaged' def test_chained_success_callback_and_success(self): def transform(value): f = ThenableFuture() if value < 5: f.set_result(transform(value+1)) else: f.set_result(value) return f base_f = ThenableFuture() new_f = base_f.then(transform) self.assertTrue(base_f is not new_f) self.assertTrue(not base_f.done()) self.assertTrue(not new_f.done()) base_f.set_result(1) self.assertTrue(base_f.done()) self.assertTrue(new_f.done()) self.assertTrue(not new_f.exception()) self.assertTrue(new_f.result() == 5) def test_chained_failure_callback_and_success(self): def transform(exc): self.assertIsInstance(exc, RuntimeError) f = ThenableFuture() f.set_result(5) return f base_f = ThenableFuture() new_f = base_f.catch(transform) self.assertTrue(base_f is not new_f) self.assertTrue(not base_f.done()) self.assertTrue(not new_f.done()) base_f.set_exception(RuntimeError()) 
self.assertTrue(base_f.done()) self.assertTrue(new_f.done()) self.assertTrue(not new_f.exception()) self.assertTrue(new_f.result() == 5) def test_detect_cycle_chain(self): f1 = ThenableFuture() f2 = ThenableFuture() chain = [f1, f2, f1] def transform(a): try: f = chain.pop(0) r = transform(a) f.__init__() f.set_result(r) return f except IndexError: return 42 base_f = ThenableFuture() new_f = base_f.then(transform) self.assertTrue(base_f is not new_f) self.assertTrue(not base_f.done()) self.assertTrue(not new_f.done()) base_f.set_result(1) self.assertTrue(base_f.done()) self.assertTrue(new_f.done()) self.assertTrue(new_f.exception()) with self.assertRaises(RuntimeError) as catcher: new_f.result() self.assertTrue( 'chain cycle detected' in catcher.exception.args[0], ) def test_detect_self_chain(self): base_f = ThenableFuture() new_f = base_f.then(lambda arg: new_f) self.assertTrue(base_f is not new_f) self.assertTrue(not base_f.done()) self.assertTrue(not new_f.done()) base_f.set_result(1) self.assertTrue(base_f.done()) self.assertTrue(new_f.done()) self.assertTrue(new_f.exception()) with self.assertRaises(RuntimeError) as catcher: new_f.result() self.assertTrue( 'chain cycle detected' in catcher.exception.args[0], ) class CollectTest(unittest.TestCase): def test_empty(self): future = futures.collect([]) self.assertFalse(future.cancelled()) self.assertFalse(future.running()) self.assertTrue(future.done()) self.assertEqual(future.result(), []) def test_item_success(self): fs = [futures.Future() for _ in range(5)] future = futures.collect(fs) self.assertFalse(future.cancelled()) self.assertFalse(future.running()) self.assertFalse(future.done()) for i in range(5): fs[i].set_result(i) self.assertFalse(future.cancelled()) self.assertFalse(future.running()) self.assertTrue(future.done()) self.assertEqual(future.result(), list(range(5))) def test_item_failure(self): fs = [futures.Future() for _ in range(5)] future = futures.collect(fs) for i in range(2, 4): fs[i].set_result(i) fs[-1].set_exception(RuntimeError()) self.assertFalse(future.cancelled()) self.assertFalse(future.running()) self.assertTrue(future.done()) self.assertIsInstance(future.exception(), RuntimeError) for i in range(0, 2): self.assertTrue(fs[i].cancelled()) for i in range(2, 4): self.assertFalse(fs[i].cancelled()) self.assertFalse(fs[-1].cancelled()) def test_item_done(self): fs = [futures.Future() for _ in range(5)] for i in range(5): fs[i].set_result(i) future = futures.collect(fs) self.assertFalse(future.cancelled()) self.assertFalse(future.running()) self.assertTrue(future.done()) self.assertEqual(future.result(), list(range(5))) def test_item_cancel(self): fs = [futures.Future() for _ in range(5)] future = futures.collect(fs) for i in range(2, 4): fs[i].set_result(i) fs[-1].cancel() self.assertTrue(future.cancelled()) self.assertFalse(future.running()) self.assertTrue(future.done()) for i in range(0, 2): self.assertTrue(fs[i].cancelled()) for i in range(2, 4): self.assertFalse(fs[i].cancelled()) self.assertTrue(fs[-1].cancelled()) def test_cancel(self): fs = [futures.Future() for _ in range(5)] future = futures.collect(fs) future.cancel() for f in fs: self.assertTrue(f.cancelled()) def test_cancel_pending(self): class MyFuture(futures.Future): def cancel(self): pass fs = [MyFuture() for _ in range(5)] future = futures.collect(fs) self.assertIs(type(future), MyFuture) super(MyFuture, future).cancel() for f in fs: self.assertFalse(f.cancelled()) f.set_result(None) class ComposeTest(unittest.TestCase): def test_result(self): 
base = futures.Future() future = futures.compose(base) self.assertIs(type(future), type(base)) self.assertFalse(future.cancelled()) self.assertFalse(future.running()) self.assertFalse(future.done()) base.set_result(42) self.assertFalse(future.cancelled()) self.assertFalse(future.running()) self.assertTrue(future.done()) self.assertEqual(future.result(), 42) def test_except(self): base = futures.Future() future = futures.compose(base) self.assertIs(type(future), type(base)) self.assertFalse(future.cancelled()) self.assertFalse(future.running()) self.assertFalse(future.done()) base.set_exception(RuntimeError(42)) self.assertFalse(future.cancelled()) self.assertFalse(future.running()) self.assertTrue(future.done()) self.assertIs(type(future.exception()), RuntimeError) self.assertEqual(future.exception().args, (42,)) def test_cancel_new(self): base = futures.Future() future = futures.compose(base) base.cancel() self.assertTrue(future.cancelled()) def test_cancel_old(self): base = futures.Future() future = futures.compose(base) future.cancel() self.assertTrue(base.cancelled()) def test_result_hook(self): base = futures.Future() future = futures.compose(base, int) base.set_result('42') self.assertEqual(future.result(), 42) def test_result_hook_failure(self): base = futures.Future() future = futures.compose(base, resulthook=lambda x: 1/0) base.set_result(42) self.assertIs(type(future.exception()), ZeroDivisionError) def test_except_hook(self): base = futures.Future() future = futures.compose(base, excepthook=lambda exc: exc.args[0]) base.set_exception(RuntimeError(42)) self.assertEqual(future.result(), 42) def test_except_hook_except(self): base = futures.Future() future = futures.compose( base, excepthook=lambda exc: RuntimeError(exc.args[0]) ) base.set_exception(ValueError(42)) self.assertIs(type(future.exception()), RuntimeError) self.assertEqual(future.exception().args, (42,)) def test_except_hook_failure(self): base = futures.Future() future = futures.compose(base, excepthook=lambda exc: 1/0) base.set_exception(ValueError(42)) self.assertIs(type(future.exception()), ZeroDivisionError) def skip_spawn(): return ( os.environ.get('MPI4PY_TEST_SPAWN') in (None, '0', 'no', 'off', 'false') ) SKIP_POOL_TEST = False name, version = MPI.get_vendor() if name == 'Open MPI': if version < (3,0,0): SKIP_POOL_TEST = True if version == (4,0,0): SKIP_POOL_TEST = True if version == (4,0,1) and sys.platform=='darwin': SKIP_POOL_TEST = True if version == (4,0,2) and sys.platform=='darwin': SKIP_POOL_TEST = True if version == (4,1,2) and sys.platform=='linux': azure = (os.environ.get('TF_BUILD') == 'True') github = (os.environ.get('GITHUB_ACTIONS') == 'true') SKIP_POOL_TEST = azure or github if version >= (5,0,0) and version < (5,0,7): SKIP_POOL_TEST = skip_spawn() if name == 'MPICH': if sys.platform == 'darwin': if version >= (3, 4) and version < (4, 0): SKIP_POOL_TEST = True if version < (4, 1): if MPI.COMM_WORLD.Get_attr(MPI.APPNUM) is None: SKIP_POOL_TEST = True if version < (4, 3): try: port = MPI.Open_port() MPI.Close_port(port) except: port = "" if port == "": SKIP_POOL_TEST = True del port if name == 'Intel MPI': import mpi4py if mpi4py.rc.recv_mprobe: SKIP_POOL_TEST = True if name == 'Microsoft MPI': if version < (8,1,0): SKIP_POOL_TEST = True if skip_spawn(): SKIP_POOL_TEST = True if MPI.COMM_WORLD.Get_attr(MPI.APPNUM) is None: SKIP_POOL_TEST = True if os.environ.get("PMI_APPNUM") is None: SKIP_POOL_TEST = True if name == 'MVAPICH': SKIP_POOL_TEST = True if name == 'MPICH2': if 
MPI.COMM_WORLD.Get_attr(MPI.APPNUM) is None: SKIP_POOL_TEST = True if name == 'MVAPICH2': SKIP_POOL_TEST = True if MPI.Get_version() < (2,0): SKIP_POOL_TEST = True if SHARED_POOL: del ProcessPoolInitTest.test_init_sys_flags del ProcessPoolInitTest.test_init_globals del ProcessPoolInitTest.test_use_pkl5_kwarg del ProcessPoolInitTest.test_use_pkl5_environ if WORLD_SIZE == 1: del ProcessPoolInitTest.test_run_name if WORLD_SIZE > 2: del ProcessPoolInitTest.test_max_workers del ProcessPoolInitTest.test_max_workers_environ if WORLD_SIZE == 1: del ProcessPoolPickleTest del MPICommExecutorTest.test_arg_root del MPICommExecutorTest.test_arg_bad_root del MPICommExecutorTest.test_arg_bad_comm elif WORLD_SIZE > 1 or SKIP_POOL_TEST: del ProcessPoolInitTest del ProcessPoolBootupTest del ProcessPoolShutdownTest del ProcessPoolWaitTest del ProcessPoolAsCompletedTest del ProcessPoolExecutorTest del ProcessPoolSubmitTest del ProcessPoolPickleTest if not SHARED_POOL: del SharedPoolInitTest if __name__ == '__main__': unittest.main() mpi4py-4.0.3/demo/futures/test_service.py000066400000000000000000000014431475341043600204430ustar00rootroot00000000000000import sys from mpi4py.futures import MPIPoolExecutor def main(): def getarg(opt, default=None): try: return sys.argv[sys.argv.index('--' + opt) + 1] except ValueError: return default if '--host' in sys.argv or '--port' in sys.argv: service = (getarg('host'), getarg('port')) else: service = getarg('service') if '--info' in sys.argv: info = getarg('info').split(',') info = dict(entry.split('=') for entry in info if entry) else: info = None with MPIPoolExecutor(service=service, mpi_info=info) as executor: fut1 = executor.submit(abs, +42) fut2 = executor.submit(abs, -42) assert fut1.result(0) == 42 assert fut2.result(0) == 42 if __name__ == '__main__': main() mpi4py-4.0.3/demo/futures/test_service.sh000077500000000000000000000024231475341043600204270ustar00rootroot00000000000000#!/bin/bash PYTHON=${1-${PYTHON-python}} MPIEXEC=${MPIEXEC-mpiexec} testdir=$(dirname "$0") set -e if [ $(command -v mpichversion) ]; then $MPIEXEC -n 1 $PYTHON -m mpi4py.futures.server --xyz > /dev/null 2>&1 || true $MPIEXEC -n 2 $PYTHON -m mpi4py.futures.server --bind localhost & mpi4pyserver=$!; sleep 0.25; $MPIEXEC -n 1 $PYTHON $testdir/test_service.py --host localhost wait $mpi4pyserver $MPIEXEC -n 2 $PYTHON -m mpi4py.futures.server --port 31414 --info "a=x,b=y" & mpi4pyserver=$!; sleep 0.25; $MPIEXEC -n 1 $PYTHON $testdir/test_service.py --port 31414 --info "a=x,b=y" wait $mpi4pyserver fi if [ $(command -v mpichversion) ] && [ $(command -v hydra_nameserver) ]; then hydra_nameserver & nameserver=$!; sleep 0.25; $MPIEXEC -nameserver localhost -n 2 $PYTHON -m mpi4py.futures.server & mpi4pyserver=$!; sleep 0.25; $MPIEXEC -nameserver localhost -n 1 $PYTHON $testdir/test_service.py wait $mpi4pyserver $MPIEXEC -nameserver localhost -n 2 $PYTHON -m mpi4py.futures.server --service test-service & mpi4pyserver=$!; sleep 0.25; $MPIEXEC -nameserver localhost -n 1 $PYTHON $testdir/test_service.py --service test-service wait $mpi4pyserver kill -TERM $nameserver wait $nameserver 2>/dev/null || true fi mpi4py-4.0.3/demo/helloworld.c000066400000000000000000000011551475341043600162140ustar00rootroot00000000000000#include #include int main(int argc, char *argv[]) { int size, rank, len; char name[MPI_MAX_PROCESSOR_NAME]; #if defined(MPI_VERSION) && (MPI_VERSION >= 2) int provided; MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided); #else MPI_Init(&argc, &argv); #endif 
MPI_Comm_size(MPI_COMM_WORLD, &size); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Get_processor_name(name, &len); printf("Hello, World! I am process %d of %d on %s.\n", rank, size, name); MPI_Finalize(); return 0; } /* * Local Variables: * mode: C * c-basic-offset: 2 * indent-tabs-mode: nil * End: */ mpi4py-4.0.3/demo/helloworld.cxx000066400000000000000000000012111475341043600165650ustar00rootroot00000000000000#include #include int main(int argc, char *argv[]) { #if defined(MPI_VERSION) && (MPI_VERSION >= 2) MPI::Init_thread(MPI_THREAD_MULTIPLE); #else MPI::Init(); #endif int size = MPI::COMM_WORLD.Get_size(); int rank = MPI::COMM_WORLD.Get_rank(); int len; char name[MPI_MAX_PROCESSOR_NAME]; MPI::Get_processor_name(name, len); std::cout << "Hello, World! " << "I am process " << rank << " of " << size << " on " << name << "." << std::endl; MPI::Finalize(); return 0; } // Local Variables: // mode: C++ // c-basic-offset: 2 // indent-tabs-mode: nil // End: mpi4py-4.0.3/demo/helloworld.f08000066400000000000000000000007711475341043600163720ustar00rootroot00000000000000program main use mpi_f08 implicit none integer :: provided, size, rank, len character (len=MPI_MAX_PROCESSOR_NAME) :: name call MPI_Init_thread(MPI_THREAD_MULTIPLE, provided) call MPI_Comm_rank(MPI_COMM_WORLD, rank) call MPI_Comm_size(MPI_COMM_WORLD, size) call MPI_Get_processor_name(name, len) write(*, '(2A,I2,A,I2,3A)') & 'Hello, World! ', & 'I am process ', rank, & ' of ', size, & ' on ', name(1:len), '.' call MPI_Finalize() end program main mpi4py-4.0.3/demo/helloworld.f90000066400000000000000000000010271475341043600163660ustar00rootroot00000000000000program main use mpi implicit none integer :: provided, ierr, size, rank, len character (len=MPI_MAX_PROCESSOR_NAME) :: name call MPI_Init_thread(MPI_THREAD_MULTIPLE, provided, ierr) call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr) call MPI_Comm_size(MPI_COMM_WORLD, size, ierr) call MPI_Get_processor_name(name, len, ierr) write(*, '(2A,I2,A,I2,3A)') & 'Hello, World! ', & 'I am process ', rank, & ' of ', size, & ' on ', name(1:len), '.' call MPI_Finalize(ierr) end program main mpi4py-4.0.3/demo/helloworld.py000066400000000000000000000003741475341043600164240ustar00rootroot00000000000000#!/usr/bin/env python """ Parallel Hello World """ from mpi4py import MPI size = MPI.COMM_WORLD.Get_size() rank = MPI.COMM_WORLD.Get_rank() name = MPI.Get_processor_name() print(f"Hello, World! 
I am process {rank} of {size} on {name}.", flush=True) mpi4py-4.0.3/demo/init-fini/000077500000000000000000000000001475341043600155615ustar00rootroot00000000000000mpi4py-4.0.3/demo/init-fini/makefile000066400000000000000000000024511475341043600172630ustar00rootroot00000000000000include ../config.mk .PHONY: default build test clean default: test build: NP = 2 test: $(MPIEXEC) $(NP_FLAG) $(NP) $(PYTHON) test_0.py threads=true $(MPIEXEC) $(NP_FLAG) $(NP) $(PYTHON) test_0.py threads=false $(MPIEXEC) $(NP_FLAG) $(NP) $(PYTHON) test_0.py thread_level=single $(MPIEXEC) $(NP_FLAG) $(NP) $(PYTHON) test_0.py thread_level=funneled $(MPIEXEC) $(NP_FLAG) $(NP) $(PYTHON) test_0.py thread_level=serialized $(MPIEXEC) $(NP_FLAG) $(NP) $(PYTHON) test_0.py thread_level=multiple $(MPIEXEC) $(NP_FLAG) $(NP) $(PYTHON) test_0.py fast_reduce=true $(MPIEXEC) $(NP_FLAG) $(NP) $(PYTHON) test_0.py fast_reduce=false $(MPIEXEC) $(NP_FLAG) $(NP) $(PYTHON) test_0.py recv_mprobe=true $(MPIEXEC) $(NP_FLAG) $(NP) $(PYTHON) test_0.py recv_mprobe=false $(MPIEXEC) $(NP_FLAG) $(NP) $(PYTHON) test_0.py errors=default $(MPIEXEC) $(NP_FLAG) $(NP) $(PYTHON) test_0.py errors=exception $(MPIEXEC) $(NP_FLAG) $(NP) $(PYTHON) test_0.py errors=abort $(MPIEXEC) $(NP_FLAG) $(NP) $(PYTHON) test_0.py errors=fatal $(MPIEXEC) $(NP_FLAG) $(NP) $(PYTHON) test_1.py $(MPIEXEC) $(NP_FLAG) $(NP) $(PYTHON) test_2.py $(MPIEXEC) $(NP_FLAG) $(NP) $(PYTHON) test_3.py $(MPIEXEC) $(NP_FLAG) $(NP) $(PYTHON) test_4.py $(MPIEXEC) $(NP_FLAG) $(NP) $(PYTHON) test_5.py $(MPIEXEC) $(NP_FLAG) $(NP) $(PYTHON) test_6.py clean: mpi4py-4.0.3/demo/init-fini/run.sh000077500000000000000000000027551475341043600167350ustar00rootroot00000000000000#!/bin/sh set -eu PYTHON=${PYTHON:-python${py:-}} MPIEXEC=${MPIEXEC:-mpiexec} NP_FLAG=${NP_FLAG:-'-n'} NP=${NP:-2} dir=$(dirname -- "$0") set -x $MPIEXEC $NP_FLAG $NP $PYTHON "$dir/test_0.py" threads=true $MPIEXEC $NP_FLAG $NP $PYTHON "$dir/test_0.py" threads=false $MPIEXEC $NP_FLAG $NP $PYTHON "$dir/test_0.py" thread_level=single $MPIEXEC $NP_FLAG $NP $PYTHON "$dir/test_0.py" thread_level=funneled $MPIEXEC $NP_FLAG $NP $PYTHON "$dir/test_0.py" thread_level=serialized $MPIEXEC $NP_FLAG $NP $PYTHON "$dir/test_0.py" thread_level=multiple $MPIEXEC $NP_FLAG $NP $PYTHON "$dir/test_0.py" fast_reduce=true $MPIEXEC $NP_FLAG $NP $PYTHON "$dir/test_0.py" fast_reduce=false $MPIEXEC $NP_FLAG $NP $PYTHON "$dir/test_0.py" recv_mprobe=true $MPIEXEC $NP_FLAG $NP $PYTHON "$dir/test_0.py" recv_mprobe=false $MPIEXEC $NP_FLAG $NP $PYTHON "$dir/test_0.py" irecv_bufsz=0 $MPIEXEC $NP_FLAG $NP $PYTHON "$dir/test_0.py" irecv_bufsz=1 $MPIEXEC $NP_FLAG $NP $PYTHON "$dir/test_0.py" irecv_bufsz=1024 $MPIEXEC $NP_FLAG $NP $PYTHON "$dir/test_0.py" errors=default $MPIEXEC $NP_FLAG $NP $PYTHON "$dir/test_0.py" errors=exception $MPIEXEC $NP_FLAG $NP $PYTHON "$dir/test_0.py" errors=abort $MPIEXEC $NP_FLAG $NP $PYTHON "$dir/test_0.py" errors=fatal $MPIEXEC $NP_FLAG $NP $PYTHON "$dir/test_1.py" $MPIEXEC $NP_FLAG $NP $PYTHON "$dir/test_2.py" $MPIEXEC $NP_FLAG $NP $PYTHON "$dir/test_3.py" $MPIEXEC $NP_FLAG $NP $PYTHON "$dir/test_4.py" $MPIEXEC $NP_FLAG $NP $PYTHON "$dir/test_5.py" $MPIEXEC $NP_FLAG $NP $PYTHON "$dir/test_6.py" mpi4py-4.0.3/demo/init-fini/test_0.py000066400000000000000000000047041475341043600173360ustar00rootroot00000000000000import os import sys from mpi4py import rc vmap = { 'true': True, 'false': False, '0': 0, '1': 1, '1024': 1024, } for arg in sys.argv[1:]: attr, value = arg.split('=') setattr(rc, attr, vmap.get(value, value)) if rc.errors == 
'abort': rc.initialize = False from mpi4py import MPI if rc.errors == 'abort': vendor, version = MPI.get_vendor() if vendor == 'Intel MPI': sys.exit(0) if vendor == 'MPICH': if version[0] > 3 and version[:2] < (4, 1): sys.exit(0) def check_errhandler(obj): if rc.errors == 'default': if isinstance(obj, MPI.File): check_eh = MPI.ERRORS_RETURN else: check_eh = MPI.ERRORS_ARE_FATAL elif rc.errors == 'exception': check_eh = MPI.ERRORS_RETURN elif rc.errors == 'abort': if MPI.ERRORS_ABORT != MPI.ERRHANDLER_NULL: check_eh = MPI.ERRORS_ABORT else: check_eh = MPI.ERRORS_ARE_FATAL elif rc.errors == 'fatal': check_eh = MPI.ERRORS_ARE_FATAL else: assert 0 eh = obj.Get_errhandler() try: assert eh == check_eh finally: if eh != MPI.ERRHANDLER_NULL: eh.Free() if not MPI.Is_initialized(): MPI.Init() try: session = MPI.Session.Init() try: check_errhandler(session) finally: session.Finalize() except NotImplementedError: pass except MPI.Exception: pass try: for commbase in (MPI.COMM_SELF, MPI.COMM_WORLD): check_errhandler(commbase) comm = commbase.Dup() try: check_errhandler(comm) finally: comm.Free() except NotImplementedError: pass except MPI.Exception: pass weh = MPI.COMM_SELF.Get_errhandler() MPI.COMM_SELF.Set_errhandler(MPI.ERRORS_RETURN) try: win = MPI.Win.Create( MPI.BOTTOM, 1, MPI.INFO_NULL, MPI.COMM_SELF, ) try: check_errhandler(win) finally: win.Free() except NotImplementedError: pass except MPI.Exception: pass finally: MPI.COMM_SELF.Set_errhandler(weh) weh.Free() try: # check_errhandler(MPI.FILE_NULL) # TODO file = MPI.File.Open( MPI.COMM_SELF, os.devnull, MPI.MODE_WRONLY, MPI.INFO_NULL, ) try: check_errhandler(file) finally: file.Close() except NotImplementedError: pass except MPI.Exception: pass if not MPI.Is_finalized(): MPI.Finalize() mpi4py-4.0.3/demo/init-fini/test_1.py000066400000000000000000000004161475341043600173330ustar00rootroot00000000000000from mpi4py import rc rc.initialize = False from mpi4py import MPI assert not MPI.Is_initialized() assert not MPI.Is_finalized() MPI.Init() assert MPI.Is_initialized() assert not MPI.Is_finalized() MPI.Finalize() assert MPI.Is_initialized() assert MPI.Is_finalized() mpi4py-4.0.3/demo/init-fini/test_2.py000066400000000000000000000005231475341043600173330ustar00rootroot00000000000000from mpi4py import rc rc.initialize = False from mpi4py import MPI assert not MPI.Is_initialized() assert not MPI.Is_finalized() level = MPI.Init_thread(MPI.THREAD_MULTIPLE) assert MPI.Query_thread() == level assert MPI.Is_initialized() assert not MPI.Is_finalized() MPI.Finalize() assert MPI.Is_initialized() assert MPI.Is_finalized() mpi4py-4.0.3/demo/init-fini/test_3.py000066400000000000000000000014601475341043600173350ustar00rootroot00000000000000import os from mpi4py import rc assert rc.initialize is True assert rc.finalize is None assert rc.threads is True assert rc.thread_level == 'multiple' os.environ['MPI4PY_RC_INITIALIZE'] = 'false' os.environ['MPI4PY_RC_FINALIZE'] = 'true' os.environ['MPI4PY_RC_THREADS'] = 'false' os.environ['MPI4PY_RC_THREAD_LEVEL'] = 'single' os.environ['MPI4PY_RC_IRECV_BUFSZ'] = str(2048) os.environ['MPI4PY_PICKLE_PROTOCOL'] = str(3) os.environ['MPI4PY_PICKLE_THRESHOLD'] = str(1024) from mpi4py import MPI assert not MPI.Is_initialized() assert not MPI.Is_finalized() assert rc.initialize is False assert rc.finalize is True assert rc.threads is False assert rc.thread_level == 'single' assert rc.irecv_bufsz == 2048 assert MPI.pickle.PROTOCOL == 3 assert MPI.pickle.THRESHOLD == 1024 
mpi4py-4.0.3/demo/init-fini/test_4.py000066400000000000000000000003021475341043600173300ustar00rootroot00000000000000from mpi4py import rc rc.finalize = False from mpi4py import MPI assert MPI.Is_initialized() assert not MPI.Is_finalized() MPI.Finalize() assert MPI.Is_initialized() assert MPI.Is_finalized() mpi4py-4.0.3/demo/init-fini/test_5.py000066400000000000000000000003171475341043600173370ustar00rootroot00000000000000import os import sys import mpi4py del mpi4py.rc del sys.modules['mpi4py.rc'] os.environ['MPI4PY_RC_ERRORS'] = 'exception' from mpi4py import MPI assert MPI.Is_initialized() assert not MPI.Is_finalized() mpi4py-4.0.3/demo/init-fini/test_6.py000066400000000000000000000010721475341043600173370ustar00rootroot00000000000000from os import environ from warnings import catch_warnings from mpi4py import rc ATTRS = {a for a in dir(rc) if not a.startswith('_')} VALUE = -123456789 for attr in ATTRS: environ.pop(f"MPI4PY_RC_{attr.upper()}", None) setattr(rc, attr, VALUE) with catch_warnings(record=True) as warnings: from mpi4py import MPI template = "mpi4py.rc.{}: unexpected value {}" expected = sorted({template.format(attr, repr(VALUE)) for attr in ATTRS}) messages = sorted({str(entry.message) for entry in warnings}) assert messages == expected, "\n" + "\n".join(messages) mpi4py-4.0.3/demo/libmpi-cffi/000077500000000000000000000000001475341043600160545ustar00rootroot00000000000000mpi4py-4.0.3/demo/libmpi-cffi/apigen.py000066400000000000000000000014041475341043600176700ustar00rootroot00000000000000import sys, os.path as p wdir = p.abspath(p.dirname(__file__)) topdir = p.normpath(p.join(wdir, p.pardir, p.pardir)) srcdir = p.join(topdir, 'src') sys.path.insert(0, p.join(topdir, 'conf')) from mpiapigen import Generator generator = Generator() libmpi_pxd = p.join(srcdir, 'mpi4py', 'libmpi.pxd') generator.parse_file(libmpi_pxd) libmpi_h = p.join(wdir, 'libmpi.h') generator.dump_header_h(libmpi_h) #from io import StringIO #libmpi_h = StringIO() #generator.dump_header_h(libmpi_h) #print libmpi_h.read() libmpi_c = p.join(wdir, 'libmpi.c.in') with open(libmpi_c, 'w') as f: f.write(f"""\ #include #include "{srcdir}/lib-mpi/config.h" #include "{srcdir}/lib-mpi/missing.h" #include "{srcdir}/lib-mpi/fallback.h" #include "{srcdir}/lib-mpi/compat.h" """) mpi4py-4.0.3/demo/libmpi-cffi/build.py000066400000000000000000000030251475341043600175250ustar00rootroot00000000000000import os import shlex import shutil import cffi ffi = cffi.FFI() with open("libmpi.c.in") as f: ffi.set_source("libmpi", f.read()) with open("libmpi.h") as f: ffi.cdef(f.read()) class mpicompiler: from cffi import ffiplatform def __init__(self, cc, ld=None): self.cc = cc self.ld = ld if ld else cc self.ffi_compile = self.ffiplatform.compile def __enter__(self): self.ffiplatform.compile = self.compile def __exit__(self, *args): self.ffiplatform.compile = self.ffi_compile def configure(self, compiler): def fix_command(command, cmd): if not cmd: return cmd = shlex.split(cmd) exe = shutil.which(cmd[0]) if not exe: return command[0] = exe command += cmd[1:] fix_command(compiler.compiler_so, self.cc) fix_command(compiler.linker_so, self.ld) def compile(self, *args, **kwargs): from distutils.command import build_ext customize_compiler_orig = build_ext.customize_compiler def customize_compiler(compiler): customize_compiler_orig(compiler) self.configure(compiler) build_ext.customize_compiler = customize_compiler try: return self.ffi_compile(*args, **kwargs) finally: build_ext.customize_compiler = customize_compiler_orig if __name__ == 
'__main__': cc = os.environ.get('MPICC', 'mpicc') ld = os.environ.get('MPILD') with mpicompiler(cc, ld): ffi.compile() mpi4py-4.0.3/demo/libmpi-cffi/makefile000066400000000000000000000005251475341043600175560ustar00rootroot00000000000000include ../config.mk .PHONY: default build test clean default: build test clean libmpi.h libmpi.c.in: $(PYTHON) apigen.py build: libmpi.h libmpi.c.in $(PYTHON) build.py NP = 2 test: build $(MPIEXEC_PYTHON) test_helloworld.py $(MPIEXEC_PYTHON) test_ringtest.py $(MPIEXEC_PYTHON) test_latency.py clean: $(RM) -r libmpi.* __pycache__ mpi4py-4.0.3/demo/libmpi-cffi/test_helloworld.py000066400000000000000000000010121475341043600216320ustar00rootroot00000000000000from libmpi import ffi, lib NULL = ffi.NULL size_p = ffi.new('int*') rank_p = ffi.new('int*') nlen_p = ffi.new('int*') name_p = ffi.new('char[]', lib.MPI_MAX_PROCESSOR_NAME); lib.MPI_Init(NULL, NULL); lib.MPI_Comm_size(lib.MPI_COMM_WORLD, size_p) lib.MPI_Comm_rank(lib.MPI_COMM_WORLD, rank_p) lib.MPI_Get_processor_name(name_p, nlen_p) size = size_p[0] rank = rank_p[0] nlen = nlen_p[0] name = ffi.string(name_p[0:nlen]).decode() print(f"Hello, World! I am process {rank} of {size} on {name}.") lib.MPI_Finalize() mpi4py-4.0.3/demo/libmpi-cffi/test_latency.py000066400000000000000000000040321475341043600211230ustar00rootroot00000000000000# http://mvapich.cse.ohio-state.edu/benchmarks/ from libmpi import ffi, lib def osu_latency( BENCHMARH = "MPI Latency Test", skip = 1000, loop = 10000, skip_large = 10, loop_large = 100, large_message_size = 8192, MAX_MSG_SIZE = 1<<22, ): myid = ffi.new('int*') numprocs = ffi.new('int*') lib.MPI_Comm_rank(lib.MPI_COMM_WORLD, myid) lib.MPI_Comm_size(lib.MPI_COMM_WORLD, numprocs) myid = myid[0] numprocs = numprocs[0] if numprocs != 2: if myid == 0: errmsg = "This test requires exactly two processes" else: errmsg = None raise SystemExit(errmsg) sbuf = ffi.new('unsigned char[]', MAX_MSG_SIZE) rbuf = ffi.new('unsigned char[]', MAX_MSG_SIZE) dtype = lib.MPI_BYTE tag = 1 comm = lib.MPI_COMM_WORLD status = lib.MPI_STATUS_IGNORE if myid == 0: print (f'# {BENCHMARH}') if myid == 0: print ('# %-8s%20s' % ("Size [B]", "Latency [us]")) message_sizes = [0] + [2**i for i in range(30)] for size in message_sizes: if size > MAX_MSG_SIZE: break if size > large_message_size: skip = skip_large loop = loop_large iterations = list(range(loop+skip)) # lib.MPI_Barrier(comm) if myid == 0: for i in iterations: if i == skip: t_start = lib.MPI_Wtime() lib.MPI_Send(sbuf, size, dtype, 1, tag, comm) lib.MPI_Recv(rbuf, size, dtype, 1, tag, comm, status) t_end = lib.MPI_Wtime() elif myid == 1: for i in iterations: lib.MPI_Recv(rbuf, size, dtype, 0, tag, comm, status) lib.MPI_Send(sbuf, size, dtype, 0, tag, comm) # if myid == 0: latency = (t_end - t_start) * 1e6 / (2 * loop) print ('%-10d%20.2f' % (size, latency)) def main(): lib.MPI_Init(ffi.NULL, ffi.NULL) osu_latency() lib.MPI_Finalize() if __name__ == '__main__': main() mpi4py-4.0.3/demo/libmpi-cffi/test_ringtest.py000066400000000000000000000043741475341043600213340ustar00rootroot00000000000000from libmpi import ffi, lib def ring(comm, count=1, loop=1, skip=0): size_p = ffi.new('int*') rank_p = ffi.new('int*') lib.MPI_Comm_size(comm, size_p) lib.MPI_Comm_rank(comm, rank_p) size = size_p[0] rank = rank_p[0] source = (rank - 1) % size dest = (rank + 1) % size sbuf = ffi.new('unsigned char[]', [42]*count) rbuf = ffi.new('unsigned char[]', [ 0]*count) iterations = list(range(loop+skip)) if size == 1: for i in iterations: if i == skip: tic = lib.MPI_Wtime() 
lib.MPI_Sendrecv(sbuf, count, lib.MPI_BYTE, dest, 0, rbuf, count, lib.MPI_BYTE, source, 0, comm, lib.MPI_STATUS_IGNORE) else: if rank == 0: for i in iterations: if i == skip: tic = lib.MPI_Wtime() lib.MPI_Send(sbuf, count, lib.MPI_BYTE, dest, 0, comm) lib.MPI_Recv(rbuf, count, lib.MPI_BYTE, source, 0, comm, lib.MPI_STATUS_IGNORE) else: sbuf = rbuf for i in iterations: if i == skip: tic = lib.MPI_Wtime() lib.MPI_Recv(rbuf, count, lib.MPI_BYTE, source, 0, comm, lib.MPI_STATUS_IGNORE) lib.MPI_Send(sbuf, count, lib.MPI_BYTE, dest, 0, comm) toc = lib.MPI_Wtime() if rank == 0 and ffi.string(sbuf) != ffi.string(rbuf): import warnings, traceback try: warnings.warn("received message does not match!") except UserWarning: traceback.print_exc() lib.MPI_Abort(comm, 2) return toc - tic def ringtest(comm): size = ( 1 ) loop = ( 1 ) skip = ( 0 ) lib.MPI_Barrier(comm) elapsed = ring(comm, size, loop, skip) size_p = ffi.new('int*') rank_p = ffi.new('int*') lib.MPI_Comm_size(comm, size_p) lib.MPI_Comm_rank(comm, rank_p) comm_size = size_p[0] comm_rank = rank_p[0] if comm_rank == 0: print ("time for %d loops = %g seconds (%d processes, %d bytes)" % (loop, elapsed, comm_size, size)) def main(): lib.MPI_Init(ffi.NULL, ffi.NULL) ringtest(lib.MPI_COMM_WORLD) lib.MPI_Finalize() if __name__ == '__main__': main() mpi4py-4.0.3/demo/makefile000066400000000000000000000010771475341043600154000ustar00rootroot00000000000000.PHONY: default wrap default: $(MAKE) -C compute-pi $(MAKE) -C mandelbrot $(MAKE) -C nxtval $(MAKE) -C reductions $(MAKE) -C sequential $(MAKE) -C spawning $(MAKE) -C profiling $(MAKE) -C cython $(MAKE) -C embedding $(MAKE) -C libmpi-cffi $(MAKE) -C mpi-ref-v1 $(MAKE) -C init-fini $(MAKE) -C threads $(MAKE) -C futures $(MAKE) wrap wrap: $(MAKE) -C wrap-c $(MAKE) -C wrap-cffi $(MAKE) -C wrap-ctypes $(MAKE) -C wrap-cython $(MAKE) -C wrap-swig $(MAKE) -C wrap-pybind11 $(MAKE) -C wrap-f2py $(MAKE) -C wrap-ctypes-f90 $(MAKE) -C wrap-ctypes-f08 mpi4py-4.0.3/demo/mandelbrot/000077500000000000000000000000001475341043600160225ustar00rootroot00000000000000mpi4py-4.0.3/demo/mandelbrot/makefile000066400000000000000000000006531475341043600175260ustar00rootroot00000000000000include ../config.mk .PHONY: default build test clean default: build test clean PROGRAM = mandelbrot-worker SOURCE = $(PROGRAM).f90 TARGET = $(PROGRAM).exe FCFLAGS = -O3 $(TARGET): $(SOURCE) $(MPIFORT) $(FCFLAGS) -o $@ $< build: $(TARGET) test: build $(MPIEXEC) $(NP_FLAG) 1 $(PYTHON) mandelbrot-master.py $(MPIEXEC) $(NP_FLAG) 7 $(PYTHON) mandelbrot.py $(PYTHON) mandelbrot-seq.py clean: $(RM) $(TARGET) __pycache__ mpi4py-4.0.3/demo/mandelbrot/mandelbrot-master.py000066400000000000000000000026041475341043600220160ustar00rootroot00000000000000from mpi4py import MPI import numpy as np x1 = -2.0 x2 = 1.0 y1 = -1.0 y2 = 1.0 w = 600 h = 400 maxit = 255 import os dirname = os.path.abspath(os.path.dirname(__file__)) executable = os.path.join(dirname, 'mandelbrot-worker.exe') # spawn worker worker = MPI.COMM_SELF.Spawn(executable, maxprocs=7) size = worker.Get_remote_size() # send parameters rmsg = np.array([x1, x2, y1, y2], dtype='f') imsg = np.array([w, h, maxit], dtype='i') worker.Bcast([rmsg, MPI.REAL], root=MPI.ROOT) worker.Bcast([imsg, MPI.INTEGER], root=MPI.ROOT) # gather results counts = np.empty(size, dtype='i') indices = np.empty(h, dtype='i') cdata = np.empty([h, w], dtype='i') worker.Gather(sendbuf=None, recvbuf=[counts, MPI.INTEGER], root=MPI.ROOT) worker.Gatherv(sendbuf=None, recvbuf=[indices, (counts, None), MPI.INTEGER], root=MPI.ROOT) 
worker.Gatherv(sendbuf=None, recvbuf=[cdata, (counts * w, None), MPI.INTEGER], root=MPI.ROOT) # disconnect worker worker.Disconnect() # reconstruct full result M = np.zeros([h, w], dtype='i') M[indices, :] = cdata # eye candy (requires matplotlib) if 1: try: from matplotlib import pyplot as plt plt.imshow(M, aspect='equal') try: plt.nipy_spectral() except AttributeError: plt.spectral() plt.pause(2) except: pass mpi4py-4.0.3/demo/mandelbrot/mandelbrot-seq.py000066400000000000000000000015211475341043600213100ustar00rootroot00000000000000import numpy as np import time tic = time.time() x1 = -2.0 x2 = 1.0 y1 = -1.0 y2 = 1.0 w = 150 h = 100 maxit = 127 def mandelbrot(x, y, maxit): c = x + y*1j z = 0 + 0j it = 0 while abs(z) < 2 and it < maxit: z = z**2 + c it += 1 return it dx = (x2 - x1) / w dy = (y2 - y1) / h C = np.empty([h, w], dtype='i') for k in np.arange(h): y = y1 + k * dy for j in np.arange(w): x = x1 + j * dx C[k, j] = mandelbrot(x, y, maxit) M = C toc = time.time() print('wall clock time: %8.2f seconds' % (toc-tic)) # eye candy (requires matplotlib) if 1: try: from matplotlib import pyplot as plt plt.imshow(M, aspect='equal') try: plt.nipy_spectral() except AttributeError: plt.spectral() plt.pause(2) except: pass mpi4py-4.0.3/demo/mandelbrot/mandelbrot-worker.f90000066400000000000000000000044761475341043600220130ustar00rootroot00000000000000! $ mpifort -o mandelbrot.exe mandelbrot.f90 program main use MPI implicit none integer master, nprocs, myrank, ierr real :: rmsg(4), x1, x2, y1, y2 integer :: imsg(3), w, h, maxit integer :: N integer, allocatable :: I(:) integer, allocatable :: C(:,:) integer :: empty(0) integer :: j, k real :: x, dx, y, dy call MPI_Init(ierr) call MPI_Comm_get_parent(master, ierr) if (master == MPI_COMM_NULL) then print *, "parent communicator is MPI_COMM_NULL" call MPI_Abort(MPI_COMM_WORLD, 1, ierr) end if call MPI_Comm_size(master, nprocs, ierr) call MPI_Comm_rank(master, myrank, ierr) ! receive parameters and unpack call MPI_Bcast(rmsg, 4, MPI_REAL, 0, master, ierr) call MPI_Bcast(imsg, 3, MPI_INTEGER, 0, master, ierr) x1 = rmsg(1); x2 = rmsg(2) y1 = rmsg(3); y2 = rmsg(4) w = imsg(1); h = imsg(2); maxit = imsg(3) dx = (x2-x1)/real(w) dy = (y2-y1)/real(h) ! number of lines to compute here N = h / nprocs if (modulo(h, nprocs) > myrank) then N = N + 1 end if ! indices of lines to compute here allocate( I(0:N-1) ) I = (/ (k, k=myrank, h-1, nprocs) /) ! compute local lines allocate( C(0:w-1, 0:N-1) ) do k = 0, N-1 y = y1 + real(I(k)) * dy do j = 0, w-1 x = x1 + real(j) * dx C(j, k) = mandelbrot(x, y, maxit) end do end do ! send number of lines computed here call MPI_Gather(N, 1, MPI_INTEGER, & MPI_BOTTOM, 0, MPI_BYTE, & 0, master, ierr) ! send indices of lines computed here call MPI_Gatherv(I, N, MPI_INTEGER, & MPI_BOTTOM, empty, empty, MPI_BYTE, & 0, master, ierr) ! send data of lines computed here call MPI_Gatherv(C, N*w, MPI_INTEGER, & MPI_BOTTOM, empty, empty, MPI_BYTE, & 0, master, ierr) deallocate(C) deallocate(I) ! we are done call MPI_Comm_disconnect(master, ierr) call MPI_Finalize(ierr) contains function mandelbrot(x, y, maxit) result (it) implicit none real, intent(in) :: x, y integer, intent(in) :: maxit integer :: it complex :: z, c z = cmplx(0, 0) c = cmplx(x, y) it = 0 do while (abs(z) < 2.0 .and. 
it < maxit) z = z*z + c it = it + 1 end do end function mandelbrot end program main mpi4py-4.0.3/demo/mandelbrot/mandelbrot.py000066400000000000000000000046251475341043600205320ustar00rootroot00000000000000from mpi4py import MPI import numpy as np tic = MPI.Wtime() x1 = -2.0 x2 = 1.0 y1 = -1.0 y2 = 1.0 w = 150 h = 100 maxit = 127 def mandelbrot(x, y, maxit): c = x + y*1j z = 0 + 0j it = 0 while abs(z) < 2 and it < maxit: z = z**2 + c it += 1 return it comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() rmsg = np.empty(4, dtype='f') imsg = np.empty(3, dtype='i') if rank == 0: rmsg[:] = [x1, x2, y1, y2] imsg[:] = [w, h, maxit] comm.Bcast([rmsg, MPI.FLOAT], root=0) comm.Bcast([imsg, MPI.INT], root=0) x1, x2, y1, y2 = (float(r) for r in rmsg) w, h, maxit = (int(i) for i in imsg) dx = (x2 - x1) / w dy = (y2 - y1) / h # number of lines to compute here N = h // size + (h % size > rank) N = np.array(N, dtype='i') # indices of lines to compute here I = np.arange(rank, h, size, dtype='i') # compute local lines C = np.empty([N, w], dtype='i') for k in np.arange(N): y = y1 + I[k] * dy for j in np.arange(w): x = x1 + j * dx C[k, j] = mandelbrot(x, y, maxit) # gather results at root counts = 0 indices = None cdata = None if rank == 0: counts = np.empty(size, dtype='i') indices = np.empty(h, dtype='i') cdata = np.empty([h, w], dtype='i') comm.Gather(sendbuf=[N, MPI.INT], recvbuf=[counts, MPI.INT], root=0) comm.Gatherv(sendbuf=[I, MPI.INT], recvbuf=[indices, (counts, None), MPI.INT], root=0) comm.Gatherv(sendbuf=[C, MPI.INT], recvbuf=[cdata, (counts*w, None), MPI.INT], root=0) # reconstruct full result at root if rank == 0: M = np.zeros([h,w], dtype='i') M[indices, :] = cdata toc = MPI.Wtime() wct = comm.gather(toc-tic, root=0) if rank == 0: for task, time in enumerate(wct): print('wall clock time: %8.2f seconds (task %d)' % (time, task)) def mean(seq): return sum(seq)/len(seq) print ('all tasks, mean: %8.2f seconds' % mean(wct)) print ('all tasks, min: %8.2f seconds' % min(wct)) print ('all tasks, max: %8.2f seconds' % max(wct)) print ('all tasks, sum: %8.2f seconds' % sum(wct)) # eye candy (requires matplotlib) if rank == 0: try: from matplotlib import pyplot as plt plt.imshow(M, aspect='equal') try: plt.nipy_spectral() except AttributeError: plt.spectral() plt.pause(2) except: pass MPI.COMM_WORLD.Barrier() mpi4py-4.0.3/demo/mpi-ref-v1/000077500000000000000000000000001475341043600155565ustar00rootroot00000000000000mpi4py-4.0.3/demo/mpi-ref-v1/README.txt000066400000000000000000000006601475341043600172560ustar00rootroot00000000000000@Book{MPI-Ref-V1, title = {{MPI} - The Complete Reference: Volume 1, The {MPI} Core}, author = {Marc Snir and Steve Otto and Steven Huss-Lederman and David Walker and Jack Dongarra}, edition = {2nd.}, year = 1998, publisher = {MIT Press}, volume = {1, The MPI Core}, series = {Scientific and Engineering Computation}, address = {Cambridge, MA, USA}, } mpi4py-4.0.3/demo/mpi-ref-v1/ex-2.01.py000066400000000000000000000017561475341043600171330ustar00rootroot00000000000000## mpiexec -n 2 python ex-2.01.py # Process 0 sends a message to process 1 # -------------------------------------------------------------------- from mpi4py import MPI import array if MPI.COMM_WORLD.Get_size() < 2: raise SystemExit # -------------------------------------------------------------------- s = b"Hello there" msg = array.array('B', b'\0'*20) tag = 99 status = MPI.Status() myrank = MPI.COMM_WORLD.Get_rank() if myrank == 0: msg[:len(s)] = array.array('B', s) MPI.COMM_WORLD.Send([msg, 
len(s)+1, MPI.CHAR], 1, tag) elif myrank == 1: MPI.COMM_WORLD.Recv([msg, 20, MPI.CHAR], 0, tag, status) # -------------------------------------------------------------------- if myrank == 1: assert list(msg[:len(s)]) == list(s) assert msg[len(s)] == 0 assert status.source == 0 assert status.tag == tag assert status.error == MPI.SUCCESS assert status.Get_count(MPI.CHAR) == len(s)+1 # -------------------------------------------------------------------- mpi4py-4.0.3/demo/mpi-ref-v1/ex-2.08.py000066400000000000000000000024651475341043600171400ustar00rootroot00000000000000## mpiexec -n 2 python ex-2.08.py # An exchange of messages # -------------------------------------------------------------------- from mpi4py import MPI import array if MPI.COMM_WORLD.Get_size() < 2: raise SystemExit # -------------------------------------------------------------------- sendbuf = array.array('d', [0]*10) recvbuf = array.array('d', [0]*10) tag = 0 status = MPI.Status() myrank = MPI.COMM_WORLD.Get_rank() if myrank == 0: sendbuf[:] = array.array('d', range(len(sendbuf))) MPI.COMM_WORLD.Send([sendbuf, MPI.DOUBLE], 1, tag) MPI.COMM_WORLD.Recv([recvbuf, MPI.DOUBLE], 1, tag, status) elif myrank == 1: MPI.COMM_WORLD.Recv([recvbuf, MPI.DOUBLE], 0, tag, status) sendbuf[:] = recvbuf MPI.COMM_WORLD.Send([sendbuf, MPI.DOUBLE], 0, tag) # -------------------------------------------------------------------- if myrank == 0: assert status.source == 1 assert status.tag == tag assert status.error == MPI.SUCCESS assert status.Get_count(MPI.DOUBLE) == len(recvbuf) assert sendbuf == recvbuf elif myrank == 1: assert status.source == 0 assert status.tag == tag assert status.error == MPI.SUCCESS assert status.Get_count(MPI.DOUBLE) == len(recvbuf) assert sendbuf == recvbuf # -------------------------------------------------------------------- mpi4py-4.0.3/demo/mpi-ref-v1/ex-2.16.py000066400000000000000000000035021475341043600171300ustar00rootroot00000000000000## mpiexec -n 4 python ex-2.16.py # Jacobi code # version of parallel code using sendrecv and null processes. 
# -------------------------------------------------------------------- from mpi4py import MPI try: import numpy except ImportError: raise SystemExit # -------------------------------------------------------------------- n = 5 * MPI.COMM_WORLD.Get_size() # compute number of processes and myrank p = MPI.COMM_WORLD.Get_size() myrank = MPI.COMM_WORLD.Get_rank() # compute size of local block m = n//p if myrank < (n - p * m): m = m + 1 #compute neighbors if myrank == 0: left = MPI.PROC_NULL else: left = myrank - 1 if myrank == p - 1: right = MPI.PROC_NULL else: right = myrank + 1 # allocate local arrays A = numpy.empty((n+2, m+2), dtype='d', order='f') B = numpy.empty((n, m), dtype='d', order='f') A.fill(1) A[0, :] = A[-1, :] = 0 A[:, 0] = A[:, -1] = 0 # main loop converged = False while not converged: # compute, B = 0.25 * ( N + S + E + W) N, S = A[:-2, 1:-1], A[2:, 1:-1] E, W = A[1:-1, :-2], A[1:-1, 2:] numpy.add(N, S, B) numpy.add(E, B, B) numpy.add(W, B, B) B *= 0.25 A[1:-1, 1:-1] = B # communicate tag = 0 MPI.COMM_WORLD.Sendrecv([B[:, -1], MPI.DOUBLE], right, tag, [A[:, 0], MPI.DOUBLE], left, tag) MPI.COMM_WORLD.Sendrecv((B[:, 0], MPI.DOUBLE), left, tag, (A[:, -1], MPI.DOUBLE), right, tag) # convergence myconv = numpy.allclose(B, 0) loc_conv = numpy.asarray(myconv, dtype='i') glb_conv = numpy.asarray(0, dtype='i') MPI.COMM_WORLD.Allreduce([loc_conv, MPI.INT], [glb_conv, MPI.INT], op=MPI.LAND) converged = bool(glb_conv) # -------------------------------------------------------------------- mpi4py-4.0.3/demo/mpi-ref-v1/ex-2.29.py000066400000000000000000000022301475341043600171310ustar00rootroot00000000000000## mpiexec -n 3 python ex-2.29.py # Use a blocking probe to wait for an incoming message # -------------------------------------------------------------------- from mpi4py import MPI import array if MPI.COMM_WORLD.Get_size() < 3: raise SystemExit # -------------------------------------------------------------------- comm = MPI.COMM_WORLD rank = comm.Get_rank() if rank == 0: i = array.array('i', [7]*5) comm.Send([i, MPI.INT], 2, 0) elif rank == 1: x = array.array('f', [7]*5) comm.Send([x, MPI.FLOAT], 2, 0) elif rank == 2: i = array.array('i', [0]*5) x = array.array('f', [0]*5) status = MPI.Status() for j in range(2): comm.Probe(MPI.ANY_SOURCE, 0, status) if status.Get_source() == 0: comm.Recv([i, MPI.INT], 0, 0, status) else: comm.Recv([x, MPI.FLOAT], 1, 0, status) # -------------------------------------------------------------------- if rank == 2: for v in i: assert v == 7 for v in x: assert v == 7 assert status.source in (0, 1) assert status.tag == 0 assert status.error == 0 # -------------------------------------------------------------------- mpi4py-4.0.3/demo/mpi-ref-v1/ex-2.32.py000066400000000000000000000044011475341043600171250ustar00rootroot00000000000000# Jacobi computation, using persistent requests from mpi4py import MPI try: import numpy except ImportError: raise SystemExit n = 5 * MPI.COMM_WORLD.Get_size() # compute number of processes and myrank p = MPI.COMM_WORLD.Get_size() myrank = MPI.COMM_WORLD.Get_rank() # compute size of local block m = n//p if myrank < (n - p * m): m = m + 1 #compute neighbors if myrank == 0: left = MPI.PROC_NULL else: left = myrank - 1 if myrank == p - 1: right = MPI.PROC_NULL else: right = myrank + 1 # allocate local arrays A = numpy.empty((n+2, m+2), dtype=float, order='f') B = numpy.empty((n, m), dtype=float, order='f') A.fill(1) A[0, :] = A[-1, :] = 0 A[:, 0] = A[:, -1] = 0 # create persistent requests tag = 0 sreq1 = 
MPI.COMM_WORLD.Send_init((B[:, 0], MPI.DOUBLE), left, tag) sreq2 = MPI.COMM_WORLD.Send_init((B[:, -1], MPI.DOUBLE), right, tag) rreq1 = MPI.COMM_WORLD.Recv_init((A[:, 0], MPI.DOUBLE), left, tag) rreq2 = MPI.COMM_WORLD.Recv_init((A[:, -1], MPI.DOUBLE), right, tag) reqlist = [sreq1, sreq2, rreq1, rreq2] for req in reqlist: assert req != MPI.REQUEST_NULL # main loop converged = False while not converged: # compute boundary columns N, S = A[ :-2, 1], A[2:, 1] E, W = A[1:-1, 0], A[1:-1, 2] C = B[:, 0] numpy.add(N, S, C) numpy.add(C, E, C) numpy.add(C, W, C) C *= 0.25 N, S = A[ :-2, -2], A[2:, -2] E, W = A[1:-1, -3], A[1:-1, -1] C = B[:, -1] numpy.add(N, S, C) numpy.add(C, E, C) numpy.add(C, W, C) C *= 0.25 # start communication #MPI.Prequest.Startall(reqlist) for r in reqlist: r.Start() # compute interior N, S = A[ :-2, 2:-2], A[2, 2:-2] E, W = A[1:-1, 2:-2], A[1:-1, 2:-2] C = B[:, 1:-1] numpy.add(N, S, C) numpy.add(E, C, C) numpy.add(W, C, C) C *= 0.25 A[1:-1, 1:-1] = B # complete communication MPI.Prequest.Waitall(reqlist) # convergence myconv = numpy.allclose(B, 0) loc_conv = numpy.asarray(myconv, dtype='i') glb_conv = numpy.asarray(0, dtype='i') MPI.COMM_WORLD.Allreduce([loc_conv, MPI.INT], [glb_conv, MPI.INT], op=MPI.LAND) converged = bool(glb_conv) # free persistent requests for req in reqlist: req.Free() mpi4py-4.0.3/demo/mpi-ref-v1/ex-2.34.py000066400000000000000000000022411475341043600171270ustar00rootroot00000000000000## mpiexec -n 2 python ex-2.34.py # Use of ready-mode and synchronous-mode # -------------------------------------------------------------------- from mpi4py import MPI try: import numpy except ImportError: raise SystemExit if MPI.COMM_WORLD.Get_size() < 2: raise SystemExit # -------------------------------------------------------------------- comm = MPI.COMM_WORLD buff = numpy.empty((1000,2), dtype='f', order='f') rank = comm.Get_rank() if rank == 0: req1 = comm.Irecv([buff[:, 0], MPI.FLOAT], 1, 1) req2 = comm.Irecv([buff[:, 1], MPI.FLOAT], 1, 2) status = [MPI.Status(), MPI.Status()] MPI.Request.Waitall([req1, req2], status) elif rank == 1: buff[:, 0] = 5 buff[:, 1] = 7 comm.Ssend([buff[:, 1], MPI.FLOAT], 0, 2) comm.Rsend([buff[:, 0], MPI.FLOAT], 0, 1) # -------------------------------------------------------------------- all = numpy.all if rank == 0: assert all(buff[:, 0] == 5) assert all(buff[:, 1] == 7) assert status[0].source == 1 assert status[0].tag == 1 assert status[1].source == 1 assert status[1].tag == 2 # -------------------------------------------------------------------- mpi4py-4.0.3/demo/mpi-ref-v1/ex-2.35.py000066400000000000000000000013371475341043600171350ustar00rootroot00000000000000## mpiexec -n 1 python ex-2.35.py # Calls to attach and detach buffers # -------------------------------------------------------------------- from mpi4py import MPI try: from numpy import empty except ImportError: from array import array def empty(size, dtype): return array(dtype, [0]*size) # -------------------------------------------------------------------- BUFSIZE = 10000 + MPI.BSEND_OVERHEAD buff = empty(BUFSIZE, dtype='b') MPI.Attach_buffer(buff) buff2 = MPI.Detach_buffer() MPI.Attach_buffer(buff2) MPI.Detach_buffer() # -------------------------------------------------------------------- assert len(buff2) == BUFSIZE # -------------------------------------------------------------------- mpi4py-4.0.3/demo/mpi-ref-v1/ex-3.01.py000066400000000000000000000013661475341043600171310ustar00rootroot00000000000000from mpi4py import MPI try: import numpy except ImportError: raise 
SystemExit # send a upper triangular matrix N = 10 a = numpy.empty((N, N), dtype=float, order='c') b = numpy.zeros((N, N), dtype=float, order='c') a.flat = numpy.arange(a.size, dtype=float) # compute start and size of each row i = numpy.arange(N) blocklen = N - i disp = N * i + i # create datatype for upper triangular part upper = MPI.DOUBLE.Create_indexed(blocklen, disp) upper.Commit() # send and recv matrix myrank = MPI.COMM_WORLD.Get_rank() MPI.COMM_WORLD.Sendrecv((a, 1, upper), myrank, 0, [b, 1, upper], myrank, 0) assert numpy.allclose(numpy.triu(b), numpy.triu(a)) assert numpy.allclose(numpy.tril(b, -1), numpy.zeros((N,N))) upper.Free() mpi4py-4.0.3/demo/mpi-ref-v1/ex-3.02.py000066400000000000000000000003551475341043600171270ustar00rootroot00000000000000from mpi4py import MPI # Type = { (double, 0), (char, 8) } blens = (1, 1) disps = (0, MPI.DOUBLE.size) types = (MPI.DOUBLE, MPI.CHAR) dtype = MPI.Datatype.Create_struct(blens, disps, types) if 'ex-3.02' in __file__: dtype.Free() mpi4py-4.0.3/demo/mpi-ref-v1/ex-3.03.py000066400000000000000000000002411475341043600171220ustar00rootroot00000000000000with open('ex-3.02.py') as source: exec(source.read()) assert dtype.size == MPI.DOUBLE.size + MPI.CHAR.size assert dtype.extent >= dtype.size dtype.Free() mpi4py-4.0.3/demo/mpi-ref-v1/ex-3.04.py000066400000000000000000000002731475341043600171300ustar00rootroot00000000000000with open('ex-3.02.py') as source: exec(source.read()) count = 3 newtype = dtype.Create_contiguous(count) assert newtype.extent == dtype.extent * count dtype.Free() newtype.Free() mpi4py-4.0.3/demo/mpi-ref-v1/ex-3.05.py000066400000000000000000000003431475341043600171270ustar00rootroot00000000000000with open('ex-3.02.py') as source: exec(source.read()) count = 2 blklen = 3 stride = 4 newtype = dtype.Create_vector(count, blklen, stride) assert newtype.size == dtype.size * count * blklen dtype.Free() newtype.Free() mpi4py-4.0.3/demo/mpi-ref-v1/ex-3.06.py000066400000000000000000000003441475341043600171310ustar00rootroot00000000000000with open('ex-3.02.py') as source: exec(source.read()) count = 3 blklen = 1 stride = -2 newtype = dtype.Create_vector(count, blklen, stride) assert newtype.size == dtype.size * count * blklen dtype.Free() newtype.Free() mpi4py-4.0.3/demo/mpi-ref-v1/ex-3.07.py000066400000000000000000000003631475341043600171330ustar00rootroot00000000000000with open('ex-3.02.py') as source: exec(source.read()) count = 2 blklen = 3 stride = 4 * dtype.extent newtype = dtype.Create_hvector(count, blklen, stride) assert newtype.size == dtype.size * count * blklen dtype.Free() newtype.Free() mpi4py-4.0.3/demo/mpi-ref-v1/ex-3.08.py000066400000000000000000000015661475341043600171420ustar00rootroot00000000000000from mpi4py import MPI try: import numpy except ImportError: raise SystemExit # extract the section a[0:6:2, 0:5:2] and store it in e[:,:] a = numpy.empty((6, 5), dtype=float, order='f') e = numpy.empty((3, 3), dtype=float, order='f') a.flat = numpy.arange(a.size, dtype=float) lb, sizeofdouble = MPI.DOUBLE.Get_extent() # create datatype for a 1D section oneslice = MPI.DOUBLE.Create_vector(3, 1, 2) # create datatype for a 2D section twoslice = oneslice.Create_hvector(3, 1, 12*sizeofdouble) twoslice.Commit() # send and recv on same process myrank = MPI.COMM_WORLD.Get_rank() status = MPI.Status() MPI.COMM_WORLD.Sendrecv([a, 1, twoslice], myrank, 0, (e, MPI.DOUBLE), myrank, 0, status) assert numpy.allclose(a[::2, ::2], e) assert status.Get_count(twoslice) == 1 assert status.Get_count(MPI.DOUBLE) == e.size oneslice.Free() 
twoslice.Free() mpi4py-4.0.3/demo/mpi-ref-v1/ex-3.09.py000066400000000000000000000020251475341043600171320ustar00rootroot00000000000000from mpi4py import MPI try: import numpy except ImportError: raise SystemExit # transpose a matrix a into b a = numpy.empty((100, 100), dtype=float, order='f') b = numpy.empty((100, 100), dtype=float, order='f') a.flat = numpy.arange(a.size, dtype=float) lb, sizeofdouble = MPI.DOUBLE.Get_extent() # create datatype dor one row # (vector with 100 double entries and stride 100) row = MPI.DOUBLE.Create_vector(100, 1, 100) # create datatype for matrix in row-major order # (one hundred copies of the row datatype, strided one word # apart; the successive row datatypes are interleaved) xpose = row.Create_hvector(100, 1, sizeofdouble) xpose.Commit() # send matrix in row-major order and receive in column major order abuf = (a, xpose) bbuf = (b, MPI.DOUBLE) myrank = MPI.COMM_WORLD.Get_rank() status = MPI.Status() MPI.COMM_WORLD.Sendrecv(abuf, myrank, 0, bbuf, myrank, 0, status) assert numpy.allclose(a, b.transpose()) assert status.Get_count(xpose) == 1 assert status.Get_count(MPI.DOUBLE) == b.size row.Free() xpose.Free() mpi4py-4.0.3/demo/mpi-ref-v1/ex-3.11.py000066400000000000000000000002241475341043600171220ustar00rootroot00000000000000with open('ex-3.02.py') as source: exec(source.read()) B = (3, 1) D = (4, 0) newtype = dtype.Create_indexed(B, D) dtype.Free() newtype.Free() mpi4py-4.0.3/demo/mpi-ref-v1/ex-3.12.py000066400000000000000000000002441475341043600171250ustar00rootroot00000000000000with open('ex-3.02.py') as source: exec(source.read()) B = (3, 1) D = (4 * dtype.extent, 0) newtype = dtype.Create_hindexed(B, D) dtype.Free() newtype.Free() mpi4py-4.0.3/demo/mpi-ref-v1/ex-3.13.py000066400000000000000000000004421475341043600171260ustar00rootroot00000000000000from mpi4py import MPI blens = (1, 1) disps = (0, MPI.DOUBLE.size) types = (MPI.DOUBLE, MPI.CHAR) type1 = MPI.Datatype.Create_struct(blens, disps, types) B = (2, 1, 3) D = (0, 16, 26) T = (MPI.FLOAT, type1, MPI.CHAR) dtype = MPI.Datatype.Create_struct(B, D, T) type1.Free() dtype.Free() mpi4py-4.0.3/demo/mpi-ref-v1/makefile000066400000000000000000000005471475341043600172640ustar00rootroot00000000000000include ../config.mk .PHONY: default build test clean default: build test clean .PHONY: run_seq run_seq: -@for i in `ls ex-*.py`; do \ echo $(PYTHON) $$i; \ $(PYTHON) $$i; \ done .PHONY: run_mpi run_mpi: -@for i in `ls ex-*.py`; do \ echo $(MPIEXEC_PYTHON) $$i; \ $(MPIEXEC_PYTHON) $$i; \ done build: test: run_seq run_mpi clean: mpi4py-4.0.3/demo/mpi-ref-v1/runtests.sh000077500000000000000000000015151475341043600200060ustar00rootroot00000000000000#!/bin/sh MPIEXEC=mpiexec NP_FLAG=-n NP=3 PYTHON=python set -x $MPIEXEC $NP_FLAG $NP $PYTHON ex-2.01.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-2.08.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-2.16.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-2.29.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-2.32.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-2.34.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-2.35.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-3.01.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-3.02.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-3.03.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-3.04.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-3.05.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-3.06.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-3.07.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-3.08.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-3.09.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-3.11.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-3.12.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-3.13.py 
mpi4py-4.0.3/demo/nxtval/000077500000000000000000000000001475341043600152075ustar00rootroot00000000000000mpi4py-4.0.3/demo/nxtval/makefile000066400000000000000000000004231475341043600167060ustar00rootroot00000000000000include ../config.mk .PHONY: default build test clean default: test build: test: $(MPIEXEC_PYTHON) nxtval-threads.py $(MPIEXEC_PYTHON) nxtval-dynproc.py $(MPIEXEC_PYTHON) nxtval-onesided.py $(MPIEXEC_PYTHON) nxtval-scalable.py $(MPIEXEC_PYTHON) nxtval-mpi3.py clean: mpi4py-4.0.3/demo/nxtval/nxtval-dynproc.py000066400000000000000000000040171475341043600205530ustar00rootroot00000000000000# -------------------------------------------------------------------- from mpi4py import MPI import sys, os class Counter: def __init__(self, comm): assert not comm.Is_inter() self.comm = comm.Dup() # start counter process script = os.path.abspath(__file__) if script[-4:] in ('.pyc', '.pyo'): script = script[:-1] self.child = self.comm.Spawn(sys.executable, [script, '--child'], 1) def free(self): self.comm.Barrier() # stop counter process rank = self.child.Get_rank() if rank == 0: self.child.send(None, 0, 1) self.child.Disconnect() # self.comm.Free() def next(self): # incr = 1 self.child.send(incr, 0, 0) ival = self.child.recv(None, 0, 0) nxtval = ival # return nxtval # -------------------------------------------------------------------- def _counter_child(): parent = MPI.Comm.Get_parent() assert parent != MPI.COMM_NULL try: counter = 0 status = MPI.Status() any_src, any_tag = MPI.ANY_SOURCE, MPI.ANY_TAG while True: # server loop incr = parent.recv(None, any_src, any_tag, status) if status.tag == 1: break parent.send(counter, status.source, 0) counter += incr finally: parent.Disconnect() if __name__ == '__main__': if (len(sys.argv) > 1 and sys.argv[0] == __file__ and sys.argv[1] == '--child'): _counter_child() sys.exit(0) # -------------------------------------------------------------------- def test(): vals = [] counter = Counter(MPI.COMM_WORLD) for i in range(5): c = counter.next() vals.append(c) counter.free() # vals = MPI.COMM_WORLD.allreduce(vals) assert sorted(vals) == list(range(len(vals))) if __name__ == '__main__': test() # -------------------------------------------------------------------- mpi4py-4.0.3/demo/nxtval/nxtval-mpi3.py000066400000000000000000000040651475341043600177500ustar00rootroot00000000000000from mpi4py import MPI from array import array as _array import struct as _struct # -------------------------------------------------------------------- class Counter: def __init__(self, comm): rank = comm.Get_rank() itemsize = MPI.INT.Get_size() if rank == 0: n = 1 else: n = 0 self.win = MPI.Win.Allocate(n*itemsize, itemsize, MPI.INFO_NULL, comm) if rank == 0: mem = self.win.tomemory() mem[:] = _struct.pack('i', 0) def free(self): self.win.Free() def next(self, increment=1): incr = _array('i', [increment]) nval = _array('i', [0]) self.win.Lock(0) self.win.Get_accumulate([incr, 1, MPI.INT], [nval, 1, MPI.INT], 0, op=MPI.SUM) self.win.Unlock(0) return nval[0] # ----------------------------------------------------------------------------- class Mutex: def __init__(self, comm): self.counter = Counter(comm) def __enter__(self): self.lock() return self def __exit__(self, *exc): self.unlock() return None def free(self): self.counter.free() def lock(self): value = self.counter.next(+1) while value != 0: value = self.counter.next(-1) value = self.counter.next(+1) def unlock(self): self.counter.next(-1) # ----------------------------------------------------------------------------- def 
test_counter(): vals = [] counter = Counter(MPI.COMM_WORLD) for i in range(5): c = counter.next() vals.append(c) counter.free() vals = MPI.COMM_WORLD.allreduce(vals) assert sorted(vals) == list(range(len(vals))) def test_mutex(): mutex = Mutex(MPI.COMM_WORLD) mutex.lock() mutex.unlock() mutex.free() if __name__ == '__main__': test_counter() test_mutex() # ----------------------------------------------------------------------------- mpi4py-4.0.3/demo/nxtval/nxtval-onesided.py000066400000000000000000000036061475341043600206720ustar00rootroot00000000000000# -------------------------------------------------------------------- from mpi4py import MPI from array import array as _array import struct as _struct class Counter: def __init__(self, comm): # size = comm.Get_size() rank = comm.Get_rank() # itemsize = MPI.INT.Get_size() if rank == 0: mem = MPI.Alloc_mem(itemsize*size, MPI.INFO_NULL) mem[:] = _struct.pack('i', 0) * size else: mem = MPI.BOTTOM self.win = MPI.Win.Create(mem, itemsize, MPI.INFO_NULL, comm) # blens = [rank, size-rank-1] disps = [0, rank+1] self.dt_get = MPI.INT.Create_indexed(blens, disps).Commit() # self.myval = 0 def free(self): self.dt_get.Free() mem = self.win.tomemory() self.win.Free() if mem: MPI.Free_mem(mem) def next(self): # group = self.win.Get_group() size = group.Get_size() rank = group.Get_rank() group.Free() # incr = _array('i', [1]) vals = _array('i', [0])*size self.win.Lock(0) self.win.Accumulate([incr, 1, MPI.INT], 0, [rank, 1, MPI.INT], MPI.SUM) self.win.Get([vals, 1, self.dt_get], 0, [ 0, 1, self.dt_get]) self.win.Unlock(0) # vals[rank] = self.myval self.myval += 1 nxtval = sum(vals) # return nxtval # -------------------------------------------------------------------- def test(): vals = [] counter = Counter(MPI.COMM_WORLD) for i in range(5): c = counter.next() vals.append(c) counter.free() vals = MPI.COMM_WORLD.allreduce(vals) assert sorted(vals) == list(range(len(vals))) if __name__ == '__main__': test() # -------------------------------------------------------------------- mpi4py-4.0.3/demo/nxtval/nxtval-scalable.py000066400000000000000000000100071475341043600206370ustar00rootroot00000000000000from mpi4py import MPI # ----------------------------------------------------------------------------- import struct as _struct try: from numpy import empty as _empty def _array_new(size, typecode, init=0): a = _empty(size, typecode) a.fill(init) return a def _array_set(ary, value): ary.fill(value) def _array_sum(ary): return ary.sum() except ImportError: from array import array as _array def _array_new(size, typecode, init=0): return _array(typecode, [init]) * size def _array_set(ary, value): for i, _ in enumerate(ary): ary[i] = value def _array_sum(ary): return sum(ary, 0) # ----------------------------------------------------------------------------- class Counter: def __init__(self, comm, init=0): # size = comm.Get_size() rank = comm.Get_rank() mask = 1 while mask < size: mask <<= 1 mask >>= 1 idx = 0 get_idx = [] acc_idx = [] while mask >= 1: left = idx + 1 right = idx + (mask<<1) if rank < mask: acc_idx.append( left ) get_idx.append( right ) idx = left else: acc_idx.append( right ) get_idx.append( left ) idx = right rank = rank % mask mask >>= 1 # typecode = 'i' datatype = MPI.INT itemsize = datatype.Get_size() # root = 0 rank = comm.Get_rank() if rank == root: nlevels = len(get_idx) + 1 nentries = (1< large_message_size: skip = skip_large loop = loop_large iterations = list(range(loop+skip)) s_msg = MPI.IN_PLACE r_msg = [r_buf, size, MPI.BYTE] # 
comm.Barrier() for i in iterations: if i == skip: t_start = MPI.Wtime() comm.Allgather(s_msg, r_msg) t_end = MPI.Wtime() comm.Barrier() # if myid == 0: latency = (t_end - t_start) * 1e6 / loop print ('%-10d%20.2f' % (size, latency)) def message_sizes(max_size): return [0] + [(1< large_message_size: skip = skip_large loop = loop_large iterations = list(range(loop+skip)) s_msg = [s_buf, size, MPI.BYTE] r_msg = [r_buf, size, MPI.BYTE] # comm.Barrier() for i in iterations: if i == skip: t_start = MPI.Wtime() comm.Alltoall(s_msg, r_msg) t_end = MPI.Wtime() comm.Barrier() # if myid == 0: latency = (t_end - t_start) * 1e6 / loop print ('%-10d%20.2f' % (size, latency)) def message_sizes(max_size): return [0] + [(1< large_message_size: skip = skip_large loop = loop_large iterations = list(range(loop+skip)) disp = 0 for i in range (numprocs): s_counts[i] = r_counts[i] = size s_displs[i] = r_displs[i] = disp disp += size s_msg = [s_buf, (s_counts, s_displs), MPI.BYTE] r_msg = [r_buf, (r_counts, r_displs), MPI.BYTE] # comm.Barrier() for i in iterations: if i == skip: t_start = MPI.Wtime() comm.Alltoallv(s_msg, r_msg) t_end = MPI.Wtime() comm.Barrier() # if myid == 0: latency = (t_end - t_start) * 1e6 / loop print ('%-10d%20.2f' % (size, latency)) def message_sizes(max_size): return [0] + [(1< large_message_size: skip = skip_large loop = loop_large iterations = list(range(loop+skip)) msg = [buf, size, MPI.BYTE] # comm.Barrier() for i in iterations: if i == skip: t_start = MPI.Wtime() comm.Bcast(msg, 0) t_end = MPI.Wtime() comm.Barrier() # if myid == 0: latency = (t_end - t_start) * 1e6 / loop print ('%-10d%20.2f' % (size, latency)) def message_sizes(max_size): return [0] + [(1< MAX_MSG_SIZE: break if size > large_message_size: skip = skip_large loop = loop_large window_size = window_size_large iterations = list(range(loop+skip)) window_sizes = list(range(window_size)) s_msg = [s_buf, size, MPI.BYTE] r_msg = [r_buf, size, MPI.BYTE] send_request = [MPI.REQUEST_NULL] * window_size recv_request = [MPI.REQUEST_NULL] * window_size # comm.Barrier() if myid == 0: for i in iterations: if i == skip: t_start = MPI.Wtime() for j in window_sizes: recv_request[j] = comm.Irecv(r_msg, 1, 10) for j in window_sizes: send_request[j] = comm.Isend(s_msg, 1, 100) MPI.Request.Waitall(send_request) MPI.Request.Waitall(recv_request) t_end = MPI.Wtime() elif myid == 1: for i in iterations: for j in window_sizes: recv_request[j] = comm.Irecv(r_msg, 0, 100) for j in window_sizes: send_request[j] = comm.Isend(s_msg, 0, 10) MPI.Request.Waitall(send_request) MPI.Request.Waitall(recv_request) # if myid == 0: MB = size / 1e6 * loop * window_size s = t_end - t_start print ('%-10d%20.2f' % (size, MB/s)) def allocate(n): try: import mmap return mmap.mmap(-1, n) except (ImportError, OSError): try: from numpy import zeros return zeros(n, 'B') except ImportError: from array import array return array('B', [0]) * n if __name__ == '__main__': osu_bibw() mpi4py-4.0.3/demo/osu_bw.py000066400000000000000000000047011475341043600155450ustar00rootroot00000000000000# http://mvapich.cse.ohio-state.edu/benchmarks/ from mpi4py import MPI def osu_bw( BENCHMARH = "MPI Bandwidth Test", skip = 10, loop = 100, window_size = 64, skip_large = 2, loop_large = 20, window_size_large = 64, large_message_size = 8192, MAX_MSG_SIZE = 1<<22, ): comm = MPI.COMM_WORLD myid = comm.Get_rank() numprocs = comm.Get_size() if numprocs != 2: if myid == 0: errmsg = "This test requires exactly two processes" else: errmsg = None raise SystemExit(errmsg) s_buf = 
allocate(MAX_MSG_SIZE) r_buf = allocate(MAX_MSG_SIZE) if myid == 0: print (f'# {BENCHMARH}') if myid == 0: print ('# %-8s%20s' % ("Size [B]", "Bandwidth [MB/s]")) message_sizes = [2**i for i in range(30)] for size in message_sizes: if size > MAX_MSG_SIZE: break if size > large_message_size: skip = skip_large loop = loop_large window_size = window_size_large iterations = list(range(loop+skip)) window_sizes = list(range(window_size)) requests = [MPI.REQUEST_NULL] * window_size # comm.Barrier() if myid == 0: s_msg = [s_buf, size, MPI.BYTE] r_msg = [r_buf, 4, MPI.BYTE] for i in iterations: if i == skip: t_start = MPI.Wtime() for j in window_sizes: requests[j] = comm.Isend(s_msg, 1, 100) MPI.Request.Waitall(requests) comm.Recv(r_msg, 1, 101) t_end = MPI.Wtime() elif myid == 1: s_msg = [s_buf, 4, MPI.BYTE] r_msg = [r_buf, size, MPI.BYTE] for i in iterations: for j in window_sizes: requests[j] = comm.Irecv(r_msg, 0, 100) MPI.Request.Waitall(requests) comm.Send(s_msg, 0, 101) # if myid == 0: MB = size / 1e6 * loop * window_size s = t_end - t_start print ('%-10d%20.2f' % (size, MB/s)) def allocate(n): try: import mmap return mmap.mmap(-1, n) except (ImportError, OSError): try: from numpy import zeros return zeros(n, 'B') except ImportError: from array import array return array('B', [0]) * n if __name__ == '__main__': osu_bw() mpi4py-4.0.3/demo/osu_gather.py000066400000000000000000000036651475341043600164170ustar00rootroot00000000000000# http://mvapich.cse.ohio-state.edu/benchmarks/ from mpi4py import MPI def osu_gather( BENCHMARH = "MPI Gather Latency Test", skip = 1000, loop = 10000, skip_large = 10, loop_large = 100, large_message_size = 8192, MAX_MSG_SIZE = 1<<20, ): comm = MPI.COMM_WORLD myid = comm.Get_rank() numprocs = comm.Get_size() if numprocs < 2: if myid == 0: errmsg = "This test requires at least two processes" else: errmsg = None raise SystemExit(errmsg) if myid == 0: r_buf = allocate(MAX_MSG_SIZE*numprocs) else: s_buf = allocate(MAX_MSG_SIZE) if myid == 0: print (f'# {BENCHMARH}') if myid == 0: print ('# %-8s%20s' % ("Size [B]", "Latency [us]")) for size in message_sizes(MAX_MSG_SIZE): if size > large_message_size: skip = skip_large loop = loop_large iterations = list(range(loop+skip)) if myid == 0: s_msg = MPI.IN_PLACE r_msg = [r_buf, size, MPI.BYTE] else: s_msg = [s_buf, size, MPI.BYTE] r_msg = None # comm.Barrier() for i in iterations: if i == skip: t_start = MPI.Wtime() comm.Gather(s_msg, r_msg, 0) t_end = MPI.Wtime() comm.Barrier() # if myid == 0: latency = (t_end - t_start) * 1e6 / loop print ('%-10d%20.2f' % (size, latency)) def message_sizes(max_size): return [0] + [(1< MAX_MSG_SIZE: break if size > large_message_size: skip = skip_large loop = loop_large iterations = list(range(loop+skip)) s_msg = [s_buf, size, MPI.BYTE] r_msg = [r_buf, size, MPI.BYTE] # comm.Barrier() if myid == 0: for i in iterations: if i == skip: t_start = MPI.Wtime() comm.Send(s_msg, 1, 1) comm.Recv(r_msg, 1, 1) t_end = MPI.Wtime() elif myid == 1: for i in iterations: comm.Recv(r_msg, 0, 1) comm.Send(s_msg, 0, 1) # if myid == 0: latency = (t_end - t_start) * 1e6 / (2 * loop) print ('%-10d%20.2f' % (size, latency)) def allocate(n): try: import mmap return mmap.mmap(-1, n) except (ImportError, OSError): try: from numpy import zeros return zeros(n, 'B') except ImportError: from array import array return array('B', [0]) * n if __name__ == '__main__': osu_latency() mpi4py-4.0.3/demo/osu_multi_lat.py000066400000000000000000000042441475341043600171310ustar00rootroot00000000000000# 
http://mvapich.cse.ohio-state.edu/benchmarks/ from mpi4py import MPI def osu_multi_lat( BENCHMARH = "MPI Multi Latency Test", skip_small = 100, loop_small = 10000, skip_large = 10, loop_large = 1000, large_message_size = 8192, MAX_MSG_SIZE = 1<<22, ): comm = MPI.COMM_WORLD myid = comm.Get_rank() nprocs = comm.Get_size() pairs = nprocs/2 s_buf = allocate(MAX_MSG_SIZE) r_buf = allocate(MAX_MSG_SIZE) if myid == 0: print (f'# {BENCHMARH}') if myid == 0: print ('# %-8s%20s' % ("Size [B]", "Latency [us]")) message_sizes = [0] + [2**i for i in range(30)] for size in message_sizes: if size > MAX_MSG_SIZE: break if size > large_message_size: skip = skip_large loop = loop_large else: skip = skip_small loop = loop_small iterations = list(range(loop+skip)) s_msg = [s_buf, size, MPI.BYTE] r_msg = [r_buf, size, MPI.BYTE] # comm.Barrier() if myid < pairs: partner = myid + pairs for i in iterations: if i == skip: t_start = MPI.Wtime() comm.Send(s_msg, partner, 1) comm.Recv(r_msg, partner, 1) t_end = MPI.Wtime() else: partner = myid - pairs for i in iterations: if i == skip: t_start = MPI.Wtime() comm.Recv(r_msg, partner, 1) comm.Send(s_msg, partner, 1) t_end = MPI.Wtime() # latency = (t_end - t_start) * 1e6 / (2 * loop) total_lat = comm.reduce(latency, root=0, op=MPI.SUM) if myid == 0: avg_lat = total_lat/(pairs * 2) print ('%-10d%20.2f' % (size, avg_lat)) def allocate(n): try: import mmap return mmap.mmap(-1, n) except (ImportError, OSError): try: from numpy import zeros return zeros(n, 'B') except ImportError: from array import array return array('B', [0]) * n if __name__ == '__main__': osu_multi_lat() mpi4py-4.0.3/demo/osu_scatter.py000066400000000000000000000036711475341043600166070ustar00rootroot00000000000000# http://mvapich.cse.ohio-state.edu/benchmarks/ from mpi4py import MPI def osu_scatter( BENCHMARH = "MPI Scatter Latency Test", skip = 1000, loop = 10000, skip_large = 10, loop_large = 100, large_message_size = 8192, MAX_MSG_SIZE = 1<<20, ): comm = MPI.COMM_WORLD myid = comm.Get_rank() numprocs = comm.Get_size() if numprocs < 2: if myid == 0: errmsg = "This test requires at least two processes" else: errmsg = None raise SystemExit(errmsg) if myid == 0: s_buf = allocate(MAX_MSG_SIZE*numprocs) else: r_buf = allocate(MAX_MSG_SIZE) if myid == 0: print (f'# {BENCHMARH}') if myid == 0: print ('# %-8s%20s' % ("Size [B]", "Latency [us]")) for size in message_sizes(MAX_MSG_SIZE): if size > large_message_size: skip = skip_large loop = loop_large iterations = list(range(loop+skip)) if myid == 0: s_msg = [s_buf, size, MPI.BYTE] r_msg = MPI.IN_PLACE else: s_msg = None r_msg = [r_buf, size, MPI.BYTE] # comm.Barrier() for i in iterations: if i == skip: t_start = MPI.Wtime() comm.Scatter(s_msg, r_msg, 0) t_end = MPI.Wtime() comm.Barrier() # if myid == 0: latency = (t_end - t_start) * 1e6 / loop print ('%-10d%20.2f' % (size, latency)) def message_sizes(max_size): return [0] + [(1< target: partial = op(tmp, partial) recvobj = op(tmp, recvobj) else: tmp = op(partial, tmp) partial = tmp mask <<= 1 return recvobj def exscan(self, sendobj=None, recvobj=None, op=MPI.SUM): size = self.size rank = self.rank tag = MPI.COMM_WORLD.Get_attr(MPI.TAG_UB)-1 recvobj = sendobj partial = sendobj mask = 1 flag = False while mask < size: target = rank ^ mask if target < size: tmp = self.sendrecv(partial, dest=target, source=target, sendtag=tag, recvtag=tag) if rank > target: partial = op(tmp, partial) if rank != 0: if not flag: recvobj = tmp flag = True else: recvobj = op(tmp, recvobj) else: tmp = op(partial, tmp) partial = tmp 
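# --- editorial note (added for clarity; not part of the original demo) ---
# The exchange above implements exclusive scan by recursive doubling: in
# each round a rank swaps its running `partial` with the partner obtained
# by flipping one bit of its rank (rank ^ mask).  Data received from a
# lower-ranked partner is folded into both `partial` and the exclusive
# result `recvobj`; data received from a higher-ranked partner is folded
# only into `partial` (it is needed in later rounds but must not appear in
# the exclusive prefix).  After about log2(size) rounds every rank holds
# op() applied to the values of all ranks strictly below it, and rank 0,
# which has no lower ranks, returns None, matching MPI Exscan semantics.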
mask <<= 1 if rank == 0: recvobj = None return recvobj mpi4py-4.0.3/demo/reductions/runtests.sh000077500000000000000000000001661475341043600203030ustar00rootroot00000000000000#!/bin/sh MPIEXEC=mpiexec NP_FLAG=-n NP=5 PYTHON=python set -x $MPIEXEC $NP_FLAG $NP $PYTHON test_reductions.py -q mpi4py-4.0.3/demo/reductions/test_reductions.py000066400000000000000000000141031475341043600216410ustar00rootroot00000000000000from mpi4py import MPI import unittest import sys, os sys.path.insert(0, os.path.dirname(__file__)) from reductions import Intracomm del sys.path[0] class BaseTest: def test_reduce(self): rank = self.comm.rank size = self.comm.size for root in range(size): msg = rank res = self.comm.reduce(sendobj=msg, root=root) if self.comm.rank == root: self.assertEqual(res, sum(range(size))) else: self.assertIsNone(res) def test_reduce_min(self): rank = self.comm.rank size = self.comm.size for root in range(size): msg = rank res = self.comm.reduce(sendobj=msg, op=MPI.MIN, root=root) if self.comm.rank == root: self.assertEqual(res, 0) else: self.assertIsNone(res) def test_reduce_max(self): rank = self.comm.rank size = self.comm.size for root in range(size): msg = rank res = self.comm.reduce(sendobj=msg, op=MPI.MAX, root=root) if self.comm.rank == root: self.assertEqual(res, size-1) else: self.assertIsNone(res) def test_reduce_minloc(self): rank = self.comm.rank size = self.comm.size for root in range(size): msg = rank res = self.comm.reduce(sendobj=(msg, rank), op=MPI.MINLOC, root=root) if self.comm.rank == root: self.assertEqual(res, (0, 0)) else: self.assertIsNone(res) def test_reduce_maxloc(self): rank = self.comm.rank size = self.comm.size for root in range(size): msg = rank res = self.comm.reduce(sendobj=(msg, rank), op=MPI.MAXLOC, root=root) if self.comm.rank == root: self.assertEqual(res, (size-1, size-1)) else: self.assertIsNone(res) def test_allreduce(self): rank = self.comm.rank size = self.comm.size msg = rank res = self.comm.allreduce(sendobj=msg) self.assertEqual(res, sum(range(size))) def test_allreduce_min(self): rank = self.comm.rank size = self.comm.size msg = rank res = self.comm.allreduce(sendobj=msg, op=MPI.MIN) self.assertEqual(res, 0) def test_allreduce_max(self): rank = self.comm.rank size = self.comm.size msg = rank res = self.comm.allreduce(sendobj=msg, op=MPI.MAX) self.assertEqual(res, size-1) def test_allreduce_minloc(self): rank = self.comm.rank size = self.comm.size msg = rank res = self.comm.allreduce(sendobj=(msg, rank), op=MPI.MINLOC) self.assertEqual(res, (0, 0)) def test_allreduce_maxloc(self): rank = self.comm.rank size = self.comm.size msg = rank res = self.comm.allreduce(sendobj=(msg, rank), op=MPI.MAXLOC) self.assertEqual(res, (size-1, size-1)) def test_scan(self): rank = self.comm.rank size = self.comm.size msg = rank res = self.comm.scan(sendobj=msg) self.assertEqual(res, sum(list(range(size))[:rank+1])) def test_scan_min(self): rank = self.comm.rank size = self.comm.size msg = rank res = self.comm.scan(sendobj=msg, op=MPI.MIN) self.assertEqual(res, 0) def test_scan_max(self): rank = self.comm.rank size = self.comm.size msg = rank res = self.comm.scan(sendobj=msg, op=MPI.MAX) self.assertEqual(res, rank) def test_scan_minloc(self): rank = self.comm.rank size = self.comm.size msg = rank res = self.comm.scan(sendobj=(msg, rank), op=MPI.MINLOC) self.assertEqual(res, (0, 0)) def test_scan_maxloc(self): rank = self.comm.rank size = self.comm.size msg = rank res = self.comm.scan(sendobj=(msg, rank), op=MPI.MAXLOC) self.assertEqual(res, (rank, rank)) def 
test_exscan(self): rank = self.comm.rank size = self.comm.size msg = rank res = self.comm.exscan(sendobj=msg) if self.comm.rank == 0: self.assertIsNone(res) else: self.assertEqual(res, sum(list(range(size))[:rank])) def test_exscan_min(self): rank = self.comm.rank size = self.comm.size msg = rank res = self.comm.exscan(sendobj=msg, op=MPI.MIN) if self.comm.rank == 0: self.assertIsNone(res) else: self.assertEqual(res, 0) def test_exscan_max(self): rank = self.comm.rank size = self.comm.size msg = rank res = self.comm.exscan(sendobj=msg, op=MPI.MAX) if self.comm.rank == 0: self.assertIsNone(res) else: self.assertEqual(res, rank-1) def test_exscan_minloc(self): rank = self.comm.rank size = self.comm.size msg = rank res = self.comm.exscan(sendobj=(msg, rank), op=MPI.MINLOC) if self.comm.rank == 0: self.assertIsNone(res) else: self.assertEqual(res, (0, 0)) def test_exscan_maxloc(self): rank = self.comm.rank size = self.comm.size msg = rank res = self.comm.exscan(sendobj=(msg, rank), op=MPI.MAXLOC) if self.comm.rank == 0: self.assertIsNone(res) else: self.assertEqual(res, (rank-1, rank-1)) class TestS(BaseTest, unittest.TestCase): def setUp(self): self.comm = Intracomm(MPI.COMM_SELF) class TestW(BaseTest, unittest.TestCase): def setUp(self): self.comm = Intracomm(MPI.COMM_WORLD) class TestSD(BaseTest, unittest.TestCase): def setUp(self): self.comm = Intracomm(MPI.COMM_SELF.Dup()) def tearDown(self): self.comm.Free() class TestWD(BaseTest, unittest.TestCase): def setUp(self): self.comm = Intracomm(MPI.COMM_WORLD.Dup()) def tearDown(self): self.comm.Free() if __name__ == "__main__": unittest.main() mpi4py-4.0.3/demo/sequential/000077500000000000000000000000001475341043600160455ustar00rootroot00000000000000mpi4py-4.0.3/demo/sequential/makefile000066400000000000000000000002341475341043600175440ustar00rootroot00000000000000include ../config.mk .PHONY: default build test clean default: build test clean build: test: $(MPIEXEC_PYTHON) test_seq.py clean: $(RM) -r __pycache__ mpi4py-4.0.3/demo/sequential/runtests.sh000077500000000000000000000001541475341043600202730ustar00rootroot00000000000000#!/bin/sh MPIEXEC=mpiexec NP_FLAG=-n NP=5 PYTHON=python set -x $MPIEXEC $NP_FLAG $NP $PYTHON test_seq.py mpi4py-4.0.3/demo/sequential/seq.py000066400000000000000000000024241475341043600172110ustar00rootroot00000000000000class Seq: """ Sequential execution """ def __init__(self, comm, ng=1, tag=0): ng = int(ng) tag = int(tag) assert ng >= 1 assert ng <= comm.Get_size() self.comm = comm self.ng = ng self.tag = tag def __enter__(self): self.begin() return self def __exit__(self, *exc): self.end() return None def begin(self): """ Begin a sequential execution of a section of code """ comm = self.comm size = comm.Get_size() if size == 1: return rank = comm.Get_rank() ng = self.ng tag = self.tag if rank != 0: comm.Recv([None, 'B'], rank - 1, tag) if rank != (size - 1) and (rank % ng) < (ng - 1): comm.Send([None, 'B'], rank + 1, tag) def end(self): """ End a sequential execution of a section of code """ comm = self.comm size = comm.Get_size() if size == 1: return rank = comm.Get_rank() ng = self.ng tag = self.tag if rank == (size - 1) or (rank % ng) == (ng - 1): comm.Send([None, 'B'], (rank + 1) % size, tag) if rank == 0: comm.Recv([None, 'B'], size - 1, tag) mpi4py-4.0.3/demo/sequential/test_seq.py000066400000000000000000000007121475341043600202460ustar00rootroot00000000000000from mpi4py import MPI import unittest import sys, os sys.path.insert(0, os.path.dirname(__file__)) from seq import Seq del sys.path[0] def test(): 
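    # --- editorial note (added for clarity; not part of the original demo) ---
    # Seq serializes the body of the `with` block by passing a zero-byte
    # message token around the ranks: with ng=1, begin() makes every rank
    # except rank 0 block on a Recv from rank-1 before entering the block,
    # and end() forwards the token to (rank+1) % size once the block is
    # done, with rank 0 finally receiving it back from the last rank.  As a
    # result the "Hello, World!" lines are printed strictly in rank order
    # instead of interleaving.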
size = MPI.COMM_WORLD.Get_size() rank = MPI.COMM_WORLD.Get_rank() name = MPI.Get_processor_name() with Seq(MPI.COMM_WORLD, 1, 10): print( f"Hello, World! I am process {rank} of {size} on {name}.", flush=True, ) if __name__ == "__main__": test() mpi4py-4.0.3/demo/spawning/000077500000000000000000000000001475341043600155215ustar00rootroot00000000000000mpi4py-4.0.3/demo/spawning/cpi-master.c000066400000000000000000000013211475341043600177260ustar00rootroot00000000000000#include #include #include #include int main(int argc, char *argv[]) { char cmd[32] = "./cpi-worker-c.exe"; MPI_Comm worker; int n; double pi; MPI_Init(&argc, &argv); if (argc > 1) strcpy(cmd, argv[1]); printf("%s -> %s\n", argv[0], cmd); MPI_Comm_spawn(cmd, MPI_ARGV_NULL, 5, MPI_INFO_NULL, 0, MPI_COMM_SELF, &worker, MPI_ERRCODES_IGNORE); n = 100; MPI_Bcast(&n, 1, MPI_INT, MPI_ROOT, worker); MPI_Reduce(MPI_BOTTOM, &pi, 1, MPI_DOUBLE, MPI_SUM, MPI_ROOT, worker); MPI_Comm_disconnect(&worker); printf("pi: %.16f, error: %.16f\n", pi, fabs(M_PI-pi)); MPI_Finalize(); return 0; } mpi4py-4.0.3/demo/spawning/cpi-master.cxx000066400000000000000000000012411475341043600203070ustar00rootroot00000000000000#include #include #include #include int main(int argc, char *argv[]) { MPI::Init(); char cmd[32] = "./cpi-worker-cxx.exe"; if (argc > 1) std::strcpy(cmd, argv[1]); std::printf("%s -> %s\n", argv[0], cmd); MPI::Intercomm worker; worker = MPI::COMM_SELF.Spawn(cmd, MPI::ARGV_NULL, 5, MPI::INFO_NULL, 0); int n = 100; worker.Bcast(&n, 1, MPI::INT, MPI::ROOT); double pi; worker.Reduce(MPI::BOTTOM, &pi, 1, MPI::DOUBLE, MPI::SUM, MPI::ROOT); worker.Disconnect(); std::printf("pi: %.16f, error: %.16f\n", pi, std::fabs(M_PI-pi)); MPI::Finalize(); return 0; } mpi4py-4.0.3/demo/spawning/cpi-master.f90000066400000000000000000000017421475341043600201110ustar00rootroot00000000000000PROGRAM main USE mpi implicit none real (kind=8), parameter :: PI = 3.1415926535897931D0 integer argc character(len=32) argv(0:1) character(len=32) cmd integer ierr, n, worker real(kind=8) cpi call MPI_INIT(ierr) argc = iargc() + 1 call getarg(0, argv(0)) call getarg(1, argv(1)) cmd = './cpi-worker-f90.exe' if (argc > 1) then cmd = argv(1) end if write(*,'(A,A,A)') trim(argv(0)), ' -> ', trim(cmd) call MPI_COMM_SPAWN(cmd, MPI_ARGV_NULL, 5, & MPI_INFO_NULL, 0, & MPI_COMM_SELF, worker, & MPI_ERRCODES_IGNORE, ierr) n = 100 call MPI_BCAST(n, 1, MPI_INTEGER, & MPI_ROOT, worker, ierr) call MPI_REDUCE(MPI_BOTTOM, cpi, 1, MPI_DOUBLE_PRECISION, & MPI_SUM, MPI_ROOT, worker, ierr) call MPI_COMM_DISCONNECT(worker, ierr) write(*,'(A,F18.16,A,F18.16)') 'pi: ', cpi, ', error: ', abs(PI-cpi) call MPI_FINALIZE(ierr) END PROGRAM main mpi4py-4.0.3/demo/spawning/cpi-master.py000066400000000000000000000007751475341043600201500ustar00rootroot00000000000000from mpi4py import MPI from array import array from math import pi as PI from sys import argv cmd = './cpi-worker-py.exe' if len(argv) > 1: cmd = argv[1] print(f"{argv[0]} -> {cmd}") worker = MPI.COMM_SELF.Spawn(cmd, None, 5) n = array('i', [100]) worker.Bcast([n,MPI.INT], root=MPI.ROOT) pi = array('d', [0.0]) worker.Reduce(sendbuf=None, recvbuf=[pi, MPI.DOUBLE], op=MPI.SUM, root=MPI.ROOT) pi = pi[0] worker.Disconnect() print(f"pi: {pi:.16f}, error: {abs(PI-pi):.16f}") mpi4py-4.0.3/demo/spawning/cpi-worker.c000066400000000000000000000011251475341043600177460ustar00rootroot00000000000000#include int main(int argc, char *argv[]) { int myrank, nprocs; int n, i; double h, s, pi; MPI_Comm master; MPI_Init(&argc, &argv); MPI_Comm_get_parent(&master); 
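  /* --- editorial note (added for clarity; not part of the original demo) ---
   * `master` is the inter-communicator connecting these spawned workers to
   * the process that called MPI_Comm_spawn().  On an inter-communicator,
   * MPI_Comm_size/MPI_Comm_rank below report the size of, and this
   * process's rank within, the local (worker) group.  The master broadcasts
   * the number of intervals n with root = MPI_ROOT on its side, so each
   * worker receives it by passing root 0 (the master's rank in the remote
   * group).  Every worker then accumulates a midpoint-rule partial sum of
   * 4/(1+x^2) over i = myrank+1, myrank+1+nprocs, ..., and the final
   * MPI_Reduce with MPI_SUM delivers pi only to the master, again addressed
   * as root 0 from the workers' side.
   */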
MPI_Comm_size(master, &nprocs); MPI_Comm_rank(master, &myrank); MPI_Bcast(&n, 1, MPI_INT, 0, master); h = 1.0 / (double) n; s = 0.0; for (i = myrank+1; i < n+1; i += nprocs) { double x = h * (i - 0.5); s += 4.0 / (1.0 + x*x); } pi = s * h; MPI_Reduce(&pi, MPI_BOTTOM, 1, MPI_DOUBLE, MPI_SUM, 0, master); MPI_Comm_disconnect(&master); MPI_Finalize(); return 0; } mpi4py-4.0.3/demo/spawning/cpi-worker.cxx000066400000000000000000000010471475341043600203310ustar00rootroot00000000000000#include int main(int argc, char *argv[]) { MPI::Init(); MPI::Intercomm master = MPI::Comm::Get_parent(); int nprocs = master.Get_size(); int myrank = master.Get_rank(); int n; master.Bcast(&n, 1, MPI_INT, 0); double h = 1.0 / (double) n; double s = 0.0; for (int i = myrank+1; i < n+1; i += nprocs) { double x = h * (i - 0.5); s += 4.0 / (1.0 + x*x); } double pi = s * h; master.Reduce(&pi, MPI_BOTTOM, 1, MPI_DOUBLE, MPI_SUM, 0); master.Disconnect(); MPI::Finalize(); return 0; } mpi4py-4.0.3/demo/spawning/cpi-worker.f90000066400000000000000000000012521475341043600201230ustar00rootroot00000000000000PROGRAM main USE mpi implicit none integer ierr integer n, i, master, myrank, nprocs real (kind=8) h, s, x, cpi call MPI_INIT(ierr) call MPI_COMM_GET_PARENT(master, ierr) call MPI_COMM_SIZE(master, nprocs, ierr) call MPI_COMM_RANK(master, myrank, ierr) call MPI_BCAST(n, 1, MPI_INTEGER, & 0, master, ierr) h = 1 / DFLOAT(n) s = 0.0 DO i=myrank+1,n,nprocs x = h * (DFLOAT(i) - 0.5) s = s + 4.0 / (1.0 + x*x) END DO cpi = s * h call MPI_REDUCE(cpi, MPI_BOTTOM, 1, MPI_DOUBLE_PRECISION, & MPI_SUM, 0, master, ierr) call MPI_COMM_DISCONNECT(master, ierr) call MPI_FINALIZE(ierr) END PROGRAM main mpi4py-4.0.3/demo/spawning/cpi-worker.py000066400000000000000000000007221475341043600201560ustar00rootroot00000000000000from mpi4py import MPI from array import array master = MPI.Comm.Get_parent() nprocs = master.Get_size() myrank = master.Get_rank() n = array('i', [0]) master.Bcast([n, MPI.INT], root=0) n = n[0] h = 1.0 / n s = 0.0 for i in range(myrank+1, n+1, nprocs): x = h * (i - 0.5) s += 4.0 / (1.0 + x**2) pi = s * h pi = array('d', [pi]) master.Reduce(sendbuf=[pi, MPI.DOUBLE], recvbuf=None, op=MPI.SUM, root=0) master.Disconnect() mpi4py-4.0.3/demo/spawning/makefile000066400000000000000000000017771475341043600172350ustar00rootroot00000000000000include ../config.mk .PHONY: default build test clean default: build test clean LANGS = py c cxx f90 MASTERS = $(foreach lang,$(LANGS),cpi-master-$(lang).exe) WORKERS = $(foreach lang,$(LANGS),cpi-worker-$(lang).exe) build: $(MASTERS) $(WORKERS) MPIEXEC = mpiexec -n 1 test: build @for i in $(LANGS); do \ for j in $(LANGS); do \ $(MPIEXEC) ./cpi-master-$$i.exe ./cpi-worker-$$j.exe; \ done; \ done clean: $(RM) -r $(MASTERS) $(WORKERS) __pycache__ # Python cpi-master-py.exe: cpi-master.py echo '#!'`which python` > $@ cat $< >> $@ chmod +x $@ cpi-worker-py.exe: cpi-worker.py echo '#!'`which python` > $@ cat $< >> $@ chmod +x $@ # C cpi-master-c.exe: cpi-master.c $(MPICC) $< -o $@ cpi-worker-c.exe: cpi-worker.c $(MPICC) $< -o $@ # C++ cpi-master-cxx.exe: cpi-master.cxx $(MPICXX) $< -o $@ cpi-worker-cxx.exe: cpi-worker.cxx $(MPICXX) $< -o $@ # Fortran 90 cpi-master-f90.exe: cpi-master.f90 $(MPIFORT) $< -o $@ cpi-worker-f90.exe: cpi-worker.f90 $(MPIFORT) $< -o $@ mpi4py-4.0.3/demo/test-run/000077500000000000000000000000001475341043600154545ustar00rootroot00000000000000mpi4py-4.0.3/demo/test-run/makefile000066400000000000000000000002011475341043600171450ustar00rootroot00000000000000include ../config.mk 
.PHONY: default build test clean default: build test clean build: test: $(PYTHON) test_run.py -v clean: mpi4py-4.0.3/demo/test-run/run-script.py000066400000000000000000000025361475341043600201420ustar00rootroot00000000000000import os import sys import optparse from mpi4py import MPI assert __name__ == '__main__' assert sys.path[0] == os.path.dirname(__file__) if os.path.basename(__file__) == '__main__.py': assert sys.argv[0] == os.path.dirname(__file__) else: assert sys.argv[0] == __file__ parser = optparse.OptionParser() parser.add_option( "--rank", action='store', type='int', dest="rank", default=0, ) parser.add_option( "--interrupt", action='store_true', dest="interrupt", default=False, ) parser.add_option( "--sys-exit", action='store', type='int', dest="sys_exit", default=None, ) parser.add_option( "--sys-exit-msg", action='store', type='string', dest="sys_exit", default=None, ) parser.add_option( "--exception", action='store', type='string', dest="exception", default=None, ) (options, args) = parser.parse_args() assert not args comm = MPI.COMM_WORLD comm.Barrier() comm.Barrier() if comm.rank == options.rank: if options.interrupt: raise KeyboardInterrupt if options.sys_exit: sys.exit(options.sys_exit) if options.exception: raise RuntimeError(options.exception) comm.Barrier() comm.Barrier() if comm.rank > 0: comm.Recv([None, 'B'], comm.rank - 1) print("Hello, World!", flush=True) if comm.rank < comm.size - 1: comm.Send([None, 'B'], comm.rank + 1) comm.Barrier() sys.exit() mpi4py-4.0.3/demo/test-run/test_run.py000066400000000000000000000214741475341043600177010ustar00rootroot00000000000000import sys import os import shlex import shutil import warnings import subprocess import unittest import mpi4py on_pypy = hasattr(sys, 'pypy_version_info') on_ci = any(( os.environ.get('GITHUB_ACTIONS') == 'true', os.environ.get('TF_BUILD') == 'True', os.environ.get('CIRCLECI') == 'true', )) def find_executable(exe): command = shlex.split(exe) executable = shutil.which(command[0]) if executable: command[0] = executable try: # Python 3.8 return shlex.join(command) except AttributeError: return ' '.join(shlex.quote(arg) for arg in command) def find_mpiexec(mpiexec='mpiexec'): mpiexec = os.environ.get('MPIEXEC') or mpiexec mpiexec = find_executable(mpiexec) if not mpiexec and sys.platform.startswith('win'): I_MPI_DIR = os.environ.get('I_MPI_DIR', '') mpiexec = os.path.join(I_MPI_DIR, 'bin', 'mpiexec.exe') mpiexec = shutil.which(mpiexec) if mpiexec: mpiexec = shlex.quote(mpiexec) if not mpiexec and sys.platform.startswith('win'): MSMPI_BIN = os.environ.get('MSMPI_BIN', '') mpiexec = os.path.join(MSMPI_BIN, 'mpiexec.exe') mpiexec = shutil.which(mpiexec) if mpiexec: mpiexec = shlex.quote(mpiexec) return mpiexec def launcher(np): mpiexec = find_mpiexec() python = shlex.quote(sys.executable) if 'coverage' in sys.modules: python += ' -m coverage run -p' module = 'mpi4py.run -rc threads=False' command = f'{mpiexec} -n {np} {python} -m {module}' return shlex.split(command) def execute(np, cmd, args=''): mpi4pyroot = os.path.abspath(os.path.dirname(mpi4py.__path__[0])) pythonpath = os.environ.get('PYTHONPATH', '').split(os.pathsep) pythonpath.insert(0, mpi4pyroot) mpiexec = launcher(np) if isinstance(cmd, str): cmd = shlex.split(cmd) if isinstance(args, str): args = shlex.split(args) command = mpiexec + cmd + args env = os.environ.copy() env['PYTHONPATH'] = os.pathsep.join(pythonpath) env['PYTHONUNBUFFERED'] = '1' p = subprocess.Popen( command, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) stdout, 
stderr = p.communicate() return p.returncode, stdout.decode(), stderr.decode() @unittest.skipIf(not find_mpiexec(), 'mpiexec') class BaseTestRun(unittest.TestCase): def assertMPIAbort(self, stdout, stderr, message=None): patterns = ( 'MPI_Abort', # MPICH 'MPI_ABORT', # Open MPI 'aborting MPI_COMM_WORLD', # Microsoft MPI ) if on_pypy and message == 'KeyboardInterrupt': patterns += ( 'EXIT STRING: Interrupt (signal 2)', # MPICH 'exited on signal 2 (Interrupt)', # Open MPI ) aborted = any( mpiabort in output for output in (stdout, stderr) for mpiabort in patterns ) if aborted: if message is not None and not on_ci: self.assertIn(message, stderr) return if not (stdout or stderr) or on_ci: with warnings.catch_warnings(): warnings.simplefilter("always") warnings.warn( "expecting MPI_Abort() message in stdout/stderr", RuntimeWarning, 2, ) return raise self.failureException( "expecting MPI_Abort() message in stdout/stderr:\n" f"[stdout]:\n{stdout}\n[stderr]:\n{stderr}\n" ) class TestRunScript(BaseTestRun): pyfile = 'run-script.py' def execute(self, args='', np=1): dirname = os.path.abspath(os.path.dirname(__file__)) script = os.path.join(dirname, self.pyfile) return execute(np, shlex.quote(script), args) def testSuccess(self): success = 'Hello, World!' for np in (1, 2): status, stdout, stderr = self.execute(np=np) self.assertEqual(status, 0) self.assertEqual(stdout.count(success), np) self.assertEqual(stderr, '') def testException(self): message = r'ABCDEFGHIJKLMNOPQRSTUVWXYZ' excmess = f'RuntimeError: {message}' for np in (1, 2): for rank in range(0, np): args = ['--rank', str(rank), '--exception', message] status, stdout, stderr = self.execute(args, np) if on_ci and status == 221: continue self.assertEqual(status, 1) self.assertMPIAbort(stdout, stderr, excmess) def testSysExitCode(self): errcode = 7 for np in (1, 2): for r in sorted({0, np - 1}): args = ['--rank', str(r), '--sys-exit', str(errcode)] status, stdout, stderr = self.execute(args, np) self.assertIn(status, (errcode, 1)) self.assertMPIAbort(stdout, stderr) self.assertNotIn('Traceback', stderr) def testSysExitMess(self): exitmsg = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' for np in (1, 2): for r in sorted({0, np - 1}): args = ['--rank', str(r), '--sys-exit-msg', exitmsg] status, stdout, stderr = self.execute(args, np) self.assertEqual(status, 1) self.assertMPIAbort(stdout, stderr, exitmsg) self.assertNotIn('Traceback', stderr) def testInterrupt(self): from signal import SIGINT excmess = 'KeyboardInterrupt' for np in (1, 2): for rank in range(0, np): args = ['--rank', str(rank), '--interrupt'] status, stdout, stderr = self.execute(args, np) if on_ci and status == 221: continue if not on_pypy: self.assertEqual(status, SIGINT + 128) self.assertMPIAbort(stdout, stderr, excmess) class TestRunDirectory(TestRunScript): directory = 'run-directory' @classmethod def setUpClass(cls): from tempfile import mkdtemp cls.tempdir = mkdtemp() cls.directory = os.path.join(cls.tempdir, cls.directory) os.makedirs(cls.directory) topdir = os.path.dirname(__file__) script = os.path.join(topdir, super().pyfile) pymain = os.path.join(cls.directory, '__main__.py') shutil.copy(script, pymain) cls.pyfile = cls.directory @classmethod def tearDownClass(cls): shutil.rmtree(cls.tempdir) class TestRunZipFile(TestRunScript): zipfile = 'run-zipfile.zip' @classmethod def setUpClass(cls): from tempfile import mkdtemp from zipfile import ZipFile cls.tempdir = mkdtemp() cls.zipfile = os.path.join(cls.tempdir, cls.zipfile) topdir = os.path.dirname(__file__) script = os.path.join(topdir, 
super().pyfile) with ZipFile(cls.zipfile, 'w') as f: f.write(script, '__main__.py') cls.pyfile = cls.zipfile @classmethod def tearDownClass(cls): shutil.rmtree(cls.tempdir) class TestRunModule(BaseTestRun): def execute(self, module, np=1): return execute(np, '-m', module) def testSuccess(self): module = 'mpi4py.bench --no-threads helloworld' message = 'Hello, World!' for np in (1, 2): status, stdout, stderr = self.execute(module, np) self.assertEqual(status, 0) self.assertEqual(stdout.count(message), np) self.assertEqual(stderr, '') class TestRunCommand(BaseTestRun): def execute(self, command, np=1): return execute(np, '-c', shlex.quote(command)) def testArgv0(self): command = 'import sys; print(sys.argv[0], flush=True)' status, stdout, stderr = self.execute(command, 1) self.assertEqual(status, 0) self.assertEqual(stdout.strip(), '-c') self.assertEqual(stderr.strip(), '') def testSuccess(self): command = 'from mpi4py import MPI' for np in (1, 2): status, stdout, stderr = self.execute(command, np) self.assertEqual(status, 0) self.assertEqual(stdout, '') self.assertEqual(stderr, '') def testException(self): command = '; '.join(( 'from mpi4py import MPI', 'comm = MPI.COMM_WORLD', 'comm.Barrier()', 'comm.Barrier()', 'comm.Get_rank() == {} and (1/0)', 'comm.Barrier()', )) excmess = 'ZeroDivisionError:' for np in (1, 2): for rank in range(0, np): status, stdout, stderr = self.execute(command.format(rank), np) if on_ci and status == 221: continue self.assertEqual(status, 1) self.assertMPIAbort(stdout, stderr, excmess) if __name__ == '__main__': unittest.main() mpi4py-4.0.3/demo/threads/000077500000000000000000000000001475341043600153255ustar00rootroot00000000000000mpi4py-4.0.3/demo/threads/makefile000066400000000000000000000001761475341043600170310ustar00rootroot00000000000000include ../config.mk .PHONY: default build test clean default: build test clean build: test: $(PYTHON) sendrecv.py clean: mpi4py-4.0.3/demo/threads/sendrecv.py000066400000000000000000000017701475341043600175150ustar00rootroot00000000000000import sys import threading from mpi4py import MPI if MPI.Query_thread() < MPI.THREAD_MULTIPLE: sys.stderr.write("MPI does not provide enough thread support\n") sys.exit(0) try: import numpy except ImportError: sys.stderr.write("NumPy package not available\n") sys.exit(0) send_msg = numpy.arange(1000000, dtype='i') recv_msg = numpy.zeros_like(send_msg) start_event = threading.Event() def self_send(): start_event.wait() comm = MPI.COMM_WORLD rank = comm.Get_rank() comm.Send([send_msg, MPI.INT], dest=rank, tag=0) def self_recv(): start_event.wait() comm = MPI.COMM_WORLD rank = comm.Get_rank() comm.Recv([recv_msg, MPI.INT], source=rank, tag=0) send_thread = threading.Thread(target=self_send) recv_thread = threading.Thread(target=self_recv) for t in (recv_thread, send_thread): t.start() assert not numpy.allclose(send_msg, recv_msg) start_event.set() for t in (recv_thread, send_thread): t.join() assert numpy.allclose(send_msg, recv_msg) mpi4py-4.0.3/demo/wrap-boost/000077500000000000000000000000001475341043600157705ustar00rootroot00000000000000mpi4py-4.0.3/demo/wrap-boost/helloworld.cxx000066400000000000000000000017121475341043600206700ustar00rootroot00000000000000#include #include static void sayhello(MPI_Comm comm) { if (comm == MPI_COMM_NULL) { std::cout << "You passed MPI_COMM_NULL !!!" 
<< std::endl; return; } int size; MPI_Comm_size(comm, &size); int rank; MPI_Comm_rank(comm, &rank); int plen; char pname[MPI_MAX_PROCESSOR_NAME]; MPI_Get_processor_name(pname, &plen); std::cout << "Hello, World! " << "I am process " << rank << " of " << size << " on " << pname << "." << std::endl; } #include #include using namespace boost::python; static void hw_sayhello(object py_comm) { PyObject* py_obj = py_comm.ptr(); MPI_Comm *comm_p = PyMPIComm_Get(py_obj); if (comm_p == NULL) throw_error_already_set(); sayhello(*comm_p); } BOOST_PYTHON_MODULE(helloworld) { if (import_mpi4py() < 0) return; def("sayhello", hw_sayhello); } /* * Local Variables: * mode: C++ * End: */ mpi4py-4.0.3/demo/wrap-boost/makefile000066400000000000000000000010401475341043600174630ustar00rootroot00000000000000include ../config.mk .PHONY: default build test clean default: build test clean MODULE = helloworld SOURCE = $(MODULE).cxx TARGET = $(MODULE)$(EXT_SUFFIX) BOOST_PYVER = $(shell $(PYTHON) -c 'import sys; print(*sys.version_info[:2], sep="")') BOOST_INCLUDE = BOOST_LDFLAGS = -lboost_python$(BOOST_PYVER) PYCCFLAGS += $(BOOST_INCLUDE) PYLDFLAGS += $(BOOST_LDFLAGS) $(TARGET): $(SOURCE) $(MPICXX) $(MPI4PY_INCLUDE) $(CXX_FLAGS_SHARED) -o $@ $< build: $(TARGET) test: build $(MPIEXEC_PYTHON) test.py clean: $(RM) -r $(TARGET) __pycache__ mpi4py-4.0.3/demo/wrap-boost/test.py000066400000000000000000000003321475341043600173170ustar00rootroot00000000000000from mpi4py import MPI import helloworld as hw null = MPI.COMM_NULL hw.sayhello(null) comm = MPI.COMM_WORLD hw.sayhello(comm) try: hw.sayhello(list()) except: pass else: assert 0, "exception not raised" mpi4py-4.0.3/demo/wrap-c/000077500000000000000000000000001475341043600150645ustar00rootroot00000000000000mpi4py-4.0.3/demo/wrap-c/helloworld.c000066400000000000000000000041001475341043600173760ustar00rootroot00000000000000#define Py_LIMITED_API 0x03060000 #define MPI4PY_LIMITED_API 1 #define MPI4PY_LIMITED_API_SKIP_MESSAGE 1 #define MPI4PY_LIMITED_API_SKIP_SESSION 1 #define MPICH_SKIP_MPICXX 1 #define OMPI_SKIP_MPICXX 1 #include #include /* -------------------------------------------------------------------------- */ static void sayhello(MPI_Comm comm) { int size, rank; char pname[MPI_MAX_PROCESSOR_NAME]; int len; if (comm == MPI_COMM_NULL) { printf("You passed MPI_COMM_NULL !!!\n"); return; } MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank); MPI_Get_processor_name(pname, &len); pname[len] = 0; printf("Hello, World! 
I am process %d of %d on %s.\n", rank, size, pname); } /* -------------------------------------------------------------------------- */ static PyObject * hw_sayhello(PyObject *self, PyObject *args) { PyObject *py_comm = NULL; MPI_Comm *comm_p = NULL; if (!PyArg_ParseTuple(args, "O:sayhello", &py_comm)) return NULL; comm_p = PyMPIComm_Get(py_comm); if (comm_p == NULL) return NULL; sayhello(*comm_p); Py_INCREF(Py_None); return Py_None; } static struct PyMethodDef hw_methods[] = { {"sayhello", (PyCFunction)hw_sayhello, METH_VARARGS, NULL}, {NULL, NULL, 0, NULL} /* sentinel */ }; static struct PyModuleDef hw_module = { PyModuleDef_HEAD_INIT, "helloworld", /* m_name */ NULL, /* m_doc */ -1, /* m_size */ hw_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; PyMODINIT_FUNC PyInit_helloworld(void); PyMODINIT_FUNC PyInit_helloworld(void) { PyObject *m = NULL; /* Initialize mpi4py's C-API */ if (import_mpi4py() < 0) goto bad; /* Module initialization */ m = PyModule_Create(&hw_module); if (m == NULL) goto bad; return m; bad: return NULL; } /* -------------------------------------------------------------------------- */ /* Local variables: c-basic-offset: 2 indent-tabs-mode: nil End: */ mpi4py-4.0.3/demo/wrap-c/makefile000066400000000000000000000005121475341043600165620ustar00rootroot00000000000000include ../config.mk .PHONY: default build test clean default: build test clean MODULE = helloworld SOURCE = $(MODULE).c TARGET = $(MODULE)$(EXT_SUFFIX) $(TARGET): $(SOURCE) $(MPICC) $(MPI4PY_INCLUDE) $(CC_FLAGS_SHARED) -o $@ $< build: $(TARGET) test: build $(MPIEXEC_PYTHON) test.py clean: $(RM) -r $(TARGET) __pycache__ mpi4py-4.0.3/demo/wrap-c/test.py000066400000000000000000000003321475341043600164130ustar00rootroot00000000000000from mpi4py import MPI import helloworld as hw null = MPI.COMM_NULL hw.sayhello(null) comm = MPI.COMM_WORLD hw.sayhello(comm) try: hw.sayhello(list()) except: pass else: assert 0, "exception not raised" mpi4py-4.0.3/demo/wrap-cffi/000077500000000000000000000000001475341043600155515ustar00rootroot00000000000000mpi4py-4.0.3/demo/wrap-cffi/helloworld.c000066400000000000000000000011131475341043600200640ustar00rootroot00000000000000#define MPICH_SKIP_MPICXX 1 #define OMPI_SKIP_MPICXX 1 #include #include #ifdef __cplusplus extern "C" { #endif extern void sayhello(MPI_Comm); #ifdef __cplusplus } #endif void sayhello(MPI_Comm comm) { int size, rank; char pname[MPI_MAX_PROCESSOR_NAME]; int len; if (comm == MPI_COMM_NULL) { printf("You passed MPI_COMM_NULL !!!\n"); return; } MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank); MPI_Get_processor_name(pname, &len); pname[len] = 0; printf("Hello, World! 
I am process %d of %d on %s.\n", rank, size, pname); } mpi4py-4.0.3/demo/wrap-cffi/helloworld.py000066400000000000000000000006471475341043600203050ustar00rootroot00000000000000from mpi4py import MPI import cffi import os _libdir = os.path.dirname(__file__) ffi = cffi.FFI() if MPI._sizeof(MPI.Comm) == ffi.sizeof('int'): MPI_Comm = 'int' else: MPI_Comm = 'void*' ffi.cdef(f""" typedef {MPI_Comm} MPI_Comm; void sayhello(MPI_Comm); """) lib = ffi.dlopen(os.path.join(_libdir, "libhelloworld.so")) def sayhello(comm): comm_c = ffi.cast('MPI_Comm', comm.handle) lib.sayhello(comm_c) mpi4py-4.0.3/demo/wrap-cffi/makefile000066400000000000000000000004761475341043600172600ustar00rootroot00000000000000include ../config.mk .PHONY: default build test clean default: build test clean LIBRARY = helloworld SOURCE = $(LIBRARY).c TARGET = lib$(LIBRARY)$(LIB_SUFFIX) $(TARGET): $(SOURCE) $(MPICC) $(CC_FLAGS_SHARED) -o $@ $< build: $(TARGET) test: build $(MPIEXEC_PYTHON) test.py clean: $(RM) -r $(TARGET) __pycache__ mpi4py-4.0.3/demo/wrap-cffi/test.py000066400000000000000000000003321475341043600171000ustar00rootroot00000000000000from mpi4py import MPI import helloworld as hw null = MPI.COMM_NULL hw.sayhello(null) comm = MPI.COMM_WORLD hw.sayhello(comm) try: hw.sayhello(list()) except: pass else: assert 0, "exception not raised" mpi4py-4.0.3/demo/wrap-ctypes-f08/000077500000000000000000000000001475341043600165445ustar00rootroot00000000000000mpi4py-4.0.3/demo/wrap-ctypes-f08/helloworld.f08000066400000000000000000000010621475341043600212350ustar00rootroot00000000000000subroutine sayhello(comm) bind(C) use mpi_f08 implicit none type(MPI_Comm), intent(in) :: comm integer :: rank, size, nlen character (len=MPI_MAX_PROCESSOR_NAME) :: name if (comm == MPI_COMM_NULL) then print*, "You passed MPI_COMM_NULL !!!" return end if call MPI_Comm_rank(comm, rank) call MPI_Comm_size(comm, size) call MPI_Get_processor_name(name, nlen) print '(2A,I2,A,I2,3A)', & 'Hello, World!', & ' I am process ', rank, & ' of ', size, & ' on ', name(1:nlen), '.' 
end subroutine sayhello mpi4py-4.0.3/demo/wrap-ctypes-f08/helloworld.py000066400000000000000000000006251475341043600212740ustar00rootroot00000000000000from mpi4py import MPI import ctypes import os _libdir = os.path.dirname(__file__) MPI_Fint = ctypes.c_int class MPI_Comm(ctypes.Structure): _fields_ = [("mpi_val", MPI_Fint)] lib = ctypes.CDLL(os.path.join(_libdir, "libhelloworld.so")) lib.sayhello.restype = None lib.sayhello.argtypes = [ctypes.POINTER(MPI_Comm)] def sayhello(comm): comm_f = MPI_Comm(comm.py2f()) lib.sayhello(comm_f) mpi4py-4.0.3/demo/wrap-ctypes-f08/makefile000066400000000000000000000006141475341043600202450ustar00rootroot00000000000000include ../config.mk .PHONY: default build test clean default: build test clean LIBRARY = helloworld SOURCE = $(LIBRARY).f08 TARGET = lib$(LIBRARY)$(LIB_SUFFIX) ifneq ($(MPI_FORTRAN_MOD_DIR),) FCFLAGS += -I$(MPI_FORTRAN_MOD_DIR) endif $(TARGET): $(SOURCE) $(MPIFORT) $(FC_FLAGS_SHARED) -o $@ $< build: $(TARGET) test: build $(MPIEXEC_PYTHON) test.py clean: $(RM) -r $(TARGET) __pycache__ mpi4py-4.0.3/demo/wrap-ctypes-f08/test.py000066400000000000000000000003321475341043600200730ustar00rootroot00000000000000from mpi4py import MPI import helloworld as hw null = MPI.COMM_NULL hw.sayhello(null) comm = MPI.COMM_WORLD hw.sayhello(comm) try: hw.sayhello(list()) except: pass else: assert 0, "exception not raised" mpi4py-4.0.3/demo/wrap-ctypes-f90/000077500000000000000000000000001475341043600165455ustar00rootroot00000000000000mpi4py-4.0.3/demo/wrap-ctypes-f90/helloworld.f90000066400000000000000000000010651475341043600212420ustar00rootroot00000000000000subroutine sayhello(comm) use mpi implicit none integer, intent(in) :: comm integer :: rank, size, nlen, ierr character (len=MPI_MAX_PROCESSOR_NAME) :: name if (comm == MPI_COMM_NULL) then print*, 'You passed MPI_COMM_NULL !!!' return end if call MPI_Comm_rank(comm, rank, ierr) call MPI_Comm_size(comm, size, ierr) call MPI_Get_processor_name(name, nlen, ierr) print '(2A,I2,A,I2,3A)', & 'Hello, World!', & ' I am process ', rank, & ' of ', size, & ' on ', name(1:nlen), '.' 
end subroutine sayhello mpi4py-4.0.3/demo/wrap-ctypes-f90/helloworld.py000066400000000000000000000005151475341043600212730ustar00rootroot00000000000000from mpi4py import MPI import ctypes import os _libdir = os.path.dirname(__file__) MPI_Fint = ctypes.c_int lib = ctypes.CDLL(os.path.join(_libdir, "libhelloworld.so")) lib.sayhello_.restype = None lib.sayhello_.argtypes = [ctypes.POINTER(MPI_Fint)] def sayhello(comm): comm_f = MPI_Fint(comm.py2f()) lib.sayhello_(comm_f) mpi4py-4.0.3/demo/wrap-ctypes-f90/makefile000066400000000000000000000006141475341043600202460ustar00rootroot00000000000000include ../config.mk .PHONY: default build test clean default: build test clean LIBRARY = helloworld SOURCE = $(LIBRARY).f90 TARGET = lib$(LIBRARY)$(LIB_SUFFIX) ifneq ($(MPI_FORTRAN_MOD_DIR),) FCFLAGS += -I$(MPI_FORTRAN_MOD_DIR) endif $(TARGET): $(SOURCE) $(MPIFORT) $(FC_FLAGS_SHARED) -o $@ $< build: $(TARGET) test: build $(MPIEXEC_PYTHON) test.py clean: $(RM) -r $(TARGET) __pycache__ mpi4py-4.0.3/demo/wrap-ctypes-f90/test.py000066400000000000000000000003321475341043600200740ustar00rootroot00000000000000from mpi4py import MPI import helloworld as hw null = MPI.COMM_NULL hw.sayhello(null) comm = MPI.COMM_WORLD hw.sayhello(comm) try: hw.sayhello(list()) except: pass else: assert 0, "exception not raised" mpi4py-4.0.3/demo/wrap-ctypes/000077500000000000000000000000001475341043600161515ustar00rootroot00000000000000mpi4py-4.0.3/demo/wrap-ctypes/helloworld.c000066400000000000000000000011131475341043600204640ustar00rootroot00000000000000#define MPICH_SKIP_MPICXX 1 #define OMPI_SKIP_MPICXX 1 #include #include #ifdef __cplusplus extern "C" { #endif extern void sayhello(MPI_Comm); #ifdef __cplusplus } #endif void sayhello(MPI_Comm comm) { int size, rank; char pname[MPI_MAX_PROCESSOR_NAME]; int len; if (comm == MPI_COMM_NULL) { printf("You passed MPI_COMM_NULL !!!\n"); return; } MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank); MPI_Get_processor_name(pname, &len); pname[len] = 0; printf("Hello, World! 
I am process %d of %d on %s.\n", rank, size, pname); } mpi4py-4.0.3/demo/wrap-ctypes/helloworld.py000066400000000000000000000006341475341043600207010ustar00rootroot00000000000000from mpi4py import MPI import ctypes import os _libdir = os.path.dirname(__file__) if MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_int): MPI_Comm = ctypes.c_int else: MPI_Comm = ctypes.c_void_p lib = ctypes.CDLL(os.path.join(_libdir, "libhelloworld.so")) lib.sayhello.restype = None lib.sayhello.argtypes = [MPI_Comm] def sayhello(comm): comm_c = MPI_Comm(comm.handle) lib.sayhello(comm_c) mpi4py-4.0.3/demo/wrap-ctypes/makefile000066400000000000000000000004761475341043600176600ustar00rootroot00000000000000include ../config.mk .PHONY: default build test clean default: build test clean LIBRARY = helloworld SOURCE = $(LIBRARY).c TARGET = lib$(LIBRARY)$(LIB_SUFFIX) $(TARGET): $(SOURCE) $(MPICC) $(CC_FLAGS_SHARED) -o $@ $< build: $(TARGET) test: build $(MPIEXEC_PYTHON) test.py clean: $(RM) -r $(TARGET) __pycache__ mpi4py-4.0.3/demo/wrap-ctypes/test.py000066400000000000000000000003321475341043600175000ustar00rootroot00000000000000from mpi4py import MPI import helloworld as hw null = MPI.COMM_NULL hw.sayhello(null) comm = MPI.COMM_WORLD hw.sayhello(comm) try: hw.sayhello(list()) except: pass else: assert 0, "exception not raised" mpi4py-4.0.3/demo/wrap-cython/000077500000000000000000000000001475341043600161465ustar00rootroot00000000000000mpi4py-4.0.3/demo/wrap-cython/helloworld.pyx000066400000000000000000000012671475341043600210710ustar00rootroot00000000000000# cython: language_level=3str cdef extern from "mpi-compat.h": pass cdef extern from "stdio.h": int printf(char*, ...) cimport mpi4py.MPI as MPI from mpi4py.libmpi cimport * cdef void c_sayhello(MPI_Comm comm): cdef int size, rank, plen cdef char pname[MPI_MAX_PROCESSOR_NAME] if comm == MPI_COMM_NULL: printf(b"You passed MPI_COMM_NULL !!!%s", b"\n") return MPI_Comm_size(comm, &size) MPI_Comm_rank(comm, &rank) MPI_Get_processor_name(pname, &plen) printf(b"Hello, World! 
I am process %d of %d on %s.\n", rank, size, pname) def sayhello(MPI.Comm comm not None ): cdef MPI_Comm c_comm = comm.ob_mpi c_sayhello(c_comm) mpi4py-4.0.3/demo/wrap-cython/makefile000066400000000000000000000006211475341043600176450ustar00rootroot00000000000000include ../config.mk .PHONY: default build test clean src default: build test clean MODULE = helloworld SOURCE = $(MODULE).pyx GENSRC = $(MODULE).c TARGET = $(MODULE)$(EXT_SUFFIX) $(GENSRC): $(SOURCE) $(CYTHON) $< $(TARGET): $(GENSRC) $(MPICC) $(CC_FLAGS_SHARED) -o $@ $< src: $(GENSRC) build: $(TARGET) test: build $(MPIEXEC_PYTHON) test.py clean: $(RM) -r $(TARGET) __pycache__ $(GENSRC) mpi4py-4.0.3/demo/wrap-cython/mpi-compat.h000066400000000000000000000006511475341043600203670ustar00rootroot00000000000000/* Author: Lisandro Dalcin */ /* Contact: dalcinl@gmail.com */ #ifndef MPI_COMPAT_H #define MPI_COMPAT_H #include #if (MPI_VERSION < 3) && !defined(PyMPI_HAVE_MPI_Message) typedef void *PyMPI_MPI_Message; #define MPI_Message PyMPI_MPI_Message #endif #if (MPI_VERSION < 4) && !defined(PyMPI_HAVE_MPI_Session) typedef void *PyMPI_MPI_Session; #define MPI_Session PyMPI_MPI_Session #endif #endif/*MPI_COMPAT_H*/ mpi4py-4.0.3/demo/wrap-cython/test.py000066400000000000000000000004621475341043600175010ustar00rootroot00000000000000from mpi4py import MPI import helloworld as hw null = MPI.COMM_NULL hw.sayhello(null) comm = MPI.COMM_WORLD hw.sayhello(comm) try: hw.sayhello(None) except: pass else: assert 0, "exception not raised" try: hw.sayhello(list()) except: pass else: assert 0, "exception not raised" mpi4py-4.0.3/demo/wrap-f2py/000077500000000000000000000000001475341043600155225ustar00rootroot00000000000000mpi4py-4.0.3/demo/wrap-f2py/helloworld.f90000066400000000000000000000015051475341043600202160ustar00rootroot00000000000000! ! $ export FC=mpifort ! $ fcld=($(mpifort -link-info)); unset fcld[0]; ! $ export LDFLAGS=$fcld ! $ f2py -m helloworld -c helloworld.f90 ! subroutine sayhello(comm) use mpi implicit none integer :: comm integer :: rank, size, nlen, ierr character (len=MPI_MAX_PROCESSOR_NAME) :: pname if (comm == MPI_COMM_NULL) then print *, 'You passed MPI_COMM_NULL !!!' return end if call MPI_Comm_rank(comm, rank, ierr) call MPI_Comm_size(comm, size, ierr) call MPI_Get_processor_name(pname, nlen, ierr) print *, 'Hello, World!', & ' I am process ', rank, & ' of ', size, & ' on ', pname(1:nlen), '.' end subroutine sayhello ! program main ! use mpi ! implicit none ! integer ierr ! call MPI_Init(ierr) ! call sayhello(MPI_COMM_WORLD) ! call MPI_Finalize(ierr) ! 
end program main mpi4py-4.0.3/demo/wrap-f2py/makefile000066400000000000000000000010701475341043600172200ustar00rootroot00000000000000include ../config.mk .PHONY: default build test clean default: build test clean MODULE = helloworld SOURCE = $(MODULE).f90 TARGET = $(MODULE)$(EXT_SUFFIX) ldflags := ifndef ldflags # mpich ldflags := $(shell $(MPIFORT) -link-info 2> /dev/null) endif ifndef ldflags # openmpi ldflags := $(shell $(MPIFORT) -show:link 2> /dev/null) endif export FC=$(MPIFORT) export LDFLAGS=$(wordlist 2,$(words $(ldflags)),$(ldflags)) $(TARGET): $(SOURCE) $(F2PY) -m $(MODULE) -c $< build: $(TARGET) test: build $(MPIEXEC_PYTHON) test.py clean: $(RM) -r $(TARGET) __pycache__ mpi4py-4.0.3/demo/wrap-f2py/test.py000066400000000000000000000004041475341043600170510ustar00rootroot00000000000000from mpi4py import MPI import helloworld as hw null = MPI.COMM_NULL fnull = null.py2f() hw.sayhello(fnull) comm = MPI.COMM_WORLD fcomm = comm.py2f() hw.sayhello(fcomm) try: hw.sayhello(list()) except: pass else: assert 0, "exception not raised" mpi4py-4.0.3/demo/wrap-pybind11/000077500000000000000000000000001475341043600162715ustar00rootroot00000000000000mpi4py-4.0.3/demo/wrap-pybind11/helloworld.cxx000066400000000000000000000023511475341043600211710ustar00rootroot00000000000000#include #include static void sayhello(MPI_Comm comm) { if (comm == MPI_COMM_NULL) { std::cout << "You passed MPI_COMM_NULL !!!" << std::endl; return; } int size; MPI_Comm_size(comm, &size); int rank; MPI_Comm_rank(comm, &rank); int plen; char pname[MPI_MAX_PROCESSOR_NAME]; MPI_Get_processor_name(pname, &plen); std::cout << "Hello, World! " << "I am process " << rank << " of " << size << " on " << pname << "." << std::endl; } #include #define MPI4PY_LIMITED_API 1 #define MPI4PY_LIMITED_API_SKIP_MESSAGE 1 #define MPI4PY_LIMITED_API_SKIP_SESSION 1 #include namespace py = pybind11; template T py2mpi(py::object); template<> MPI_Comm py2mpi(py::object obj) { PyObject *pyobj = obj.ptr(); MPI_Comm *mpi_ptr = PyMPIComm_Get(pyobj); if (!mpi_ptr) throw py::error_already_set(); return *mpi_ptr; } static void hw_sayhello(py::object py_comm) { MPI_Comm comm = py2mpi(py_comm); sayhello(comm); } PYBIND11_MODULE(helloworld, m) { if (import_mpi4py() < 0) throw py::error_already_set(); m.def("sayhello", &hw_sayhello); } /* * Local Variables: * mode: C++ * End: */ mpi4py-4.0.3/demo/wrap-pybind11/makefile000066400000000000000000000006241475341043600177730ustar00rootroot00000000000000include ../config.mk .PHONY: default build test clean default: build test clean MODULE = helloworld SOURCE = $(MODULE).cxx TARGET = $(MODULE)$(EXT_SUFFIX) CXX_FLAGS += -std=c++11 CXX_FLAGS += $(PYBIND11_INCLUDE) CXX_FLAGS += $(MPI4PY_INCLUDE) $(TARGET): $(SOURCE) $(MPICXX) $(CXX_FLAGS_SHARED) -o $@ $< build: $(TARGET) test: build $(MPIEXEC_PYTHON) test.py clean: $(RM) -r $(TARGET) __pycache__ mpi4py-4.0.3/demo/wrap-pybind11/test.py000066400000000000000000000003321475341043600176200ustar00rootroot00000000000000from mpi4py import MPI import helloworld as hw null = MPI.COMM_NULL hw.sayhello(null) comm = MPI.COMM_WORLD hw.sayhello(comm) try: hw.sayhello(list()) except: pass else: assert 0, "exception not raised" mpi4py-4.0.3/demo/wrap-swig/000077500000000000000000000000001475341043600156135ustar00rootroot00000000000000mpi4py-4.0.3/demo/wrap-swig/helloworld.i000066400000000000000000000012161475341043600201400ustar00rootroot00000000000000%module helloworld %{ #define MPICH_SKIP_MPICXX 1 #define OMPI_SKIP_MPICXX 1 #include #include void sayhello(MPI_Comm comm) { int size, 
rank; char pname[MPI_MAX_PROCESSOR_NAME]; int len; if (comm == MPI_COMM_NULL) { printf("You passed MPI_COMM_NULL !!!\n"); return; } MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank); MPI_Get_processor_name(pname, &len); pname[len] = 0; printf("Hello, World! I am process %d of %d on %s.\n", rank, size, pname); } %} %include mpi4py/mpi4py.i %mpi4py_typemap(Comm, MPI_Comm); void sayhello(MPI_Comm comm); /* * Local Variables: * mode: C * End: */ mpi4py-4.0.3/demo/wrap-swig/makefile000066400000000000000000000007131475341043600173140ustar00rootroot00000000000000include ../config.mk .PHONY: default build test clean src default: build test clean MODULE = helloworld SOURCE = $(MODULE).i GENSRC = $(MODULE)_wrap.c TARGET = _$(MODULE)$(EXT_SUFFIX) $(GENSRC): $(SOURCE) $(SWIG) -python $(MPI4PY_INCLUDE) $< $(TARGET): $(GENSRC) $(MPICC) $(MPI4PY_INCLUDE) $(CC_FLAGS_SHARED) -o $@ $< src: $(GENSRC) build: $(TARGET) test: build $(MPIEXEC_PYTHON) test.py clean: $(RM) -r $(TARGET) __pycache__ $(GENSRC) $(MODULE).py mpi4py-4.0.3/demo/wrap-swig/test.py000066400000000000000000000003321475341043600171420ustar00rootroot00000000000000from mpi4py import MPI import helloworld as hw null = MPI.COMM_NULL hw.sayhello(null) comm = MPI.COMM_WORLD hw.sayhello(comm) try: hw.sayhello(list()) except: pass else: assert 0, "exception not raised" mpi4py-4.0.3/docs/000077500000000000000000000000001475341043600136775ustar00rootroot00000000000000mpi4py-4.0.3/docs/index.rst000066400000000000000000000035271475341043600155470ustar00rootroot00000000000000============== MPI for Python ============== :Author: Lisandro Dalcin :Contact: dalcinl@gmail.com Online Documentation -------------------- Hosted at *Read the Docs* [https://mpi4py.readthedocs.io/]: + `Stable `_: |rtd-stable| + `Latest `_: |rtd-latest| .. |rtd-stable| image:: https://readthedocs.org/projects/mpi4py/badge/?version=stable :target: `rtd-stable`_ .. _rtd-stable: https://mpi4py.readthedocs.io/en/stable/ .. |rtd-latest| image:: https://readthedocs.org/projects/mpi4py/badge/?version=latest :target: `rtd-latest`_ .. _rtd-latest: https://mpi4py.readthedocs.io/en/latest Hosted at *GitHub* [https://mpi4py.github.io/]: + `User Manual (HTML)`_ (generated with Sphinx_). + `User Manual (PDF)`_ (generated with Sphinx_). + `Reference Guide`_ (generated with Sphinx_). .. _User Manual (HTML): html/index.html .. _User Manual (PDF): mpi4py.pdf .. _Reference Guide: html/reference/index.html .. _Sphinx: https://www.sphinx-doc.org/ Discussion and Support ---------------------- Hosted at Google Groups: + Group Page: https://groups.google.com/g/mpi4py + Mailing List: mpi4py@googlegroups.com Hosted at GitHub: * Discussions: https://github.com/mpi4py/mpi4py/discussions Downloads and Development ------------------------- Hosted at GitHub: + Project Site: https://github.com/mpi4py/mpi4py + Source Releases: https://github.com/mpi4py/mpi4py/releases + Issue Tracker: https://github.com/mpi4py/mpi4py/issues + Git Repository: https://github.com/mpi4py/mpi4py.git Citation -------- .. include:: ../CITATION.rst Acknowledgments --------------- This project was partially supported by the Advanced Algorithm and Numerical Simulations Laboratory (AANSLab), Division of Computer, Electrical, and Mathematical Sciences & Engineering (CEMSE), King Abdullah University of Science and Technology (KAUST). 
mpi4py-4.0.3/docs/mpi4py.bib000066400000000000000000000033171475341043600156030ustar00rootroot00000000000000@article{mpi4py.futures, title = {mpi4py.futures: {MPI}-Based Asynchronous Task Execution for {P}ython}, author = {Rogowski, Marcin and Aseeri, Samar and Keyes, David and Dalcin, Lisandro}, journal = {IEEE Transactions on Parallel and Distributed Systems}, volume = {34}, number = {2}, pages = {611--622}, year = {2023}, doi = {10.1109/TPDS.2022.3225481} } @article{Dalcin2021, title = {mpi4py: Status Update After 12 Years of Development}, author = {Dalcin, Lisandro and Fang, Yao-Lung Leo}, journal = {Computing in Science \& Engineering}, volume = {23}, number = {4}, pages = {47--54}, year = {2021}, issn = {1521-9615}, doi = {10.1109/MCSE.2021.3083216} } @article{Dalcin2011, title = {Parallel distributed computing using {P}ython}, author = {Lisandro D. Dalcin and Rodrigo R. Paz and Pablo A. Kler and Alejandro Cosimo}, journal = {Advances in Water Resources}, volume = {34}, number = {9}, pages = {1124--1139}, year = {2011}, issn = {0309-1708}, doi = {10.1016/j.advwatres.2011.04.013}, } @article{mpi4py2008, title = {{MPI} for {P}ython: Performance improvements and {MPI-2} extensions}, author = {Lisandro Dalcin and Rodrigo Paz and Mario Storti and Jorge D'Elia}, journal = {Journal of Parallel and Distributed Computing}, volume = {68}, number = {5}, pages = {655--662}, year = {2008}, issn = {0743-7315}, doi = {10.1016/j.jpdc.2007.09.005}, } @article{mpi4py2005, title = {{MPI} for {P}ython}, author = {Lisandro Dalcin and Rodrigo Paz and Mario Storti}, journal = {Journal of Parallel and Distributed Computing}, volume = {65}, number = {9}, pages = {1108--1115}, year = {2005}, issn = {0743-7315}, doi = {10.1016/j.jpdc.2005.03.010}, } mpi4py-4.0.3/docs/mpi4py.svg000066400000000000000000000003731475341043600156450ustar00rootroot00000000000000 mpi4py-4.0.3/docs/source/000077500000000000000000000000001475341043600151775ustar00rootroot00000000000000mpi4py-4.0.3/docs/source/Makefile000066400000000000000000000011721475341043600166400ustar00rootroot00000000000000# Minimal makefile for Sphinx documentation # # You can set these variables from the command line, and also # from the environment for the first two. SPHINXOPTS ?= SPHINXBUILD ?= sphinx-build SOURCEDIR = . BUILDDIR = _build # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) mpi4py-4.0.3/docs/source/_templates/000077500000000000000000000000001475341043600173345ustar00rootroot00000000000000mpi4py-4.0.3/docs/source/_templates/autosummary/000077500000000000000000000000001475341043600217225ustar00rootroot00000000000000mpi4py-4.0.3/docs/source/_templates/autosummary/class.rst000066400000000000000000000032271475341043600235650ustar00rootroot00000000000000{{ fullname | escape | underline}} .. currentmodule:: {{ module }} {%- if autotype is defined %} {%- set objtype = autotype.get(name) or objtype %} {%- endif %} .. auto{{ objtype }}:: {{ objname }} :show-inheritance: {% for item in ['__new__', '__init__'] %} {%- if item in members and item not in inherited_members %} .. 
automethod:: {{item}} {%- endif %} {%- endfor %} {%- for item in inherited_members %} {%- if item in methods %} {%- set dummy = methods.remove(item) %} {%- endif %} {%- if item in attributes %} {%- set dummy = attributes.remove(item) %} {%- endif %} {%- endfor %} {%- for item in ['__new__', '__init__'] %} {%- if item in methods %} {%- set dummy = methods.remove(item) %} {%- endif %} {%- endfor %} {% block methods_summary %} {%- if methods %} .. rubric:: Methods Summary .. autosummary:: {% for item in methods %} ~{{ name }}.{{ item }} {%- endfor %} {%- endif %} {%- endblock %} {% block attributes_summary %} {%- if attributes %} .. rubric:: Attributes Summary .. autosummary:: {% for item in attributes %} ~{{ name }}.{{ item }} {%- endfor %} {%- endif %} {%- endblock %} {% block methods_documentation %} {%- if methods %} .. rubric:: Methods Documentation {% for item in methods %} .. automethod:: {{ item }} {%- endfor %} {%- endif %} {%- endblock %} {% block attributes_documentation %} {%- if attributes %} .. rubric:: Attributes Documentation {% for item in attributes %} .. autoattribute:: {{ item }} {%- endfor %} {%- endif %} {%- endblock %} {# #} mpi4py-4.0.3/docs/source/_templates/autosummary/module.rst000066400000000000000000000023351475341043600237440ustar00rootroot00000000000000{{ fullname | escape | underline}} .. automodule:: {{ fullname }} {%- if synopsis is defined %} :synopsis: {{ synopsis.get(fullname, '') }} {%- endif %} {% block classes %} {%- if classes %} .. rubric:: {{ _('Classes') }} .. autosummary:: :toctree: {% for item in classes %} {{ item }} {%- endfor %} {%- endif %} {%- endblock %} {% block exceptions %} {%- if exceptions %} .. rubric:: {{ _('Exceptions') }} .. autosummary:: :toctree: {% for item in exceptions %} {{ item }} {%- endfor %} {%- endif %} {%- endblock %} {% block functions %} {%- if functions %} .. rubric:: {{ _('Functions') }} .. autosummary:: :toctree: {% for item in functions %} {{ item }} {%- endfor %} {%- endif %} {%- endblock %} {% block attributes %} {%- if attributes %} .. rubric:: {{ _('Attributes') }} .. autosummary:: :toctree: {% for item in attributes %} {{ item }} {%- endfor %} {%- endif %} {%- endblock %} {% block modules %} {%- if modules %} .. rubric:: {{ _('Modules') }} .. 
autosummary:: :toctree: :recursive: {% for item in modules %} {{ item }} {%- endfor %} {%- endif %} {%- endblock %} mpi4py-4.0.3/docs/source/_templates/layout.html000066400000000000000000000005731475341043600215440ustar00rootroot00000000000000{% extends "!layout.html" %} {% macro menu_genindex() -%} {%- endmacro %} {% block menu %} {{ super() }} {%- if builder == 'html' %} {{ menu_genindex() }} {%- endif %} {%- if builder == 'dirhtml' %} {{ menu_genindex() }} {%- endif %} {% endblock %} mpi4py-4.0.3/docs/source/apidoc.py000066400000000000000000000363031475341043600170150ustar00rootroot00000000000000import os import sys import inspect import textwrap def is_cyfunction(obj): return type(obj).__name__ == 'cython_function_or_method' def is_function(obj): return ( inspect.isbuiltin(obj) or is_cyfunction(obj) or type(obj) is type(ord) ) def is_method(obj): return ( inspect.ismethoddescriptor(obj) or inspect.ismethod(obj) or is_cyfunction(obj) or type(obj) in ( type(str.index), type(str.__add__), type(str.__new__), ) ) def is_classmethod(obj): return ( inspect.isbuiltin(obj) or type(obj).__name__ in ( 'classmethod', 'classmethod_descriptor', ) ) def is_staticmethod(obj): return ( type(obj).__name__ in ( 'staticmethod', ) ) def is_datadescr(obj): return inspect.isdatadescriptor(obj) and not hasattr(obj, 'fget') def is_property(obj): return inspect.isdatadescriptor(obj) and hasattr(obj, 'fget') def is_class(obj): return inspect.isclass(obj) or type(obj) is type(int) class Lines(list): INDENT = " " * 4 level = 0 @property def add(self): return self @add.setter def add(self, lines): if lines is None: return if isinstance(lines, str): lines = textwrap.dedent(lines).strip().split('\n') indent = self.INDENT * self.level for line in lines: self.append(indent + line) def signature(obj): doc = obj.__doc__ sig = doc.partition('\n')[0] return sig or None def docstring(obj): doc = obj.__doc__ doc = doc.partition('\n')[2] doc = textwrap.dedent(doc).strip() doc = f'"""{doc}\n"""' doc = textwrap.indent(doc, Lines.INDENT) return doc def visit_constant(constant): name, value = constant typename = type(value).__name__ kind = "Constant" if isinstance(value, (str, int, float)) else "Object" init = f"_def({typename}, '{name}')" doc = f"#: {kind} ``{name}`` of type :class:`{typename}`" return f"{name}: {typename} = {init} {doc}\n" def visit_function(function): sig = signature(function) doc = docstring(function) body = Lines.INDENT + "..." return f"def {sig}:\n{doc}\n{body}\n" def visit_method(method): sig = signature(method) doc = docstring(method) body = Lines.INDENT + "..." return f"def {sig}:\n{doc}\n{body}\n" def visit_datadescr(datadescr, name=None): sig = signature(datadescr) doc = docstring(datadescr) name = sig.split(':')[0].strip() type = sig.split(':')[1].strip() sig = f"{name}(self) -> {type}" body = Lines.INDENT + "..." return f"@property\ndef {sig}:\n{doc}\n{body}\n" def visit_property(prop, name=None): sig = signature(prop.fget) name = name or prop.fget.__name__ type = sig.rsplit('->', 1)[-1].strip() sig = f"{name}(self) -> {type}" doc = f'"""{prop.__doc__}"""' doc = textwrap.indent(doc, Lines.INDENT) body = Lines.INDENT + "..." 
return f"@property\ndef {sig}:\n{doc}\n{body}\n" def visit_constructor(cls, name='__init__', args=None): init = (name == '__init__') argname = cls.__mro__[-2].__name__.lower() argtype = cls.__name__ initarg = args or f"{argname}: {argtype} | None = None" selfarg = 'self' if init else 'cls' rettype = 'None' if init else 'Self' arglist = f"{selfarg}, {initarg}" sig = f"{name}({arglist}) -> {rettype}" ret = '...' if init else 'return super().__new__(cls)' body = Lines.INDENT + ret return f"def {sig}:\n{body}" def visit_class(cls, done=None): skip = { '__doc__', '__dict__', '__module__', '__weakref__', '__pyx_vtable__', '__lt__', '__le__', '__ge__', '__gt__', '__str__', '__repr__', } special = { '__len__': ("self", "int", None), '__bool__': ("self", "bool", None), '__hash__': ("self", "int", None), '__int__': ("self", "int", None), '__index__': ("self", "int", None), '__eq__': ("self", "other: object", "bool", None), '__ne__': ("self", "other: object", "bool", None), '__buffer__': ("self", "flags: int", "memoryview", (3, 12)), } constructor = ( '__new__', '__init__', ) override = OVERRIDE.get(cls.__name__, {}) done = set() if done is None else done lines = Lines() base = cls.__base__ if base is object: lines.add = f"class {cls.__name__}:" else: lines.add = f"class {cls.__name__}({base.__name__}):" lines.level += 1 lines.add = docstring(cls) name = "__init__" if name not in override: sig = signature(cls) if sig is not None: done.update(constructor) args = sig.strip().split('->', 1)[0].strip() args = args[args.index('('):][1:-1] lines.add = visit_constructor(cls, name, args) for name in constructor: if name in done: break if name in override: done.update(constructor) lines.add = override[name] break if name in cls.__dict__: done.update(constructor) lines.add = visit_constructor(cls, name) break if '__hash__' in cls.__dict__: if cls.__hash__ is None: done.add('__hash__') dct = cls.__dict__ keys = list(dct.keys()) for name in keys: if name in done: continue if name in skip: continue if name in override: done.add(name) lines.add = override[name] continue if name in special: done.add(name) *args, retv, py = special[name] sig = f"{name}({', '.join(args)}) -> {retv}" if py is not None: lines.add = f"if sys.version_info >= {py}:" lines.level += 1 lines.add = f"def {sig}: ..." 
if py is not None: lines.level -= 1 continue attr = getattr(cls, name) if is_method(attr): done.add(name) if name == attr.__name__: obj = dct[name] if is_classmethod(obj): lines.add = "@classmethod" elif is_staticmethod(obj): lines.add = "@staticmethod" lines.add = visit_method(attr) elif False: lines.add = f"{name} = {attr.__name__}" continue if is_datadescr(attr): done.add(name) lines.add = visit_datadescr(attr) continue if is_property(attr): done.add(name) lines.add = visit_property(attr, name) continue leftovers = [name for name in keys if name not in done and name not in skip] if leftovers: raise RuntimeError(f"leftovers: {leftovers}") lines.level -= 1 return lines def visit_module(module, done=None): skip = { '__doc__', '__name__', '__loader__', '__spec__', '__file__', '__package__', '__builtins__', } done = set() if done is None else done lines = Lines() keys = list(module.__dict__.keys()) keys.sort(key=lambda name: name.startswith("_")) constants = [ (name, getattr(module, name)) for name in keys if all(( name not in done and name not in skip, isinstance(getattr(module, name), int), )) ] for _, value in constants: cls = type(value) name = cls.__name__ if name in done or name in skip: continue if cls.__module__ == module.__name__: done.add(name) lines.add = visit_class(cls) lines.add = "" for name, value in constants: done.add(name) if name in OVERRIDE: lines.add = OVERRIDE[name] else: lines.add = visit_constant((name, value)) if constants: lines.add = "" for name in keys: if name in done or name in skip: continue value = getattr(module, name) if is_class(value): done.add(name) lines.add = visit_class(value) lines.add = "" aliases = [ (k, getattr(module, k)) for k in keys if all(( k not in done and k not in skip, getattr(module, k) is value, )) ] for aliasname, target in aliases: done.add(aliasname) lines.add = f"{aliasname} = {target.__name__}" if aliases: lines.add = "" instances = [ (k, getattr(module, k)) for k in keys if all(( k not in done and k not in skip, type(getattr(module, k)) is value, )) ] for attrname, attrvalue in instances: done.add(attrname) lines.add = visit_constant((attrname, attrvalue)) if instances: lines.add = "" continue if is_function(value): done.add(name) if name == value.__name__: lines.add = visit_function(value) else: lines.add = f"{name} = {value.__name__}" continue lines.add = "" for name in keys: if name in done or name in skip: continue value = getattr(module, name) done.add(name) if name in OVERRIDE: lines.add = OVERRIDE[name] else: lines.add = visit_constant((name, value)) leftovers = [name for name in keys if name not in done and name not in skip] if leftovers: raise RuntimeError(f"leftovers: {leftovers}") return lines IMPORTS = """ import sys from typing import ( Any, AnyStr, Final, Literal, NoReturn, ) from typing import ( Callable, Hashable, Iterable, Iterator, Sequence, Mapping, ) try: from typing import Self except ImportError: Self = 'Self' from os import PathLike """ HELPERS = """ class _Int(int): pass def _repr(obj): try: return obj._name except AttributeError: return super(obj).__repr__() def _def(cls, name): if cls is int: cls = _Int obj = cls() if cls.__name__ in ('Pickle', 'buffer'): return obj obj._name = name if '__repr__' not in cls.__dict__: cls.__repr__ = _repr return obj """ OVERRIDE = { 'Exception': { '__new__': ( "def __new__(cls, ierr: int = SUCCESS) -> Self:\n" " return super().__new__(cls, ierr)"), "__lt__": "def __lt__(self, other: int) -> bool: ...", "__le__": "def __le__(self, other: int) -> bool: ...", "__gt__": "def 
__gt__(self, other: int) -> bool: ...", "__ge__": "def __ge__(self, other: int) -> bool: ...", }, 'Info': { '__iter__': "def __iter__(self) -> Iterator[str]: ...", '__getitem__': "def __getitem__(self, item: str) -> str: ...", '__setitem__': "def __setitem__(self, item: str, value: str) -> None: ...", '__delitem__': "def __delitem__(self, item: str) -> None: ...", '__contains__': "def __contains__(self, value: str) -> bool: ...", }, 'Op': { '__call__': "def __call__(self, x: Any, y: Any) -> Any: ...", }, 'buffer': { '__new__': ( "def __new__(cls, buf: Buffer) -> Self:\n" " return super().__new__(cls)"), '__getitem__': ( "def __getitem__(self, " "item: int | slice) " "-> int | buffer: ..."), '__setitem__': ( "def __setitem__(self, " "item: int | slice, " "value:int | Buffer) " "-> None: ..."), '__delitem__': None, }, '__pyx_capi__': None, '_typedict': "_typedict: Dict[str, Datatype] = {}", '_typedict_c': "_typedict_c: Dict[str, Datatype] = {}", '_typedict_f': "_typedict_f: Dict[str, Datatype] = {}", '_keyval_registry': None, } OVERRIDE.update({ subtype: { '__new__': ( f"def __new__(cls) -> Self:\n" f" return super().__new__({subtype})"), '__repr__': "def __repr__(self) -> str: return self._name", } for subtype in ( 'BottomType', 'InPlaceType', 'BufferAutomaticType', ) }) TYPING = """ from .typing import * """ def visit_mpi4py_MPI(): from mpi4py import MPI as module lines = Lines() lines.add = f'"""{module.__doc__}"""' lines.add = "# flake8: noqa" lines.add = IMPORTS lines.add = "" lines.add = HELPERS lines.add = "" lines.add = visit_module(module) lines.add = "" lines.add = TYPING return lines def generate(filename): dirname = os.path.dirname(filename) os.makedirs(dirname, exist_ok=True) with open(filename, 'w') as f: for line in visit_mpi4py_MPI(): print(line, file=f) def load_module(filename, name=None): if name is None: name, _ = os.path.splitext( os.path.basename(filename)) module = type(sys)(name) module.__file__ = filename module.__package__ = name.rsplit('.', 1)[0] with open(filename) as f: preamble = "from __future__ import annotations" codelines = f.read().split("\n") for lineno, line in enumerate(codelines): match = line.strip().startswith if not any(map(match, "'\"#")): codelines.insert(lineno, preamble) break code = "\n".join(codelines) exec(code, module.__dict__) # noqa: S102 return module _sys_modules = {} def replace_module(module): name = module.__name__ assert name not in _sys_modules # noqa: S101 _sys_modules[name] = sys.modules[name] sys.modules[name] = module pkgname, _, modname = name.rpartition('.') if pkgname: setattr(sys.modules[pkgname], modname, module) def restore_module(module): name = module.__name__ assert name in _sys_modules # noqa: S101 sys.modules[name] = _sys_modules[name] pkgname, _, modname = name.rpartition('.') if pkgname: setattr(sys.modules[pkgname], modname, sys.modules[name]) def annotate(dest, source): try: dest.__annotations__ = source.__annotations__ except AttributeError: pass if isinstance(dest, type): for name in dest.__dict__.keys(): if hasattr(source, name): obj = getattr(dest, name) annotate(obj, getattr(source, name)) if isinstance(dest, type(sys)): for name in dir(dest): if hasattr(source, name): obj = getattr(dest, name) mod = getattr(obj, '__module__', None) if dest.__name__ == mod: annotate(obj, getattr(source, name)) for name in dir(source): if not hasattr(dest, name): setattr(dest, name, getattr(source, name)) OUTDIR = 'reference' if __name__ == '__main__': generate(os.path.join(OUTDIR, 'mpi4py.MPI.py')) 
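# Usage sketch: the Sphinx configuration in docs/source/conf.py drives this
# module roughly as follows, regenerating the API stub only when it is
# missing or out of date with respect to the compiled MPI module:
#
#     import apidoc
#     source = os.path.join(apidoc.OUTDIR, 'mpi4py.MPI.py')
#     if not os.path.exists(source):
#         apidoc.generate(source)
#     module = apidoc.load_module(source)
#     apidoc.replace_module(module)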
mpi4py-4.0.3/docs/source/changes.rst000066400000000000000000000001471475341043600173430ustar00rootroot00000000000000:tocdepth: 1 .. _changes: CHANGES ------- .. default-role:: literal .. include:: ../../CHANGES.rst mpi4py-4.0.3/docs/source/citation.rst000066400000000000000000000002771475341043600175510ustar00rootroot00000000000000Citation ======== If MPI for Python has been significant to a project that leads to an academic publication, please acknowledge that fact by citing the project. .. include:: ../../CITATION.rst mpi4py-4.0.3/docs/source/conf.py000066400000000000000000000263061475341043600165050ustar00rootroot00000000000000# Configuration file for the Sphinx documentation builder. # # For the full list of built-in configuration values, see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. import os import sys import typing import datetime import importlib sys.path.insert(0, os.path.abspath('.')) _today = datetime.datetime.now() # -- Project information ----------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information package = 'mpi4py' def pkg_version(): import re here = os.path.dirname(__file__) pardir = [os.path.pardir] * 2 topdir = os.path.join(here, *pardir) srcdir = os.path.join(topdir, 'src') with open(os.path.join(srcdir, 'mpi4py', '__init__.py')) as f: m = re.search(r"__version__\s*=\s*'(.*)'", f.read()) return m.groups()[0] project = 'MPI for Python' author = 'Lisandro Dalcin' copyright = f'{_today.year}, {author}' release = pkg_version() version = release.rsplit('.', 1)[0] # -- General configuration --------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.intersphinx', 'sphinx.ext.napoleon', ] templates_path = ['_templates'] exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] needs_sphinx = '5.0.0' default_role = 'any' nitpicky = True nitpick_ignore = [ ('c:func', r'atexit'), ('py:mod', r'__worker__'), ('py:mod', r'pickle5'), ] nitpick_ignore_regex = [ (r'c:.*', r'MPI_.*'), (r'envvar', r'(LD_LIBRARY_)?PATH'), (r'envvar', r'(MPICH|OMPI|MPIEXEC)_.*'), ] toc_object_entries = False toc_object_entries_show_parents = 'hide' # python_use_unqualified_type_names = True autodoc_class_signature = 'separated' autodoc_typehints = 'description' autodoc_typehints_format = 'short' autodoc_mock_imports = [] autodoc_type_aliases = {} autosummary_context = { 'synopsis': {}, 'autotype': {}, } intersphinx_mapping = { 'python': ('https://docs.python.org/3/', None), 'numpy': ('https://numpy.org/doc/stable/', None), 'dlpack': ('https://dmlc.github.io/dlpack/latest/', None), 'numba': ('https://numba.readthedocs.io/en/stable/', None), } napoleon_preprocess_types = True try: import sphinx_rtd_theme if 'sphinx_rtd_theme' not in extensions: extensions.append('sphinx_rtd_theme') except ImportError: sphinx_rtd_theme = None try: import sphinx_copybutton if 'sphinx_copybutton' not in extensions: extensions.append('sphinx_copybutton') except ImportError: sphinx_copybutton = None copybutton_exclude = '.linenos, .gp, .go' 
copybutton_prompt_text = r'\$ |>>> |\.\.\. ' copybutton_prompt_is_regexp = True copybutton_line_continuation_character = '\\' extensions.append('sphinx.ext.coverage') coverage_ignore_modules = [r'mpi4py\.(bench|run)'] coverage_ignore_classes = [r'Rc', r'memory'] def _setup_numpy_typing(): try: import numpy as np except ImportError: from types import new_class from typing import Generic, TypeVar np = type(sys)('numpy') sys.modules[np.__name__] = np np.dtype = new_class('dtype', (Generic[TypeVar('T')],)) np.dtype.__module__ = np.__name__ try: import numpy.typing as npt except ImportError: npt = type(sys)('numpy.typing') np.typing = npt sys.modules[npt.__name__] = npt npt.__all__ = [] for attr in ['ArrayLike', 'DTypeLike']: setattr(npt, attr, typing.Any) npt.__all__.append(attr) def _patch_domain_python(): try: from numpy.typing import __all__ as numpy_types except ImportError: numpy_types = [] try: from mpi4py.typing import __all__ as mpi4py_types except ImportError: mpi4py_types = [] numpy_types = set(numpy_types) mpi4py_types = set(mpi4py_types) for name in numpy_types: autodoc_type_aliases[name] = f'~numpy.typing.{name}' for name in mpi4py_types: autodoc_type_aliases[name] = f'~mpi4py.typing.{name}' from sphinx.domains.python import PythonDomain PythonDomain.object_types['data'].roles += ('class',) def _setup_autodoc(app): from sphinx.ext import autodoc from sphinx.ext import autosummary from sphinx.util import typing from sphinx.locale import _ # class ClassDocumenterMixin: def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if self.config.autodoc_class_signature == 'separated': members = self.options.members special_members = self.options.special_members if special_members is not None: for name in ('__new__', '__init__'): if name in members: members.remove(name) if name in special_members: special_members.remove(name) class ClassDocumenter( ClassDocumenterMixin, autodoc.ClassDocumenter, ): pass class ExceptionDocumenter( ClassDocumenterMixin, autodoc.ExceptionDocumenter, ): pass app.add_autodocumenter(ClassDocumenter, override=True) app.add_autodocumenter(ExceptionDocumenter, override=True) # def istypealias(obj, name): if isinstance(obj, type): return name != getattr(obj, '__name__', None) return obj in ( typing.Any, ) def istypevar(obj): return isinstance(obj, typing.TypeVar) class TypeDocumenter(autodoc.DataDocumenter): objtype = 'type' directivetype = 'data' priority = autodoc.ClassDocumenter.priority + 1 @classmethod def can_document_member(cls, member, membername, _isattr, parent): return ( isinstance(parent, autodoc.ModuleDocumenter) and parent.name == 'mpi4py.typing' and (istypevar(member) or istypealias(member, membername)) ) def add_directive_header(self, sig): if istypevar(self.object): obj = self.object if not self.options.annotation: self.options.annotation = f' = TypeVar("{obj.__name__}")' super().add_directive_header(sig) def update_content(self, more_content): obj = self.object if istypevar(obj): if obj.__covariant__: kind = _("Covariant") elif obj.__contravariant__: kind = _("Contravariant") else: kind = _("Invariant") content = f"{kind} :class:`~typing.TypeVar`." 
more_content.append(content, '') more_content.append('', '') if istypealias(obj, self.name): content = _('alias of %s') % typing.restify(obj) more_content.append(content, '') more_content.append('', '') super().update_content(more_content) def get_doc(self, *args, **kwargs): obj = self.object if istypevar(obj): if obj.__doc__ == typing.TypeVar.__doc__: return [] return super().get_doc(*args, **kwargs) app.add_autodocumenter(TypeDocumenter) # class ExceptionDocumenterCustom(ExceptionDocumenter): objtype = 'class' def get_documenter(app, obj, parent): if isinstance(obj, type) and issubclass(obj, BaseException): caller = sys._getframe().f_back.f_code.co_name if caller == 'generate_autosummary_content': if obj.__module__ == 'mpi4py.MPI': if obj.__name__ == 'Exception': return ExceptionDocumenterCustom return autosummary.get_documenter(app, obj, parent) from sphinx.ext.autosummary import generate generate.get_documenter = get_documenter def setup(app): _setup_numpy_typing() _patch_domain_python() _setup_autodoc(app) try: from mpi4py import MPI except ImportError: autodoc_mock_imports.append('mpi4py') return sys_dwb = sys.dont_write_bytecode sys.dont_write_bytecode = True import apidoc sys.dont_write_bytecode = sys_dwb name = MPI.__name__ here = os.path.abspath(os.path.dirname(__file__)) outdir = os.path.join(here, apidoc.OUTDIR) source = os.path.join(outdir, f'{name}.py') getmtime = os.path.getmtime generate = ( not os.path.exists(source) or getmtime(source) < getmtime(MPI.__file__) or getmtime(source) < getmtime(apidoc.__file__) ) if generate: apidoc.generate(source) module = apidoc.load_module(source) apidoc.replace_module(module) synopsis = autosummary_context['synopsis'] synopsis[module.__name__] = module.__doc__.strip() autotype = autosummary_context['autotype'] autotype[module.Exception.__name__] = 'exception' modules = [ 'mpi4py', 'mpi4py.run', 'mpi4py.util.dtlib', 'mpi4py.util.pkl5', 'mpi4py.util.pool', 'mpi4py.util.sync', ] typing_overload = typing.overload typing.overload = lambda arg: arg for name in modules: mod = importlib.import_module(name) ann = apidoc.load_module(f'{mod.__file__}i', name) apidoc.annotate(mod, ann) typing.overload = typing_overload # -- Options for HTML output ------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output html_theme = ( 'sphinx_rtd_theme' if 'sphinx_rtd_theme' in extensions else 'default' ) html_logo = '../mpi4py.svg' html_favicon = '../mpi4py.svg' if html_theme == 'default': html_copy_source = False # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = f'{package}-man' # -- Options for LaTeX output --------------------------------------------- # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', f'{package}.tex', project, author, 'howto'), ] latex_elements = { 'papersize': 'a4', } # -- Options for manual page output --------------------------------------- # (source start file, name, description, authors, manual section). 
man_pages = [ ('index', package, project, [author], 3) ] # -- Options for Texinfo output ------------------------------------------- # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', package, project, author, package, f'{project}.', 'Miscellaneous'), ] # -- Options for Epub output ---------------------------------------------- # Output file base name for ePub builder. epub_basename = package mpi4py-4.0.3/docs/source/develop.rst000066400000000000000000000117701475341043600173750ustar00rootroot00000000000000Development =========== Prerequisites ------------- You need to have the following software properly installed to develop *MPI for Python*: * `Python`_ 3.6 or above. * The `Cython`_ compiler. * A working `MPI`_ implementation like `MPICH`_ or `Open MPI`_, preferably supporting MPI-4 and built with shared/dynamic libraries. Optionally, consider installing the following packages: * `NumPy`_ for enabling comprehensive testing of MPI communication. * `CuPy`_ for enabling comprehensive testing with a GPU-aware MPI. * `Sphinx`_ to build the documentation. .. tip:: Most routine development tasks like building, installing in editable mode, testing, and generating documentation can be performed with the `spin`_ developer tool. Run :command:`spin` at the top level source directory for a list of available subcommands. .. _Python: https://www.python.org/ .. _Cython: https://cython.org/ .. _MPI: https://www.mpi-forum.org/ .. _MPICH: https://www.mpich.org/ .. _Open MPI: https://www.open-mpi.org/ .. _NumPy: https://numpy.org/ .. _CuPy: https://cupy.dev/ .. _Sphinx: https://www.sphinx-doc.org/ .. _spin: https://github.com/scientific-python/spin Building -------- *MPI for Python* uses a **setuptools**-based build system that relies on the :file:`setup.py` file. Some setuptools commands (e.g., *build*) accept additional options: .. cmdoption:: --mpi= Lets you pass a section with MPI configuration within a special configuration file. Alternatively, you can use the :envvar:`MPICFG` environment variable. .. cmdoption:: --mpicc= Specify the path or name of the :program:`mpicc` C compiler wrapper. Alternatively, use the :envvar:`MPICC` environment variable. .. cmdoption:: --mpild= Specify the full path or name for the MPI-aware C linker. Alternatively, use the :envvar:`MPILD` environment variable. If not set, the :program:`mpicc` C compiler wrapper is used for linking. .. cmdoption:: --configure Runs exhaustive tests checking for missing MPI types, constants, and functions. This option should be passed in order to build *MPI for Python* against old MPI-1, MPI-2, or MPI-3 implementations, possibly providing a subset of MPI-4. If you use an MPI implementation providing a :program:`mpicc` C compiler wrapper (e.g., MPICH or Open MPI), it will be used for compilation and linking. This is the preferred and easiest way to build *MPI for Python*. 
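Before running the *build* command, it may help to confirm which :program:`mpicc` will be picked up from the executable search path and how it invokes the underlying C compiler; a minimal check from the shell (the reporting flag is implementation-specific: ``-show`` for MPICH and its derivatives, ``-showme`` for Open MPI):: $ command -v mpicc $ mpicc -show $ mpicc -showme 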
If :program:`mpicc` is found in the executable search path (:envvar:`PATH` environment variable), simply run the *build* command:: $ python setup.py build If :program:`mpicc` is not in your search path or the compiler wrapper has a different name, you can run the *build* command specifying its location, either via the :option:`--mpicc` command option or using the :envvar:`MPICC` environment variable:: $ python setup.py build --mpicc=/path/to/mpicc $ env MPICC=/path/to/mpicc python setup.py build Alternatively, you can provide all the relevant information about your MPI implementation by editing the :file:`mpi.cfg` file located in the top level source directory. You can use the default section ``[mpi]`` or add a new custom section, for example ``[vendor_mpi]`` (see the examples provided in the :file:`mpi.cfg` file as a starting point to write your own section): .. code-block:: ini [mpi] include_dirs = /usr/local/mpi/include libraries = mpi library_dirs = /usr/local/mpi/lib runtime_library_dirs = /usr/local/mpi/lib [vendor_mpi] include_dirs = /opt/mpi/include ... libraries = mpi ... library_dirs = /opt/mpi/lib ... runtime_library_dirs = /opt/mpi/lib ... ... and then run the *build* command specifying your custom configuration section:: $ python setup.py build --mpi=vendor_mpi $ env MPICFG=vendor_mpi python setup.py build Installing ---------- *MPI for Python* can be installed in editable mode:: $ python -m pip install --editable . After modifying Cython sources, an in-place rebuild is needed:: $ python setup.py build --inplace Testing ------- To quickly test the installation:: $ mpiexec -n 5 python -m mpi4py.bench helloworld Hello, World! I am process 0 of 5 on localhost. Hello, World! I am process 1 of 5 on localhost. Hello, World! I am process 2 of 5 on localhost. Hello, World! I am process 3 of 5 on localhost. Hello, World! I am process 4 of 5 on localhost. $ mpiexec -n 5 python -m mpi4py.bench ringtest -l 10 -n 1048576 time for 10 loops = 0.00361614 seconds (5 processes, 1048576 bytes) If you installed from a git clone or the source distribution, issuing at the command line:: $ mpiexec -n 5 python demo/helloworld.py will launch a five-process run of the Python interpreter and run the demo script :file:`demo/helloworld.py` from the source distribution. You can also run all the *unittest* scripts:: $ mpiexec -n 5 python test/main.py or, if you have the `pytest`_ unit testing framework installed:: $ mpiexec -n 5 pytest .. _pytest: https://docs.pytest.org/ mpi4py-4.0.3/docs/source/guidelines.rst000066400000000000000000000123061475341043600200630ustar00rootroot00000000000000Guidelines ========== Fair play --------- Summary +++++++ This section defines Rules of Play for companies and outside developers that engage with the mpi4py project. It covers: * Restrictions on use of the mpi4py name. * How and whether to publish a modified distribution. * How to make us aware of patched versions. After reading this section, companies and developers will know what kinds of behavior the mpi4py developers and contributors would like to see, and which we consider troublesome, bothersome, and unacceptable. This document is a close adaptation of `NumPy NEP 36`_. .. _NumPy NEP 36: https://numpy.org/neps/nep-0036-fair-play.html Motivation ++++++++++ Occasionally, we learn of modified mpi4py versions and binary distributions circulated by outsiders. These patched versions can cause problems for mpi4py users (see, e.g., `mpi4py/mpi4py#508`_). 
When issues like these arise, our developers waste time identifying the problematic release, locating alterations, and determining an appropriate course of action. In addition, packages on the Python Package Index are sometimes named such that users assume they are sanctioned or maintained by the mpi4py developers. We wish to reduce the number of such incidents. .. _mpi4py/mpi4py#508: https://github.com/mpi4py/mpi4py/issues/508 Scope +++++ This document aims to define a minimal set of rules that, when followed, will be considered good-faith efforts in line with the expectations of the mpi4py developers and contributors. Our hope is that companies and outside developers who feel they need to modify mpi4py will first consider contributing to the project, or use alternative mechanisms for patching and extending mpi4py. When in doubt, please `talk to us first`__. We may suggest an alternative; at minimum, we'll be informed and we may even grant an exception if deemed appropriate. __ https://github.com/mpi4py/mpi4py/discussions/ Fair play rules +++++++++++++++ 1. Do not reuse the mpi4py name for projects not affiliated with the mpi4py project. At time of writing, there are only a handful of ``mpi4py``-named packages developed by the mpi4py project, including ``mpi4py`` and ``mpi4py-fft``. We ask that outside packages not include the phrase ``mpi4py``, i.e., avoid names such as ``mycompany-mpi4py`` or ``mpi4py-mycompany``. To be clear, this rule only applies to modules (package names); it is perfectly acceptable to have a *submodule* of your own package named ``mycompany.mpi4py``. 2. Do not publish binary mpi4py wheels on PyPI (https://pypi.org/). We ask companies and outside developers to not publish binary mpi4py wheels in the main Python Package Index (https://pypi.org/) under names such as ``mpi4py-mpich``, ``mpi4py-openmpi``, or ``mpi4py-vendor_mpi``. The usual approaches to build binary Python wheels involve the embedding of dependent shared libraries. While such an approach may seem convenient and often is, in the particular case of MPI and mpi4py it is ultimately harmful to end users. Embedding the MPI shared libraries would prevent the use of external, system-provided MPI installations with hardware-specific optimizations and site-specific tweaks. The MPI Forum is currently discussing the standardization of a proposal for an Application Binary Interface (ABI) for MPI, see [mpi-abi-paper]_ and [mpi-abi-issue]_. Such standardization will allow for any binary dependent on the MPI library to be used with multiple MPI backends. Once this proposal becomes part of the MPI standard, the mpi4py project will consider publishing on PyPI binary wheels capable of using any backend MPI implementation supporting the new MPI ABI specification. In the meantime, mpi4py is currently distributing experimental MPI and mpi4py binary wheels on https://anaconda.org/mpi4py. .. [mpi-abi-paper] J. Hammond, L. Dalcin, E. Schnetter, M. Pérache, J. B. Besnard, J. Brown, G. Brito Gadeschi, S. Byrne, J. Schuchart, and H. Zhou. MPI Application Binary Interface Standardization. EuroMPI 2023, Bristol, UK, September 2023. https://doi.org/10.1145/3615318.3615319 .. [mpi-abi-issue] MPI Forum GitHub Issue: *MPI needs a standard ABI*. https://github.com/mpi-forum/mpi-issues/issues/751 3. Do not republish modified versions of mpi4py. Modified versions of mpi4py make it very difficult for the developers to address bug reports, since we typically do not know which parts of mpi4py have been modified. 
If you have to break this rule (and we implore you not to!), then make it clear in the ``__version__`` tag that you have modified mpi4py, e.g.:: >>> print(mpi4py.__version__) '4.0.0+mycompany.13' We understand that minor patches are often required to make a library work inside of a package ecosystem. This is totally acceptable, but we ask that no substantive changes are made. 4. Do not extend or modify mpi4py's API. If you absolutely have to break the previous rule, please do not add additional functions to the namespace, or modify the API of existing functions. Having additional functions exposed in distributed versions is confusing for users and developers alike. .. Local variables: .. fill-column: 79 .. End: mpi4py-4.0.3/docs/source/index.rst000066400000000000000000000021611475341043600170400ustar00rootroot00000000000000MPI for Python ============== .. only:: html or man :Author: Lisandro Dalcin :Contact: dalcinl@gmail.com :Date: |today| .. topic:: Abstract *MPI for Python* provides Python bindings for the *Message Passing Interface* (MPI) standard, allowing Python applications to exploit multiple processors on workstations, clusters and supercomputers. This package builds on the MPI specification and provides an object oriented interface resembling the MPI-2 C++ bindings. It supports point-to-point (sends, receives) and collective (broadcasts, scatters, gathers) communication of any *picklable* Python object, as well as efficient communication of Python objects exposing the Python buffer interface (e.g. NumPy arrays and builtin bytes/array/memoryview objects). .. toctree:: :caption: Contents :maxdepth: 2 intro overview tutorial mpi4py mpi4py.MPI mpi4py.typing mpi4py.futures mpi4py.util mpi4py.run mpi4py.bench reference citation install develop guidelines license changes .. only:: html and not singlehtml * :ref:`genindex` mpi4py-4.0.3/docs/source/install.rst000066400000000000000000000114611475341043600174020ustar00rootroot00000000000000Installation ============ Build backends -------------- mpi4py supports three different build backends: `setuptools`_ (default), `scikit-build-core`_ (`CMake`_-based), and `meson-python`_ (`Meson`_-based). The build backend can be selected by setting the :envvar:`MPI4PY_BUILD_BACKEND` environment variable. .. envvar:: MPI4PY_BUILD_BACKEND :choices: ``"setuptools"``, ``"scikit-build-core"``, ``"meson-python"`` :default: ``"setuptools"`` Request a build backend for building mpi4py from sources. Using **setuptools** ~~~~~~~~~~~~~~~~~~~~ .. tip:: Set the :envvar:`MPI4PY_BUILD_BACKEND` environment variable to ``"setuptools"`` to use the `setuptools`_ build backend. .. _setuptools: https://setuptools.pypa.io/ When using the default `setuptools`_ build backend, mpi4py relies on the legacy Python distutils framework to build C extension modules. The following environment variables affect the build configuration. .. envvar:: MPI4PY_BUILD_MPICC The :program:`mpicc` compiler wrapper command is searched for in the executable search path (:envvar:`PATH` environment variable) and used to compile the :mod:`mpi4py.MPI` C extension module. Alternatively, set the :envvar:`MPI4PY_BUILD_MPICC` environment variable to the full path or command corresponding to the MPI-aware C compiler. .. envvar:: MPI4PY_BUILD_MPILD The :program:`mpicc` compiler wrapper command is also used for linking the :mod:`mpi4py.MPI` C extension module. Alternatively, use the :envvar:`MPI4PY_BUILD_MPILD` environment variable to specify the full path or command corresponding to the MPI-aware C linker. .. 
envvar:: MPI4PY_BUILD_MPICFG If the MPI implementation does not provide a compiler wrapper, or it is not installed in a default system location, all relevant build information like include/library locations and library lists can be provided in an ini-style configuration file under a ``[mpi]`` section. mpi4py can then be asked to use the custom build information by setting the :envvar:`MPI4PY_BUILD_MPICFG` environment variable to the full path of the configuration file. As an example, see the :file:`mpi.cfg` file located in the top level mpi4py source directory. .. envvar:: MPI4PY_BUILD_CONFIGURE Some vendor MPI implementations may not provide complete coverage of the MPI standard, or may provide partial features of newer MPI standard versions while advertising support for an older version. Setting the :envvar:`MPI4PY_BUILD_CONFIGURE` environment variable to a non-empty string will trigger the run of exhaustive checks for the availability of all MPI constants, predefined handles, and routines. The following environment variables are aliases for the ones described above. Having shorter names, they are convenient for occasional use in the command line. Its usage is not recommended in automation scenarios like packaging recipes, deployment scripts, and container image creation. .. envvar:: MPICC Convenience alias for :envvar:`MPI4PY_BUILD_MPICC`. .. envvar:: MPILD Convenience alias for :envvar:`MPI4PY_BUILD_MPILD`. .. envvar:: MPICFG Convenience alias for :envvar:`MPI4PY_BUILD_MPICFG`. Using **scikit-build-core** ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. tip:: Set the :envvar:`MPI4PY_BUILD_BACKEND` environment variable to ``"scikit-build-core"`` to use the `scikit-build-core`_ build backend. When using the `scikit-build-core`_ build backend, mpi4py delegates all of MPI build configuration to `CMake`_'s `FindMPI`_ module. Besides the obvious advantage of cross-platform support, this delegation to CMake may be convenient in build environments exposing vendor software stacks via intricate module systems. Note however that mpi4py will not be able to look for MPI routines available beyond the MPI standard version the MPI implementation advertises to support (via the :c:macro:`MPI_VERSION` and :c:macro:`MPI_SUBVERSION` macro constants in the :file:`mpi.h` header file), any missing MPI constant or symbol will prevent a successful build. .. _CMake: https://cmake.org/ .. _FindMPI: https://cmake.org/cmake/help/latest/module/FindMPI.html .. _scikit-build-core: https://scikit-build-core.readthedocs.io/ Using **meson-python** ~~~~~~~~~~~~~~~~~~~~~~ .. tip:: Set the :envvar:`MPI4PY_BUILD_BACKEND` environment variable to ``"meson-python"`` to use the `meson-python`_ build backend. When using the `meson-python`_ build backend, mpi4py delegates build tasks to the `Meson`_ build system. .. warning:: mpi4py support for the `meson-python`_ build backend is experimental. For the time being, users must set the :envvar:`CC` environment variable to the command or path corresponding to the :program:`mpicc` C compiler wrapper. .. _Meson: https://mesonbuild.com/ .. _meson-python: https://meson-python.readthedocs.io/ .. include:: ../../INSTALL.rst mpi4py-4.0.3/docs/source/intro.rst000066400000000000000000000231571475341043600170740ustar00rootroot00000000000000Introduction ============ Over the last years, high performance computing has become an affordable resource to many more researchers in the scientific community than ever before. 
The conjunction of quality open source software and commodity hardware strongly influenced the now widespread popularity of Beowulf_ class clusters and cluster of workstations. Among many parallel computational models, message-passing has proven to be an effective one. This paradigm is specially suited for (but not limited to) distributed memory architectures and is used in today's most demanding scientific and engineering application related to modeling, simulation, design, and signal processing. However, portable message-passing parallel programming used to be a nightmare in the past because of the many incompatible options developers were faced to. Fortunately, this situation definitely changed after the MPI Forum released its standard specification. High performance computing is traditionally associated with software development using compiled languages. However, in typical applications programs, only a small part of the code is time-critical enough to require the efficiency of compiled languages. The rest of the code is generally related to memory management, error handling, input/output, and user interaction, and those are usually the most error prone and time-consuming lines of code to write and debug in the whole development process. Interpreted high-level languages can be really advantageous for this kind of tasks. For implementing general-purpose numerical computations, MATLAB [#]_ is the dominant interpreted programming language. In the open source side, Octave and Scilab are well known, freely distributed software packages providing compatibility with the MATLAB language. In this work, we present MPI for Python, a new package enabling applications to exploit multiple processors using standard MPI "look and feel" in Python scripts. .. [#] MATLAB is a registered trademark of The MathWorks, Inc. What is MPI? ------------ MPI_, [mpi-using]_ [mpi-ref]_ the *Message Passing Interface*, is a standardized and portable message-passing system designed to function on a wide variety of parallel computers. The standard defines the syntax and semantics of library routines and allows users to write portable programs in the main scientific programming languages (Fortran, C, or C++). Since its release, the MPI specification [mpi-std1]_ [mpi-std2]_ has become the leading standard for message-passing libraries for parallel computers. Implementations are available from vendors of high-performance computers and from well known open source projects like MPICH_ [mpi-mpich]_ and `Open MPI`_ [mpi-openmpi]_. What is Python? --------------- Python_ is a modern, easy to learn, powerful programming language. It has efficient high-level data structures and a simple but effective approach to object-oriented programming with dynamic typing and dynamic binding. It supports modules and packages, which encourages program modularity and code reuse. Python's elegant syntax, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms. The Python interpreter and the extensive standard library are available in source or binary form without charge for all major platforms, and can be freely distributed. It is easily extended with new functions and data types implemented in C or C++. Python is also suitable as an extension language for customizable applications. 
Python is an ideal candidate for writing the higher-level parts of large-scale scientific applications [Hinsen97]_ and driving simulations in parallel architectures [Beazley97]_ like clusters of PC's or SMP's. Python codes are quickly developed, easily maintained, and can achieve a high degree of integration with other libraries written in compiled languages. Related Projects ---------------- As this work started and evolved, some ideas were borrowed from well known MPI and Python related open source projects from the Internet. * `OOMPI`_ + It has no relation with Python, but is an excellent object oriented approach to MPI. + It is a C++ class library specification layered on top of the C bindings that encapsulates MPI into a functional class hierarchy. + It provides a flexible and intuitive interface by adding some abstractions, like *Ports* and *Messages*, which enrich and simplify the syntax. * `Pypar`_ + Its interface is rather minimal. There is no support for communicators or process topologies. + It does not require the Python interpreter to be modified or recompiled, but does not permit interactive parallel runs. + General (*picklable*) Python objects of any type can be communicated. There is good support for numeric arrays, practically full MPI bandwidth can be achieved. * `pyMPI`_ + It rebuilds the Python interpreter providing a built-in module for message passing. It does permit interactive parallel runs, which are useful for learning and debugging. + It provides an interface suitable for basic parallel programming. There is not full support for defining new communicators or process topologies. + General (picklable) Python objects can be messaged between processors. There is native support for numeric arrays. * `Scientific Python`_ + It provides a collection of Python modules that are useful for scientific computing. + There is an interface to MPI and BSP (*Bulk Synchronous Parallel programming*). + The interface is simple but incomplete and does not resemble the MPI specification. There is support for numeric arrays. Additionally, we would like to mention some available tools for scientific computing and software development with Python. + `NumPy`_ is a package that provides array manipulation and computational capabilities similar to those found in IDL, MATLAB, or Octave. Using NumPy, it is possible to write many efficient numerical data processing applications directly in Python without using any C, C++ or Fortran code. + `SciPy`_ is an open source library of scientific tools for Python, gathering a variety of high level science and engineering modules together as a single package. It includes modules for graphics and plotting, optimization, integration, special functions, signal and image processing, genetic algorithms, ODE solvers, and others. + `Cython`_ is a language that makes writing C extensions for the Python language as easy as Python itself. The Cython language is very close to the Python language, but Cython additionally supports calling C functions and declaring C types on variables and class attributes. This allows the compiler to generate very efficient C code from Cython code. This makes Cython the ideal language for wrapping for external C libraries, and for fast C modules that speed up the execution of Python code. + `SWIG`_ is a software development tool that connects programs written in C and C++ with a variety of high-level programming languages like Perl, Tcl/Tk, Ruby and Python. 
Issuing header files to SWIG is the simplest approach to interfacing C/C++ libraries from a Python module. .. External Links .. .............. .. _MPI: https://www.mpi-forum.org/ .. _MPICH: https://www.mpich.org/ .. _Open MPI: https://www.open-mpi.org/ .. _Beowulf: https://www.beowulf.org/ .. _Python: https://www.python.org/ .. _NumPy: https://numpy.org/ .. _SciPy: https://scipy.org/ .. _Cython: https://cython.org/ .. _SWIG: https://www.swig.org/ .. _OOMPI: https://web.archive.org/web/20100614170656/http://www.osl.iu.edu/research/oompi/overview.php .. _Pypar: https://github.com/daleroberts/pypar .. _pyMPI: https://sourceforge.net/projects/pympi/ .. _Scientific Python: http://dirac.cnrs-orleans.fr/ScientificPython.html .. References .. .......... .. [mpi-std1] MPI Forum. MPI: A Message Passing Interface Standard. International Journal of Supercomputer Applications, volume 8, number 3-4, pages 159-416, 1994. .. [mpi-std2] MPI Forum. MPI: A Message Passing Interface Standard. High Performance Computing Applications, volume 12, number 1-2, pages 1-299, 1998. .. [mpi-using] William Gropp, Ewing Lusk, and Anthony Skjellum. Using MPI: portable parallel programming with the message-passing interface. MIT Press, 1994. .. [mpi-ref] Mark Snir, Steve Otto, Steven Huss-Lederman, David Walker, and Jack Dongarra. MPI - The Complete Reference, volume 1, The MPI Core. MIT Press, 2nd. edition, 1998. .. [mpi-mpich] W. Gropp, E. Lusk, N. Doss, and A. Skjellum. A high-performance, portable implementation of the MPI message passing interface standard. Parallel Computing, 22(6):789-828, September 1996. .. [mpi-openmpi] Edgar Gabriel, Graham E. Fagg, George Bosilca, Thara Angskun, Jack J. Dongarra, Jeffrey M. Squyres, Vishal Sahay, Prabhanjan Kambadur, Brian Barrett, Andrew Lumsdaine, Ralph H. Castain, David J. Daniel, Richard L. Graham, and Timothy S. Woodall. Open MPI: Goals, Concept, and Design of a Next Generation MPI Implementation. In Proceedings, 11th European PVM/MPI Users' Group Meeting, Budapest, Hungary, September 2004. .. [Hinsen97] Konrad Hinsen. The Molecular Modelling Toolkit: a case study of a large scientific application in Python. In Proceedings of the 6th International Python Conference, pages 29-35, San Jose, Ca., October 1997. .. [Beazley97] David M. Beazley and Peter S. Lomdahl. Feeding a large-scale physics application to Python. In Proceedings of the 6th International Python Conference, pages 21-29, San Jose, Ca., October 1997. mpi4py-4.0.3/docs/source/license.rst000066400000000000000000000001141475341043600173470ustar00rootroot00000000000000:tocdepth: 1 .. _license: LICENSE ------- .. include:: ../../LICENSE.rst mpi4py-4.0.3/docs/source/make.bat000066400000000000000000000014401475341043600166030ustar00rootroot00000000000000@ECHO OFF pushd %~dp0 REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set SOURCEDIR=. set BUILDDIR=_build %SPHINXBUILD% >NUL 2>NUL if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. 
echo.If you don't have Sphinx installed, grab it from echo.https://www.sphinx-doc.org/ exit /b 1 ) if "%1" == "" goto help %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% goto end :help %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% :end popd mpi4py-4.0.3/docs/source/mpi4py.MPI.rst000066400000000000000000000136041475341043600176030ustar00rootroot00000000000000mpi4py.MPI ========== .. currentmodule:: mpi4py.MPI Classes ------- .. rubric:: Ancillary .. autosummary:: Datatype Status Request Prequest Grequest Op Group Info Session .. rubric:: Communication .. autosummary:: Comm Intracomm Topocomm Cartcomm Graphcomm Distgraphcomm Intercomm Message .. rubric:: One-sided operations .. autosummary:: Win .. rubric:: Input/Output .. autosummary:: File .. rubric:: Error handling .. autosummary:: Errhandler Exception .. rubric:: Auxiliary .. autosummary:: Pickle buffer Functions --------- .. rubric:: Version inquiry .. autosummary:: Get_version Get_library_version .. rubric:: Initialization and finalization .. autosummary:: Init Init_thread Finalize Is_initialized Is_finalized Query_thread Is_thread_main .. rubric:: Memory allocation .. autosummary:: Alloc_mem Free_mem .. rubric:: Address manipulation .. autosummary:: Get_address Aint_add Aint_diff .. rubric:: Timer .. autosummary:: Wtick Wtime .. rubric:: Error handling .. autosummary:: Get_error_class Get_error_string Add_error_class Add_error_code Add_error_string Remove_error_class Remove_error_code Remove_error_string .. rubric:: Dynamic process management .. autosummary:: Open_port Close_port Publish_name Unpublish_name Lookup_name .. rubric:: Miscellanea .. autosummary:: Attach_buffer Detach_buffer Flush_buffer Iflush_buffer Compute_dims Get_processor_name Register_datarep Pcontrol .. rubric:: Utilities .. autosummary:: get_vendor .. _typecode _sizeof _addressof _handleof .. _comm_lock _comm_lock_table _commctx_inter _commctx_intra _set_abort_status Attributes ---------- .. 
autosummary:: UNDEFINED ANY_SOURCE ANY_TAG PROC_NULL ROOT BOTTOM IN_PLACE BUFFER_AUTOMATIC KEYVAL_INVALID TAG_UB IO WTIME_IS_GLOBAL UNIVERSE_SIZE APPNUM LASTUSEDCODE WIN_BASE WIN_SIZE WIN_DISP_UNIT WIN_CREATE_FLAVOR WIN_FLAVOR WIN_MODEL SUCCESS ERR_LASTCODE ERR_COMM ERR_GROUP ERR_TYPE ERR_REQUEST ERR_OP ERR_ERRHANDLER ERR_BUFFER ERR_COUNT ERR_TAG ERR_RANK ERR_ROOT ERR_TRUNCATE ERR_IN_STATUS ERR_PENDING ERR_TOPOLOGY ERR_DIMS ERR_ARG ERR_OTHER ERR_UNKNOWN ERR_INTERN ERR_INFO ERR_FILE ERR_WIN ERR_KEYVAL ERR_INFO_KEY ERR_INFO_VALUE ERR_INFO_NOKEY ERR_ACCESS ERR_AMODE ERR_BAD_FILE ERR_FILE_EXISTS ERR_FILE_IN_USE ERR_NO_SPACE ERR_NO_SUCH_FILE ERR_IO ERR_READ_ONLY ERR_CONVERSION ERR_DUP_DATAREP ERR_UNSUPPORTED_DATAREP ERR_UNSUPPORTED_OPERATION ERR_NAME ERR_NO_MEM ERR_NOT_SAME ERR_PORT ERR_QUOTA ERR_SERVICE ERR_SPAWN ERR_BASE ERR_SIZE ERR_DISP ERR_ASSERT ERR_LOCKTYPE ERR_RMA_CONFLICT ERR_RMA_SYNC ERR_RMA_RANGE ERR_RMA_ATTACH ERR_RMA_SHARED ERR_RMA_FLAVOR ORDER_C ORDER_F ORDER_FORTRAN TYPECLASS_INTEGER TYPECLASS_REAL TYPECLASS_COMPLEX DISTRIBUTE_NONE DISTRIBUTE_BLOCK DISTRIBUTE_CYCLIC DISTRIBUTE_DFLT_DARG COMBINER_NAMED COMBINER_DUP COMBINER_CONTIGUOUS COMBINER_VECTOR COMBINER_HVECTOR COMBINER_INDEXED COMBINER_HINDEXED COMBINER_INDEXED_BLOCK COMBINER_HINDEXED_BLOCK COMBINER_STRUCT COMBINER_SUBARRAY COMBINER_DARRAY COMBINER_RESIZED COMBINER_VALUE_INDEX COMBINER_F90_REAL COMBINER_F90_COMPLEX COMBINER_F90_INTEGER IDENT CONGRUENT SIMILAR UNEQUAL CART GRAPH DIST_GRAPH UNWEIGHTED WEIGHTS_EMPTY COMM_TYPE_SHARED BSEND_OVERHEAD WIN_FLAVOR_CREATE WIN_FLAVOR_ALLOCATE WIN_FLAVOR_DYNAMIC WIN_FLAVOR_SHARED WIN_SEPARATE WIN_UNIFIED MODE_NOCHECK MODE_NOSTORE MODE_NOPUT MODE_NOPRECEDE MODE_NOSUCCEED LOCK_EXCLUSIVE LOCK_SHARED MODE_RDONLY MODE_WRONLY MODE_RDWR MODE_CREATE MODE_EXCL MODE_DELETE_ON_CLOSE MODE_UNIQUE_OPEN MODE_SEQUENTIAL MODE_APPEND SEEK_SET SEEK_CUR SEEK_END DISPLACEMENT_CURRENT DISP_CUR THREAD_SINGLE THREAD_FUNNELED THREAD_SERIALIZED THREAD_MULTIPLE VERSION SUBVERSION MAX_PROCESSOR_NAME MAX_ERROR_STRING MAX_PORT_NAME MAX_INFO_KEY MAX_INFO_VAL MAX_OBJECT_NAME MAX_DATAREP_STRING MAX_LIBRARY_VERSION_STRING DATATYPE_NULL PACKED BYTE AINT OFFSET COUNT CHAR WCHAR SIGNED_CHAR SHORT INT LONG LONG_LONG UNSIGNED_CHAR UNSIGNED_SHORT UNSIGNED UNSIGNED_LONG UNSIGNED_LONG_LONG FLOAT DOUBLE LONG_DOUBLE C_BOOL INT8_T INT16_T INT32_T INT64_T UINT8_T UINT16_T UINT32_T UINT64_T C_COMPLEX C_FLOAT_COMPLEX C_DOUBLE_COMPLEX C_LONG_DOUBLE_COMPLEX CXX_BOOL CXX_FLOAT_COMPLEX CXX_DOUBLE_COMPLEX CXX_LONG_DOUBLE_COMPLEX SHORT_INT INT_INT TWOINT LONG_INT FLOAT_INT DOUBLE_INT LONG_DOUBLE_INT CHARACTER LOGICAL INTEGER REAL DOUBLE_PRECISION COMPLEX DOUBLE_COMPLEX LOGICAL1 LOGICAL2 LOGICAL4 LOGICAL8 INTEGER1 INTEGER2 INTEGER4 INTEGER8 INTEGER16 REAL2 REAL4 REAL8 REAL16 COMPLEX4 COMPLEX8 COMPLEX16 COMPLEX32 UNSIGNED_INT SIGNED_SHORT SIGNED_INT SIGNED_LONG SIGNED_LONG_LONG BOOL SINT8_T SINT16_T SINT32_T SINT64_T F_BOOL F_INT F_FLOAT F_DOUBLE F_COMPLEX F_FLOAT_COMPLEX F_DOUBLE_COMPLEX REQUEST_NULL MESSAGE_NULL MESSAGE_NO_PROC OP_NULL MAX MIN SUM PROD LAND BAND LOR BOR LXOR BXOR MAXLOC MINLOC REPLACE NO_OP GROUP_NULL GROUP_EMPTY INFO_NULL INFO_ENV ERRHANDLER_NULL ERRORS_RETURN ERRORS_ARE_FATAL COMM_NULL COMM_SELF COMM_WORLD WIN_NULL FILE_NULL pickle .. Local variables: .. fill-column: 79 .. End: mpi4py-4.0.3/docs/source/mpi4py.bench.rst000066400000000000000000000001711475341043600202300ustar00rootroot00000000000000mpi4py.bench ============ .. module:: mpi4py.bench :synopsis: Run MPI benchmarks and tests. .. 
versionadded:: 3.0.0 mpi4py-4.0.3/docs/source/mpi4py.futures.rst000066400000000000000000000727571475341043600206710ustar00rootroot00000000000000mpi4py.futures ============== .. module:: mpi4py.futures :synopsis: Execute computations concurrently using MPI processes. .. versionadded:: 3.0.0 This package provides a high-level interface for asynchronously executing callables on a pool of worker processes using MPI for inter-process communication. The :mod:`mpi4py.futures` package is based on :mod:`concurrent.futures` from the Python standard library. More precisely, :mod:`mpi4py.futures` provides the :class:`MPIPoolExecutor` class as a concrete implementation of the abstract class :class:`~concurrent.futures.Executor`. The :meth:`~concurrent.futures.Executor.submit` interface schedules a callable to be executed asynchronously and returns a :class:`~concurrent.futures.Future` object representing the execution of the callable. :class:`~concurrent.futures.Future` instances can be queried for the call result or exception. Sets of :class:`~concurrent.futures.Future` instances can be passed to the :func:`~concurrent.futures.wait` and :func:`~concurrent.futures.as_completed` functions. .. seealso:: Module :mod:`concurrent.futures` Documentation of the :mod:`concurrent.futures` standard module. MPIPoolExecutor --------------- The :class:`MPIPoolExecutor` class uses a pool of MPI processes to execute calls asynchronously. By performing computations in separate processes, it allows to side-step the :term:`global interpreter lock` but also means that only picklable objects can be executed and returned. The :mod:`__main__` module must be importable by worker processes, thus :class:`MPIPoolExecutor` instances may not work in the interactive interpreter. :class:`MPIPoolExecutor` takes advantage of the dynamic process management features introduced in the MPI-2 standard. In particular, the `MPI.Intracomm.Spawn` method of `MPI.COMM_SELF` is used in the master (or parent) process to spawn new worker (or child) processes running a Python interpreter. The master process uses a separate thread (one for each :class:`MPIPoolExecutor` instance) to communicate back and forth with the workers. The worker processes serve the execution of tasks in the main (and only) thread until they are signaled for completion. .. note:: The worker processes must import the main script in order to *unpickle* any callable defined in the :mod:`__main__` module and submitted from the master process. Furthermore, the callables may need access to other global variables. At the worker processes, :mod:`mpi4py.futures` executes the main script code (using the :mod:`runpy` module) under the :mod:`__worker__` namespace to define the :mod:`__main__` module. The :mod:`__main__` and :mod:`__worker__` modules are added to :data:`sys.modules` (both at the master and worker processes) to ensure proper *pickling* and *unpickling*. .. warning:: During the initial import phase at the workers, the main script cannot create and use new :class:`MPIPoolExecutor` instances. Otherwise, each worker would attempt to spawn a new pool of workers, leading to infinite recursion. :mod:`mpi4py.futures` detects such recursive attempts to spawn new workers and aborts the MPI execution environment. As the main script code is run under the :mod:`__worker__` namespace, the easiest way to avoid spawn recursion is using the idiom :code:`if __name__ == '__main__': ...` in the main script. .. 
class:: MPIPoolExecutor(max_workers=None, \ initializer=None, initargs=(), **kwargs) An :class:`~concurrent.futures.Executor` subclass that executes calls asynchronously using a pool of at most *max_workers* processes. If *max_workers* is `None` or not given, its value is determined from the :envvar:`MPI4PY_FUTURES_MAX_WORKERS` environment variable if set, or the MPI universe size if set, otherwise a single worker process is spawned. If *max_workers* is lower than or equal to ``0``, then a :exc:`ValueError` will be raised. *initializer* is an optional callable that is called at the start of each worker process before executing any tasks; *initargs* is a tuple of arguments passed to the initializer. If *initializer* raises an exception, all pending tasks and any attempt to submit new tasks to the pool will raise a :exc:`~concurrent.futures.BrokenExecutor` exception. Other parameters: * *python_exe*: Path to the Python interpreter executable used to spawn worker processes, otherwise :data:`sys.executable` is used. * *python_args*: :class:`list` or iterable with additional command line flags to pass to the Python executable. Command line flags determined from inspection of :data:`sys.flags`, :data:`sys.warnoptions` and :data:`sys._xoptions` are passed unconditionally. * *mpi_info*: :class:`dict` or iterable yielding ``(key, value)`` pairs. These ``(key, value)`` pairs are passed (through an `MPI.Info` object) to the `MPI.Intracomm.Spawn` call used to spawn worker processes. This mechanism allows telling the MPI runtime system where and how to start the processes. Check the documentation of the backend MPI implementation about the set of keys it interprets and the corresponding format for values. * *globals*: :class:`dict` or iterable yielding ``(name, value)`` pairs to initialize the main module namespace in worker processes. * *main*: If set to `False`, do not import the :mod:`__main__` module in worker processes. Setting *main* to `False` prevents worker processes from accessing definitions in the parent :mod:`__main__` namespace. * *path*: :class:`list` or iterable with paths to append to :data:`sys.path` in worker processes to extend the :ref:`module search path `. * *wdir*: Path to set the current working directory in worker processes using :func:`os.chdir()`. The initial working directory is set by the MPI implementation. Quality MPI implementations should honor a ``wdir`` info key passed through *mpi_info*, although such a feature is not mandatory. * *env*: :class:`dict` or iterable yielding ``(name, value)`` pairs with environment variables to update :data:`os.environ` in worker processes. The initial environment is set by the MPI implementation. MPI implementations may allow setting the initial environment through *mpi_info*, however such a feature is neither required nor recommended by the MPI standard. * *use_pkl5*: If set to `True`, use :mod:`pickle5` with out-of-band buffers for interprocess communication. If *use_pkl5* is set to `None` or not given, its value is determined from the :envvar:`MPI4PY_FUTURES_USE_PKL5` environment variable. Using :mod:`pickle5` with out-of-band buffers may benefit applications dealing with large buffer-like objects like NumPy arrays. See :mod:`mpi4py.util.pkl5` for additional information. * *backoff*: :class:`float` value specifying the maximum number of seconds a worker thread or process suspends execution with :func:`time.sleep()` while idle-waiting.
If not set, its value is determined from the :envvar:`MPI4PY_FUTURES_BACKOFF` environment variable if set, otherwise the default value of 0.001 seconds is used. Lower values will reduce latency and increase execution throughput for very short-lived tasks, albeit at the expense of spinning CPU cores and increased energy consumption. .. method:: submit(func, *args, **kwargs) Schedule the callable, *func*, to be executed as ``func(*args, **kwargs)`` and returns a :class:`~concurrent.futures.Future` object representing the execution of the callable. :: executor = MPIPoolExecutor(max_workers=1) future = executor.submit(pow, 321, 1234) print(future.result()) .. method:: map(func, *iterables, timeout=None, chunksize=1, **kwargs) Equivalent to :func:`map(func, *iterables) ` except *func* is executed asynchronously and several calls to *func* may be made concurrently, out-of-order, in separate processes. The returned iterator raises a :exc:`~concurrent.futures.TimeoutError` if :meth:`~iterator.__next__` is called and the result isn't available after *timeout* seconds from the original call to :meth:`~MPIPoolExecutor.map`. *timeout* can be an int or a float. If *timeout* is not specified or `None`, there is no limit to the wait time. If a call raises an exception, then that exception will be raised when its value is retrieved from the iterator. This method chops *iterables* into a number of chunks which it submits to the pool as separate tasks. The (approximate) size of these chunks can be specified by setting *chunksize* to a positive integer. For very long iterables, using a large value for *chunksize* can significantly improve performance compared to the default size of one. By default, the returned iterator yields results in-order, waiting for successive tasks to complete . This behavior can be changed by passing the keyword argument *unordered* as `True`, then the result iterator will yield a result as soon as any of the tasks complete. :: executor = MPIPoolExecutor(max_workers=3) for result in executor.map(pow, [2]*32, range(32)): print(result) .. method:: starmap(func, iterable, timeout=None, chunksize=1, **kwargs) Equivalent to :func:`itertools.starmap(func, iterable) `. Used instead of :meth:`~MPIPoolExecutor.map` when argument parameters are already grouped in tuples from a single iterable (the data has been "pre-zipped"). :func:`map(func, *iterable) ` is equivalent to :func:`starmap(func, zip(*iterable)) `. :: executor = MPIPoolExecutor(max_workers=3) iterable = ((2, n) for n in range(32)) for result in executor.starmap(pow, iterable): print(result) .. method:: shutdown(wait=True, cancel_futures=False) Signal the executor that it should free any resources that it is using when the currently pending futures are done executing. Calls to :meth:`~MPIPoolExecutor.submit` and :meth:`~MPIPoolExecutor.map` made after :meth:`~MPIPoolExecutor.shutdown` will raise :exc:`RuntimeError`. If *wait* is `True` then this method will not return until all the pending futures are done executing and the resources associated with the executor have been freed. If *wait* is `False` then this method will return immediately and the resources associated with the executor will be freed when all pending futures are done executing. Regardless of the value of *wait*, the entire Python program will not exit until all pending futures are done executing. If *cancel_futures* is `True`, this method will cancel all pending futures that the executor has not started running. 
Any futures that are completed or running won't be cancelled, regardless of the value of *cancel_futures*. You can avoid having to call this method explicitly if you use the :keyword:`with` statement, which will shutdown the executor instance (waiting as if :meth:`~MPIPoolExecutor.shutdown` were called with *wait* set to `True`). :: import time with MPIPoolExecutor(max_workers=1) as executor: future = executor.submit(time.sleep, 2) assert future.done() .. method:: bootup(wait=True) Signal the executor that it should allocate eagerly any required resources (in particular, MPI worker processes). If *wait* is `True`, then :meth:`~MPIPoolExecutor.bootup` will not return until the executor resources are ready to process submissions. Resources are automatically allocated in the first call to :meth:`~MPIPoolExecutor.submit`, thus calling :meth:`~MPIPoolExecutor.bootup` explicitly is seldom needed. .. attribute:: num_workers Number of worker processes in the pool. .. envvar:: MPI4PY_FUTURES_MAX_WORKERS If the *max_workers* parameter to :class:`MPIPoolExecutor` is `None` or not given, the :envvar:`MPI4PY_FUTURES_MAX_WORKERS` environment variable provides a fallback value for the maximum number of MPI worker processes to spawn. .. versionadded:: 3.1.0 .. envvar:: MPI4PY_FUTURES_USE_PKL5 If the *use_pkl5* keyword argument to :class:`MPIPoolExecutor` is `None` or not given, the :envvar:`MPI4PY_FUTURES_USE_PKL5` environment variable provides a fallback value for whether the executor should use :mod:`pickle5` with out-of-band buffers for interprocess communication. Accepted values are ``0`` and ``1`` (interpreted as `False` and `True`, respectively), and strings specifying a `YAML boolean`_ value (case-insensitive). Using :mod:`pickle5` with out-of-band buffers may benefit applications dealing with large buffer-like objects like NumPy arrays. See :mod:`mpi4py.util.pkl5` for additional information. .. versionadded:: 4.0.0 .. _YAML boolean: https://yaml.org/type/bool.html .. envvar:: MPI4PY_FUTURES_BACKOFF If the *backoff* keyword argument to :class:`MPIPoolExecutor` is not given, the :envvar:`MPI4PY_FUTURES_BACKOFF` environment variable can be set to a :class:`float` value specifying the maximum number of seconds a worker thread or process suspends execution with :func:`time.sleep()` while idle-waiting. If not set, the default backoff value is 0.001 seconds. Lower values will reduce latency and increase execution throughput for very short-lived tasks, albeit at the expense of spinning CPU cores and increased energy consumption. .. versionadded:: 4.0.0 .. note:: As the master process uses a separate thread to perform MPI communication with the workers, the backend MPI implementation should provide support for `MPI.THREAD_MULTIPLE`. However, some popular MPI implementations do not yet support concurrent MPI calls from multiple threads. Additionally, users may decide to initialize MPI with a lower level of thread support. If the level of thread support in the backend MPI is less than `MPI.THREAD_MULTIPLE`, :mod:`mpi4py.futures` will use a global lock to serialize MPI calls. If the level of thread support is less than `MPI.THREAD_SERIALIZED`, :mod:`mpi4py.futures` will emit a :exc:`RuntimeWarning`. .. warning:: If the level of thread support in the backend MPI is less than `MPI.THREAD_SERIALIZED` (i.e., it is either `MPI.THREAD_SINGLE` or `MPI.THREAD_FUNNELED`), in theory :mod:`mpi4py.futures` cannot be used.
Rather than raising an exception, :mod:`mpi4py.futures` emits a warning and takes a "cross-fingers" attitude to continue execution in the hope that serializing MPI calls with a global lock will actually work. MPICommExecutor --------------- Legacy MPI-1 implementations (as well as some vendor MPI-2 implementations) do not support the dynamic process management features introduced in the MPI-2 standard. Additionally, job schedulers and batch systems in supercomputing facilities may pose additional complications to applications using the :c:func:`MPI_Comm_spawn` routine. With these issues in mind, :mod:`mpi4py.futures` supports an additional, more traditional, SPMD-like usage pattern requiring MPI-1 calls only. Python applications are started the usual way, e.g., using the :program:`mpiexec` command. Python code should make a collective call to the :class:`MPICommExecutor` context manager to partition the set of MPI processes within an MPI communicator into one master process and many worker processes. The master process gets access to an :class:`MPIPoolExecutor` instance to submit tasks. Meanwhile, the worker processes follow a different execution path and team up to execute the tasks submitted from the master. Besides alleviating the lack of dynamic process management features in legacy MPI-1 or partial MPI-2 implementations, the :class:`MPICommExecutor` context manager may be useful in classic MPI-based Python applications willing to take advantage of the simple, task-based, master/worker approach available in the :mod:`mpi4py.futures` package. .. class:: MPICommExecutor(comm=None, root=0) Context manager for :class:`MPIPoolExecutor`. This context manager splits an MPI (intra)communicator *comm* (defaults to `MPI.COMM_WORLD` if not provided or `None`) in two disjoint sets: a single master process (with rank *root* in *comm*) and the remaining worker processes. These sets are then connected through an intercommunicator. The target of the :keyword:`with` statement is assigned either an :class:`MPIPoolExecutor` instance (at the master) or `None` (at the workers). :: from mpi4py import MPI from mpi4py.futures import MPICommExecutor with MPICommExecutor(MPI.COMM_WORLD, root=0) as executor: if executor is not None: future = executor.submit(abs, -42) assert future.result() == 42 answer = set(executor.map(abs, [-42, 42])) assert answer == {42} .. warning:: If :class:`MPICommExecutor` is passed a communicator of size one (e.g., `MPI.COMM_SELF`), then the executor instance assigned to the target of the :keyword:`with` statement will execute all submitted tasks in a single worker thread, thus ensuring that task execution still progresses asynchronously. However, the :term:`GIL` will prevent the main and worker threads from running concurrently in multicore processors. Moreover, the thread context switching may noticeably harm the performance of CPU-bound tasks. In case of I/O-bound tasks, the :term:`GIL` is not usually an issue, however, as a single worker thread is used, it progresses one task at a time. We advise against using :class:`MPICommExecutor` with communicators of size one and suggest refactoring your code to use a :class:`~concurrent.futures.ThreadPoolExecutor` instead.
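The following is a minimal sketch of such a refactoring, assuming the application may be launched either as a single MPI process or under :program:`mpiexec` with several processes; the task function ``work`` is illustrative only::

    from concurrent.futures import ThreadPoolExecutor
    from mpi4py import MPI
    from mpi4py.futures import MPICommExecutor

    def work(x):
        # placeholder task, replace with the actual computation
        return x * x

    if __name__ == '__main__':
        comm = MPI.COMM_WORLD
        if comm.Get_size() == 1:
            # single MPI process: use a thread pool instead of MPICommExecutor
            with ThreadPoolExecutor(max_workers=1) as executor:
                print(list(executor.map(work, range(8))))
        else:
            # two or more MPI processes: rank 0 acts as master, the rest as workers
            with MPICommExecutor(comm, root=0) as executor:
                if executor is not None:
                    print(list(executor.map(work, range(8))))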
Command line ------------ Recalling the issues related to the lack of support for dynamic process management features in MPI implementations, :mod:`mpi4py.futures` supports an alternative usage pattern where Python code (either from scripts, modules, or zip files) is run under command line control of the :mod:`mpi4py.futures` package by passing :samp:`-m mpi4py.futures` to the :program:`python` executable. The ``mpi4py.futures`` invocation should be passed a *pyfile* path to a script (or a zipfile/directory containing a :file:`__main__.py` file). Additionally, ``mpi4py.futures`` accepts :samp:`-m {mod}` to execute a module named *mod*, :samp:`-c {cmd}` to execute a command string *cmd*, or even :samp:`-` to read commands from standard input (:data:`sys.stdin`). Summarizing, :samp:`mpi4py.futures` can be invoked in the following ways: * :samp:`$ mpiexec -n {numprocs} python -m mpi4py.futures {pyfile} [arg] ...` * :samp:`$ mpiexec -n {numprocs} python -m mpi4py.futures -m {mod} [arg] ...` * :samp:`$ mpiexec -n {numprocs} python -m mpi4py.futures -c {cmd} [arg] ...` * :samp:`$ mpiexec -n {numprocs} python -m mpi4py.futures - [arg] ...` Before starting the main script execution, :mod:`mpi4py.futures` splits `MPI.COMM_WORLD` into one master (the process with rank 0 in `MPI.COMM_WORLD`) and *numprocs - 1* workers and connects them through an MPI intercommunicator. Afterwards, the master process proceeds with the execution of the user script code, which eventually creates :class:`MPIPoolExecutor` instances to submit tasks. Meanwhile, the worker processes follow a different execution path to serve the master. Upon successful termination of the main script at the master, the entire MPI execution environment exits gracefully. In case of any unhandled exception in the main script, the master process calls ``MPI.COMM_WORLD.Abort(1)`` to prevent deadlocks and force termination of the entire MPI execution environment. .. warning:: Running scripts under command line control of :mod:`mpi4py.futures` is quite similar to executing a single-process application that spawns additional workers as required. However, there is a very important difference users should be aware of. All :class:`~MPIPoolExecutor` instances created at the master will share the pool of workers. Tasks submitted at the master from many different executors will be scheduled for execution in random order as soon as a worker is idle. Any executor can easily starve all the workers (e.g., by calling :func:`MPIPoolExecutor.map` with long iterables). If that ever happens, submissions from other executors will not be serviced until free workers are available. .. seealso:: :ref:`python:using-on-cmdline` Documentation on Python command line interface. Parallel tasks -------------- The :mod:`mpi4py.futures` package favors an embarrassingly parallel execution model involving a series of sequential tasks independent of each other and executed asynchronously. Albeit unnatural, :class:`MPIPoolExecutor` can still be used for handling workloads involving parallel tasks, where worker processes communicate and coordinate with each other via MPI. .. function:: get_comm_workers() Access an intracommunicator grouping MPI worker processes. Executing parallel tasks with :mod:`mpi4py.futures` requires following some rules, cf. highlighted lines in example :ref:`cpi-py` : * Use :attr:`MPIPoolExecutor.num_workers` to determine the number of worker processes in the executor and **submit exactly one callable per worker process** using the :meth:`MPIPoolExecutor.submit` method.
* The submitted callable must use :func:`get_comm_workers` to access an intracommunicator grouping MPI worker processes. Afterwards, it is highly recommended calling the :meth:`~mpi4py.MPI.Comm.Barrier` method on the communicator. The barrier synchronization ensures that every worker process is executing the submitted callable exactly once. Afterwards, the parallel task can safely perform any kind of point-to-point or collective operation using the returned communicator. * The :class:`~concurrent.futures.Future` instances returned by :meth:`MPIPoolExecutor.submit` should be collected in a sequence. Use :func:`~concurrent.futures.wait` with the sequence of :class:`~concurrent.futures.Future` instances to ensure logical completion of the parallel task. Utilities --------- The :mod:`mpi4py.futures` package provides additional utilities for handling :class:`~concurrent.futures.Future` instances. .. autofunction:: mpi4py.futures.collect .. autofunction:: mpi4py.futures.compose Examples -------- Computing the Julia set +++++++++++++++++++++++ The following :ref:`julia-py` script computes the `Julia set`_ and dumps an image to disk in binary `PGM`_ format. The code starts by importing :class:`MPIPoolExecutor` from the :mod:`mpi4py.futures` package. Next, some global constants and functions implement the computation of the Julia set. The computations are protected with the standard :code:`if __name__ == '__main__': ...` idiom. The image is computed by whole scanlines submitting all these tasks at once using the :class:`~MPIPoolExecutor.map` method. The result iterator yields scanlines in-order as the tasks complete. Finally, each scanline is dumped to disk. .. _`Julia set`: https://en.wikipedia.org/wiki/Julia_set .. _`PGM`: https://netpbm.sourceforge.net/doc/pgm.html .. code-block:: python :name: julia-py :caption: :file:`julia.py` :emphasize-lines: 1,26,28,29 :linenos: from mpi4py.futures import MPIPoolExecutor x0, x1, w = -2.0, +2.0, 640*2 y0, y1, h = -1.5, +1.5, 480*2 dx = (x1 - x0) / w dy = (y1 - y0) / h c = complex(0, 0.65) def julia(x, y): z = complex(x, y) n = 255 while abs(z) < 3 and n > 1: z = z**2 + c n -= 1 return n def julia_line(k): line = bytearray(w) y = y1 - k * dy for j in range(w): x = x0 + j * dx line[j] = julia(x, y) return line if __name__ == '__main__': with MPIPoolExecutor() as executor: image = executor.map(julia_line, range(h)) with open('julia.pgm', 'wb') as f: f.write(b'P5 %d %d %d\n' % (w, h, 255)) for line in image: f.write(line) The recommended way to execute the script is by using the :program:`mpiexec` command specifying one MPI process (master) and (optional but recommended) the desired MPI universe size, which determines the number of additional dynamically spawned processes (workers). The MPI universe size is provided either by a batch system or set by the user via command-line arguments to :program:`mpiexec` or environment variables. Below we provide examples for MPICH and Open MPI implementations [#]_. In all of these examples, the :program:`mpiexec` command launches a single master process running the Python interpreter and executing the main script. When required, :mod:`mpi4py.futures` spawns the pool of 16 worker processes. The master submits tasks to the workers and waits for the results. The workers receive incoming tasks, execute them, and send back the results to the master. .. 
highlight:: console When using MPICH implementation or its derivatives based on the Hydra process manager, users can set the MPI universe size via the ``-usize`` argument to :program:`mpiexec`:: $ mpiexec -n 1 -usize 17 python julia.py or, alternatively, by setting the :envvar:`MPIEXEC_UNIVERSE_SIZE` environment variable:: $ env MPIEXEC_UNIVERSE_SIZE=17 mpiexec -n 1 python julia.py In the Open MPI implementation, the MPI universe size can be set via the ``-host`` argument to :program:`mpiexec`:: $ mpiexec -n 1 -host localhost:17 python julia.py Another way to specify the number of workers is to use the :mod:`mpi4py.futures`-specific environment variable :envvar:`MPI4PY_FUTURES_MAX_WORKERS`:: $ env MPI4PY_FUTURES_MAX_WORKERS=16 mpiexec -n 1 python julia.py Note that in this case, the MPI universe size is ignored. Alternatively, users may decide to execute the script in a more traditional way, that is, all the MPI processes are started at once. The user script is run under command-line control of :mod:`mpi4py.futures` passing the :ref:`-m ` flag to the :program:`python` executable:: $ mpiexec -n 17 python -m mpi4py.futures julia.py As explained previously, the 17 processes are partitioned in one master and 16 workers. The master process executes the main script while the workers execute the tasks submitted by the master. .. [#] When using an MPI implementation other than MPICH or Open MPI, please check the documentation of the implementation and/or batch system for the ways to specify the desired MPI universe size. Computing Pi (parallel task) ++++++++++++++++++++++++++++ The number :math:`\pi` can be approximated via numerical integration with the simple midpoint rule, that is: .. math:: \pi = \int_{0}^{1} \frac{4}{1+x^2} \,dx \approx \frac{1}{n} \sum_{i=1}^{n} \frac{4}{1 + \left[\frac{1}{n} \left(i-\frac{1}{2}\right) \right]^2} . The following :ref:`cpi-py` script computes such approximations using :mod:`mpi4py.futures` with a parallel task involving a collective reduction operation. Highlighted lines correspond to the rules discussed in `Parallel tasks`_. .. code-block:: python :name: cpi-py :caption: :file:`cpi.py` :emphasize-lines: 9-10,21,35-36,39 :linenos: import math import sys from mpi4py.futures import MPIPoolExecutor, wait from mpi4py.futures import get_comm_workers def compute_pi(n): # Access intracommunicator and synchronize comm = get_comm_workers() comm.Barrier() rank = comm.Get_rank() size = comm.Get_size() # Local computation h = 1.0 / n s = 0.0 for i in range(rank + 1, n + 1, size): x = h * (i - 0.5) s += 4.0 / (1.0 + x**2) pi_partial = s * h # Parallel reduce-to-all pi = comm.allreduce(pi_partial) # All workers return the same value return pi if __name__ == '__main__': n = int(sys.argv[1]) if len(sys.argv) > 1 else 256 with MPIPoolExecutor() as executor: # Submit exactly one callable per worker P = executor.num_workers fs = [executor.submit(compute_pi, n) for _ in range(P)] # Wait for all workers to finish wait(fs) # Get result from the first future object. # In this particular example, due to using reduce-to-all, # all the other future objects hold the same result value. pi = fs[0].result() print( f"pi: {pi:.16f}, error: {abs(pi - math.pi):.3e}", f"({n:d} intervals, {P:d} workers)", ) .. 
highlight:: console To run in modern MPI-2 mode:: $ env MPI4PY_FUTURES_MAX_WORKERS=4 mpiexec -n 1 python cpi.py 128 pi: 3.1415977398528137, error: 5.086e-06 (128 intervals, 4 workers) $ env MPI4PY_FUTURES_MAX_WORKERS=8 mpiexec -n 1 python cpi.py 512 pi: 3.1415929714812316, error: 3.179e-07 (512 intervals, 8 workers) To run in legacy MPI-1 mode:: $ mpiexec -n 5 python -m mpi4py.futures cpi.py 128 pi: 3.1415977398528137, error: 5.086e-06 (128 intervals, 4 workers) $ mpiexec -n 9 python -m mpi4py.futures cpi.py 512 pi: 3.1415929714812316, error: 3.179e-07 (512 intervals, 8 workers) Citation -------- If :mod:`mpi4py.futures` been significant to a project that leads to an academic publication, please acknowledge our work by citing the following article [mpi4py-futures]_: .. [mpi4py-futures] M. Rogowski, S. Aseeri, D. Keyes, and L. Dalcin, *mpi4py.futures: MPI-Based Asynchronous Task Execution for Python*, IEEE Transactions on Parallel and Distributed Systems, 34(2):611-622, 2023. https://doi.org/10.1109/TPDS.2022.3225481 .. Local variables: .. fill-column: 79 .. End: mpi4py-4.0.3/docs/source/mpi4py.rst000066400000000000000000000146351475341043600171640ustar00rootroot00000000000000mpi4py ====== .. automodule:: mpi4py :synopsis: The MPI for Python package. Runtime configuration options ----------------------------- .. data:: mpi4py.rc This object has attributes exposing runtime configuration options that become effective at import time of the :mod:`~mpi4py.MPI` module. .. rubric:: Attributes Summary .. table:: :widths: grid ============== ======================================================== `initialize` Automatic MPI initialization at import `threads` Request initialization with thread support `thread_level` Level of thread support to request `finalize` Automatic MPI finalization at exit `fast_reduce` Use tree-based reductions for objects `recv_mprobe` Use matched probes to receive objects `irecv_bufsz` Default buffer size in bytes for :meth:`~MPI.Comm.irecv` `errors` Error handling policy ============== ======================================================== .. rubric:: Attributes Documentation .. attribute:: mpi4py.rc.initialize Automatic MPI initialization at import. :type: :class:`bool` :default: :obj:`True` .. seealso:: :envvar:`MPI4PY_RC_INITIALIZE` .. attribute:: mpi4py.rc.threads Request initialization with thread support. :type: :class:`bool` :default: :obj:`True` .. seealso:: :envvar:`MPI4PY_RC_THREADS` .. attribute:: mpi4py.rc.thread_level Level of thread support to request. :type: :class:`str` :default: ``"multiple"`` :choices: ``"multiple"``, ``"serialized"``, ``"funneled"``, ``"single"`` .. seealso:: :envvar:`MPI4PY_RC_THREAD_LEVEL` .. attribute:: mpi4py.rc.finalize Automatic MPI finalization at exit. :type: :obj:`None` or :class:`bool` :default: :obj:`None` .. seealso:: :envvar:`MPI4PY_RC_FINALIZE` .. attribute:: mpi4py.rc.fast_reduce Use tree-based reductions for objects. :type: :class:`bool` :default: :obj:`True` .. seealso:: :envvar:`MPI4PY_RC_FAST_REDUCE` .. attribute:: mpi4py.rc.recv_mprobe Use matched probes to receive objects. :type: :class:`bool` :default: :obj:`True` .. seealso:: :envvar:`MPI4PY_RC_RECV_MPROBE` .. attribute:: mpi4py.rc.irecv_bufsz Default buffer size in bytes for :meth:`~MPI.Comm.irecv`. :type: :class:`int` :default: ``32768`` .. seealso:: :envvar:`MPI4PY_RC_IRECV_BUFSZ` .. versionadded:: 4.0.0 .. attribute:: mpi4py.rc.errors Error handling policy. 
:type: :class:`str` :default: ``"exception"`` :choices: ``"exception"``, ``"default"``, ``"abort"``, ``"fatal"`` .. seealso:: :envvar:`MPI4PY_RC_ERRORS` .. rubric:: Example MPI for Python features automatic initialization and finalization of the MPI execution environment. By using the `mpi4py.rc` object, MPI initialization and finalization can be handled programmatically:: import mpi4py mpi4py.rc.initialize = False # do not initialize MPI automatically mpi4py.rc.finalize = False # do not finalize MPI automatically from mpi4py import MPI # import the 'MPI' module MPI.Init() # manual initialization of the MPI environment ... # your finest code here ... MPI.Finalize() # manual finalization of the MPI environment Environment variables --------------------- The following environment variables override the corresponding attributes of the :data:`mpi4py.rc` and :data:`MPI.pickle` objects at import time of the :mod:`~mpi4py.MPI` module. .. note:: For variables of boolean type, accepted values are ``0`` and ``1`` (interpreted as :obj:`False` and :obj:`True`, respectively), and strings specifying a `YAML boolean`_ value (case-insensitive). .. _YAML boolean: https://yaml.org/type/bool.html .. envvar:: MPI4PY_RC_INITIALIZE :type: :class:`bool` :default: :obj:`True` Whether to automatically initialize MPI at import time of the :mod:`mpi4py.MPI` module. .. seealso:: :attr:`mpi4py.rc.initialize` .. versionadded:: 4.0.0 .. envvar:: MPI4PY_RC_FINALIZE :type: :obj:`None` | :class:`bool` :default: :obj:`None` :choices: :obj:`None`, :obj:`True`, :obj:`False` Whether to automatically finalize MPI at exit time of the Python process. .. seealso:: :attr:`mpi4py.rc.finalize` .. versionadded:: 4.0.0 .. envvar:: MPI4PY_RC_THREADS :type: :class:`bool` :default: :obj:`True` Whether to initialize MPI with thread support. .. seealso:: :attr:`mpi4py.rc.threads` .. versionadded:: 3.1.0 .. envvar:: MPI4PY_RC_THREAD_LEVEL :default: ``"multiple"`` :choices: ``"single"``, ``"funneled"``, ``"serialized"``, ``"multiple"`` The level of required thread support. .. seealso:: :attr:`mpi4py.rc.thread_level` .. versionadded:: 3.1.0 .. envvar:: MPI4PY_RC_FAST_REDUCE :type: :class:`bool` :default: :obj:`True` Whether to use tree-based reductions for objects. .. seealso:: :attr:`mpi4py.rc.fast_reduce` .. versionadded:: 3.1.0 .. envvar:: MPI4PY_RC_RECV_MPROBE :type: :class:`bool` :default: :obj:`True` Whether to use matched probes to receive objects. .. seealso:: :attr:`mpi4py.rc.recv_mprobe` .. envvar:: MPI4PY_RC_IRECV_BUFSZ :type: :class:`int` :default: ``32768`` Default buffer size in bytes for :meth:`~MPI.Comm.irecv`. .. seealso:: :attr:`mpi4py.rc.irecv_bufsz` .. versionadded:: 4.0.0 .. envvar:: MPI4PY_RC_ERRORS :default: ``"exception"`` :choices: ``"exception"``, ``"default"``, ``"abort"``, ``"fatal"`` Controls default MPI error handling policy. .. seealso:: :attr:`mpi4py.rc.errors` .. versionadded:: 3.1.0 .. envvar:: MPI4PY_PICKLE_PROTOCOL :type: :class:`int` :default: :data:`pickle.HIGHEST_PROTOCOL` Controls the default pickle protocol to use when communicating Python objects. .. seealso:: :attr:`~mpi4py.MPI.Pickle.PROTOCOL` attribute of the :data:`MPI.pickle` object within the :mod:`~mpi4py.MPI` module. .. versionadded:: 3.1.0 .. envvar:: MPI4PY_PICKLE_THRESHOLD :type: :class:`int` :default: ``262144`` Controls the default buffer size threshold for switching from in-band to out-of-band buffer handling when using pickle protocol version 5 or higher. .. 
seealso:: :attr:`~mpi4py.MPI.Pickle.THRESHOLD` attribute of the :data:`MPI.pickle` object within the :mod:`~mpi4py.MPI` module. .. versionadded:: 3.1.2 Miscellaneous functions ----------------------- .. autofunction:: mpi4py.profile .. autofunction:: mpi4py.get_include .. autofunction:: mpi4py.get_config .. Local variables: .. fill-column: 79 .. End: mpi4py-4.0.3/docs/source/mpi4py.run.rst000066400000000000000000000107401475341043600177600ustar00rootroot00000000000000mpi4py.run ========== .. module:: mpi4py.run :synopsis: Run Python code using ``-m mpi4py``. .. versionadded:: 3.0.0 At import time, :mod:`mpi4py` initializes the MPI execution environment calling :c:func:`MPI_Init_thread` and installs an exit hook to automatically call :c:func:`MPI_Finalize` just before the Python process terminates. Additionally, :mod:`mpi4py` overrides the default `ERRORS_ARE_FATAL` error handler in favor of `ERRORS_RETURN`, which allows translating MPI errors in Python exceptions. These departures from standard MPI behavior may be controversial, but are quite convenient within the highly dynamic Python programming environment. Third-party code using :mod:`mpi4py` can just ``from mpi4py import MPI`` and perform MPI calls without the tedious initialization/finalization handling. MPI errors, once translated automatically to Python exceptions, can be dealt with the common :keyword:`try`...\ :keyword:`except`...\ :keyword:`finally` clauses; unhandled MPI exceptions will print a traceback which helps in locating problems in source code. Unfortunately, the interplay of automatic MPI finalization and unhandled exceptions may lead to deadlocks. In unattended runs, these deadlocks will drain the battery of your laptop, or burn precious allocation hours in your supercomputing facility. Exceptions and deadlocks ------------------------ Consider the following snippet of Python code. Assume this code is stored in a standard Python script file and run with :command:`mpiexec` in two or more processes. .. code-block:: python :name: deadlock-py :caption: :file:`deadlock.py` :emphasize-lines: 5 :linenos: from mpi4py import MPI assert MPI.COMM_WORLD.Get_size() > 1 rank = MPI.COMM_WORLD.Get_rank() if rank == 0: 1/0 MPI.COMM_WORLD.send(None, dest=1, tag=42) elif rank == 1: MPI.COMM_WORLD.recv(source=0, tag=42) Process 0 raises `ZeroDivisionError` exception before performing a send call to process 1. As the exception is not handled, the Python interpreter running in process 0 will proceed to exit with non-zero status. However, as :mod:`mpi4py` installed a finalizer hook to call :c:func:`MPI_Finalize` before exit, process 0 will block waiting for other processes to also enter the :c:func:`MPI_Finalize` call. Meanwhile, process 1 will block waiting for a message to arrive from process 0, thus never reaching to :c:func:`MPI_Finalize`. The whole MPI execution environment is irremediably in a deadlock state. To alleviate this issue, :mod:`mpi4py` offers a simple, alternative command line execution mechanism based on using the :ref:`-m ` flag and implemented with the :mod:`runpy` module. To use this features, Python code should be run passing ``-m mpi4py`` in the command line invoking the Python interpreter. In case of unhandled exceptions, the finalizer hook will call :c:func:`MPI_Abort` on the :c:data:`MPI_COMM_WORLD` communicator, thus effectively aborting the MPI execution environment. .. warning:: When a process is forced to abort, resources (e.g. 
open files) are not cleaned-up and any registered finalizers (either with the :mod:`atexit` module, the Python C/API function :c:func:`Py_AtExit()`, or even the C standard library function :c:func:`atexit`) will not be executed. Thus, aborting execution is an extremely impolite way of ensuring process termination. However, MPI provides no other mechanism to recover from a deadlock state. Command line ------------ The use of ``-m mpi4py`` to execute Python code on the command line resembles that of the Python interpreter. * :samp:`mpiexec -n {numprocs} python -m mpi4py {pyfile} [arg] ...` * :samp:`mpiexec -n {numprocs} python -m mpi4py -m {mod} [arg] ...` * :samp:`mpiexec -n {numprocs} python -m mpi4py -c {cmd} [arg] ...` * :samp:`mpiexec -n {numprocs} python -m mpi4py - [arg] ...` .. describe:: Execute the Python code contained in *pyfile*, which must be a filesystem path referring to either a Python file, a directory containing a :file:`__main__.py` file, or a zipfile containing a :file:`__main__.py` file. .. cmdoption:: -m Search :data:`sys.path` for the named module *mod* and execute its contents. .. cmdoption:: -c Execute the Python code in the *cmd* string command. .. describe:: - Read commands from standard input (:data:`sys.stdin`). .. seealso:: :ref:`python:using-on-cmdline` Documentation on Python command line interface. .. Local variables: .. fill-column: 79 .. End: mpi4py-4.0.3/docs/source/mpi4py.typing.rst000066400000000000000000000022221475341043600204620ustar00rootroot00000000000000mpi4py.typing ============= .. module:: mpi4py.typing :synopsis: Typing support. .. versionadded:: 4.0.0 This module provides :term:`type aliases ` used to add :term:`type hints ` to the various functions and methods within the :mod:`~mpi4py.MPI` module. .. seealso:: Module :mod:`typing` Documentation of the :mod:`typing` standard module. .. currentmodule:: mpi4py.typing .. rubric:: Types Summary .. autosummary:: SupportsBuffer SupportsDLPack SupportsCAI Buffer Bottom InPlace Aint Count Displ Offset TypeSpec BufSpec BufSpecB BufSpecV BufSpecW TargetSpec .. rubric:: Types Documentation .. autotype:: SupportsBuffer .. autotype:: SupportsDLPack .. autotype:: SupportsCAI .. autotype:: Buffer .. autotype:: Bottom .. autotype:: InPlace .. autotype:: Aint .. autotype:: Count .. autotype:: Displ .. autotype:: Offset .. autotype:: TypeSpec .. autotype:: BufSpec .. autotype:: BufSpecB .. autotype:: BufSpecV .. autotype:: BufSpecW .. autotype:: TargetSpec .. autotype:: S .. autotype:: T .. autotype:: U .. autotype:: V .. Local variables: .. fill-column: 79 .. End: mpi4py-4.0.3/docs/source/mpi4py.util.dtlib.rst000066400000000000000000000006621475341043600212300ustar00rootroot00000000000000mpi4py.util.dtlib ----------------- .. module:: mpi4py.util.dtlib :synopsis: Convert NumPy and MPI datatypes. .. versionadded:: 3.1.0 The :mod:`mpi4py.util.dtlib` module provides converter routines between NumPy and MPI datatypes. .. autofunction:: from_numpy_dtype :param dtype: NumPy dtype-like object. .. autofunction:: to_numpy_dtype :param datatype: MPI datatype. .. Local variables: .. fill-column: 79 .. End: mpi4py-4.0.3/docs/source/mpi4py.util.pkl5.rst000066400000000000000000000112011475341043600207740ustar00rootroot00000000000000mpi4py.util.pkl5 ---------------- .. module:: mpi4py.util.pkl5 :synopsis: Pickle-based communication using protocol 5. .. 
versionadded:: 3.1.0 :mod:`pickle` protocol 5 (see :pep:`574`) introduced support for out-of-band buffers, allowing for more efficient handling of certain object types with large memory footprints. MPI for Python uses the traditional in-band handling of buffers. This approach is appropriate for communicating non-buffer Python objects, or buffer-like objects with small memory footprints. For point-to-point communication, in-band buffer handling allows for the communication of a pickled stream with a single MPI message, at the expense of additional CPU and memory overhead in the pickling and unpickling steps. The :mod:`mpi4py.util.pkl5` module provides communicator wrapper classes reimplementing pickle-based point-to-point and collective communication methods using pickle protocol 5. Handling out-of-band buffers necessarily involves multiple MPI messages, thus increasing latency and hurting performance in case of small size data. However, in case of large size data, the zero-copy savings of out-of-band buffer handling more than offset the extra latency costs. Additionally, these wrapper methods overcome the infamous 2 GiB message count limit (MPI-1 to MPI-3). .. note:: Support for pickle protocol 5 is available in the :mod:`pickle` module within the Python standard library since Python 3.8. Previous Python 3 releases can use the :mod:`pickle5` backport, which is available on `PyPI `_ and can be installed with:: python -m pip install pickle5 .. _pickle5-pypi: https://pypi.org/project/pickle5/ .. autoclass:: Request Custom request class for nonblocking communications. .. note:: :class:`Request` is not a subclass of :class:`mpi4py.MPI.Request` .. automethod:: Free .. automethod:: free .. automethod:: cancel .. automethod:: get_status .. automethod:: test .. automethod:: wait .. automethod:: get_status_all :classmethod: .. automethod:: testall :classmethod: .. automethod:: waitall :classmethod: .. autoclass:: Message Custom message class for matching probes. .. note:: :class:`Message` is not a subclass of :class:`mpi4py.MPI.Message` .. automethod:: free .. automethod:: recv .. automethod:: irecv .. automethod:: probe :classmethod: .. automethod:: iprobe :classmethod: .. autoclass:: Comm Base communicator wrapper class. .. automethod:: send .. automethod:: bsend .. automethod:: ssend .. automethod:: isend .. automethod:: ibsend .. automethod:: issend .. automethod:: recv .. automethod:: irecv .. warning:: This method cannot be supported reliably and raises :exc:`RuntimeError`. .. automethod:: sendrecv .. automethod:: mprobe .. automethod:: improbe .. automethod:: bcast .. versionadded:: 3.1.0 .. automethod:: gather .. versionadded:: 4.0.0 .. automethod:: scatter .. versionadded:: 4.0.0 .. automethod:: allgather .. versionadded:: 4.0.0 .. automethod:: alltoall .. versionadded:: 4.0.0 .. autoclass:: Intracomm Intracommunicator wrapper class. .. autoclass:: Intercomm Intercommunicator wrapper class. Examples ++++++++ .. code-block:: python :name: test-pkl5-1 :caption: :file:`test-pkl5-1.py` :emphasize-lines: 3,5,11 :linenos: import numpy as np from mpi4py import MPI from mpi4py.util import pkl5 comm = pkl5.Intracomm(MPI.COMM_WORLD) # comm wrapper size = comm.Get_size() rank = comm.Get_rank() dst = (rank + 1) % size src = (rank - 1) % size sobj = np.full(1024**3, rank, dtype='i4') # > 4 GiB sreq = comm.isend(sobj, dst, tag=42) robj = comm.recv (None, src, tag=42) sreq.Free() assert np.min(robj) == src assert np.max(robj) == src .. 
code-block:: python :name: test-pkl5-2 :caption: :file:`test-pkl5-2.py` :emphasize-lines: 3,5,11 :linenos: import numpy as np from mpi4py import MPI from mpi4py.util import pkl5 comm = pkl5.Intracomm(MPI.COMM_WORLD) # comm wrapper size = comm.Get_size() rank = comm.Get_rank() dst = (rank + 1) % size src = (rank - 1) % size sobj = np.full(1024**3, rank, dtype='i4') # > 4 GiB sreq = comm.isend(sobj, dst, tag=42) status = MPI.Status() rmsg = comm.mprobe(status=status) assert status.Get_source() == src assert status.Get_tag() == 42 rreq = rmsg.irecv() robj = rreq.wait() sreq.Free() assert np.max(robj) == src assert np.min(robj) == src .. Local variables: .. fill-column: 79 .. End: mpi4py-4.0.3/docs/source/mpi4py.util.pool.rst000066400000000000000000000027301475341043600211010ustar00rootroot00000000000000mpi4py.util.pool ---------------- .. module:: mpi4py.util.pool :synopsis: :mod:`multiprocessing.pool` interface via :mod:`mpi4py.futures`. .. versionadded:: 4.0.0 .. seealso:: This module intends to be a drop-in replacement for the :mod:`multiprocessing.pool` interface from the Python standard library. The :class:`~mpi4py.util.pool.Pool` class exposed here is implemented as a thin wrapper around :class:`~mpi4py.futures.MPIPoolExecutor`. .. note:: The :mod:`mpi4py.futures` package offers a higher level interface for asynchronously pushing tasks to MPI worker process, allowing for a clear separation between submitting tasks and waiting for the results. .. autoclass:: mpi4py.util.pool.Pool .. automethod:: __init__ .. automethod:: apply .. automethod:: apply_async .. automethod:: map .. automethod:: map_async .. automethod:: imap .. automethod:: imap_unordered .. automethod:: starmap .. automethod:: starmap_async .. automethod:: istarmap .. automethod:: istarmap_unordered .. automethod:: close .. automethod:: terminate .. automethod:: join .. autoclass:: mpi4py.util.pool.ThreadPool :show-inheritance: .. autoclass:: mpi4py.util.pool.AsyncResult .. automethod:: get .. automethod:: wait .. automethod:: ready .. automethod:: successful .. autoclass:: mpi4py.util.pool.ApplyResult :show-inheritance: .. autoclass:: mpi4py.util.pool.MapResult :show-inheritance: mpi4py-4.0.3/docs/source/mpi4py.util.rst000066400000000000000000000005761475341043600201370ustar00rootroot00000000000000mpi4py.util =========== .. module:: mpi4py.util :synopsis: Miscellaneous utilities. .. versionadded:: 3.1.0 The :mod:`mpi4py.util` package collects miscellaneous utilities within the intersection of Python and MPI. .. toctree:: :maxdepth: 1 mpi4py.util.dtlib mpi4py.util.pkl5 mpi4py.util.pool mpi4py.util.sync .. Local variables: .. fill-column: 79 .. End: mpi4py-4.0.3/docs/source/mpi4py.util.sync.rst000066400000000000000000000130311475341043600211000ustar00rootroot00000000000000mpi4py.util.sync ---------------- .. module:: mpi4py.util.sync :synopsis: Synchronization utilities. .. versionadded:: 4.0.0 The :mod:`mpi4py.util.sync` module provides parallel synchronization utilities. Sequential execution ++++++++++++++++++++ .. autoclass:: mpi4py.util.sync.Sequential Context manager for sequential execution within a group of MPI processes. The implementation is based in MPI-1 point-to-point communication. A process with rank *i* waits in a blocking receive until the previous process rank *i-1* finish executing and signals the next rank *i* with a send. .. automethod:: __init__ .. automethod:: __enter__ .. automethod:: __exit__ .. automethod:: begin .. automethod:: end Global counter ++++++++++++++ .. 
autoclass:: mpi4py.util.sync.Counter Produce consecutive values within a group of MPI processes. The counter interface is close to that of `itertools.count`. The implementation is based in MPI-3 one-sided operations. A root process (typically rank ``0``) holds the counter, and its value is queried and incremented with an atomic RMA *fetch-and-add* operation. .. automethod:: __init__ .. automethod:: __iter__ .. automethod:: __next__ .. automethod:: next .. automethod:: free Mutual exclusion ++++++++++++++++ .. autoclass:: mpi4py.util.sync.Mutex Establish a critical section or mutual exclusion among MPI processes. The mutex interface is close to that of `threading.Lock` and `threading.RLock`, allowing the use of either recursive or non-recursive mutual exclusion. However, a mutex should be used within a group of MPI processes, not threads. In non-recursive mode, the semantics of `Mutex` are somewhat different than these of `threading.Lock`: * Once acquired, a mutex is held and owned by a process until released. * Trying to acquire a mutex already held raises `RuntimeError`. * Trying to release a mutex not yet held raises `RuntimeError`. This mutex implementation uses the scalable and fair spinlock algorithm from [mcs-paper]_ and took inspiration from the MPI-3 RMA implementation of [uam-book]_. .. automethod:: __init__ .. automethod:: __enter__ .. automethod:: __exit__ .. automethod:: acquire .. automethod:: release .. automethod:: locked .. automethod:: count .. automethod:: free .. [mcs-paper] John M. Mellor-Crummey and Michael L. Scott. Algorithms for scalable synchronization on shared-memory multiprocessors. ACM Transactions on Computer Systems, 9(1):21-65, February 1991. https://doi.org/10.1145/103727.103729 .. [uam-book] William Gropp, Torsten Hoefler, Rajeev Thakur, Ewing Lusk. Using Advanced MPI - Modern Features of the Message-Passing Interface. Chapter 4, Section 4.7, Pages 130-131. The MIT Press, November 2014. https://mitpress.mit.edu/9780262527637/using-advanced-mpi/ Condition variable ++++++++++++++++++ .. autoclass:: mpi4py.util.sync.Condition A condition variable allows one or more MPI processes to wait until they are notified by another processes. The condition variable interface is close to that of `threading.Condition`, allowing the use of either recursive or non-recursive mutual exclusion. However, the condition variable should be used within a group of MPI processes, not threads. This condition variable implementation uses a MPI-3 RMA-based scalable and fair circular queue algorithm to track the set of waiting processes. .. automethod:: __init__ .. automethod:: __enter__ .. automethod:: __exit__ .. automethod:: acquire .. automethod:: release .. automethod:: locked .. automethod:: wait .. automethod:: wait_for .. automethod:: notify .. automethod:: notify_all .. automethod:: free Semaphore object ++++++++++++++++ .. autoclass:: mpi4py.util.sync.Semaphore A semaphore object manages an internal counter which is decremented by each `acquire()` call and incremented by each `release()` call. The internal counter never reaches a value below zero; when `acquire()` finds that it is zero, it blocks and waits until some other process calls `release()`. The semaphore interface is close to that of `threading.Semaphore` and `threading.BoundedSemaphore`, allowing the use of either bounded (default) or unbounded semaphores. With a bounded semaphore, the internal counter never exceeds its initial value; otherwise `release()` raises `ValueError`. 
This semaphore implementation uses a global `Counter` and a `Condition` variable to handle waiting and notification. .. automethod:: __init__ .. automethod:: __enter__ .. automethod:: __exit__ .. automethod:: acquire .. automethod:: release .. automethod:: free Examples ++++++++ .. code-block:: python :name: test-sync-1 :caption: :file:`test-sync-1.py` :emphasize-lines: 2,6-9 :linenos: from mpi4py import MPI from mpi4py.util.sync import Counter, Sequential comm = MPI.COMM_WORLD counter = Counter(comm) with Sequential(comm): value = next(counter) counter.free() assert comm.rank == value .. code-block:: python :name: test-sync-2 :caption: :file:`test-sync-2.py` :emphasize-lines: 2,6-11 :linenos: from mpi4py import MPI from mpi4py.util.sync import Counter, Mutex comm = MPI.COMM_WORLD mutex = Mutex(comm) counter = Counter(comm) with mutex: value = next(counter) counter.free() mutex.free() assert ( list(range(comm.size)) == sorted(comm.allgather(value)) ) .. Local variables: .. fill-column: 79 .. End: mpi4py-4.0.3/docs/source/overview.rst000066400000000000000000000522071475341043600176050ustar00rootroot00000000000000Overview ======== .. currentmodule:: mpi4py.MPI MPI for Python provides an object oriented approach to message passing which is grounded in the standard MPI-2 C++ bindings. The interface was designed with a focus on translating MPI syntax and semantics of the standard MPI-2 bindings for C++ to Python. Any user of the standard C/C++ MPI bindings should be able to use this module without the need to learn a new interface. Communicating Python Objects and Array Data ------------------------------------------- The Python standard library supports different mechanisms for data persistence. Many of them rely on disk storage, but *pickling* and *marshaling* can also work with memory buffers. The :mod:`pickle` module provides user-extensible facilities to serialize general Python objects using ASCII or binary formats. The :mod:`marshal` module provides facilities to serialize built-in Python objects using a binary format specific to Python, but independent of machine architecture issues. *MPI for Python* can communicate any built-in or user-defined Python object taking advantage of the features provided by the :mod:`pickle` module. These facilities will be routinely used to build binary representations of objects to communicate (at sending processes), and to restore them back (at receiving processes). Although simple and general, the serialization approach (i.e., *pickling* and *unpickling*) previously discussed imposes important overheads in memory as well as processor usage, especially in the scenario of objects with large memory footprints being communicated. Pickling general Python objects, ranging from primitive or container built-in types to user-defined classes, necessarily requires computer resources. Processing is also needed for dispatching the appropriate serialization method (that depends on the type of the object) and doing the actual packing. Additional memory is always needed, and if its total amount is not known *a priori*, many reallocations can occur. Indeed, in the case of large numeric arrays, this is certainly unacceptable and precludes communication of objects occupying half or more of the available memory resources. *MPI for Python* supports direct communication of any object exporting the single-segment buffer interface.
This interface is a standard Python mechanism provided by some types (e.g., strings and numeric arrays), allowing access from the C side to a contiguous memory buffer (i.e., address and length) containing the relevant data. This feature, in conjunction with the capability of constructing user-defined MPI datatypes describing complicated memory layouts, enables the implementation of many algorithms involving multidimensional numeric arrays (e.g., image processing, fast Fourier transforms, finite difference schemes on structured Cartesian grids) directly in Python, with negligible overhead, and almost as fast as compiled Fortran, C, or C++ codes. Communicators ------------- In *MPI for Python*, `Comm` is the base class of communicators. The `Intracomm` and `Intercomm` classes are subclasses of the `Comm` class. The `Comm.Is_inter` method (and `Comm.Is_intra`, provided for convenience but not part of the MPI specification) is defined for communicator objects and can be used to determine the particular communicator class. Two predefined intracommunicator instances are available: `COMM_SELF` and `COMM_WORLD`. From them, new communicators can be created as needed. The number of processes in a communicator and the calling process rank can be respectively obtained with methods `Comm.Get_size` and `Comm.Get_rank`. The associated process group can be retrieved from a communicator by calling the `Comm.Get_group` method, which returns an instance of the `Group` class. Set operations with `Group` objects like `Group.Union`, `Group.Intersection` and `Group.Difference` are fully supported, as well as the creation of new communicators from these groups using `Comm.Create` and `Intracomm.Create_group`. New communicator instances can be obtained with the `Comm.Clone`, `Comm.Dup` and `Comm.Split` methods, as well as with the `Intracomm.Create_intercomm` and `Intercomm.Merge` methods. Virtual topologies (`Cartcomm`, `Graphcomm` and `Distgraphcomm` classes, which are specializations of the `Intracomm` class) are fully supported. New instances can be obtained from intracommunicator instances with factory methods `Intracomm.Create_cart` and `Intracomm.Create_graph`. Point-to-Point Communications ----------------------------- Point-to-point communication is a fundamental capability of message passing systems. This mechanism enables the transmission of data between a pair of processes, one side sending, the other receiving. MPI provides a set of *send* and *receive* functions allowing the communication of *typed* data with an associated *tag*. The type information enables the conversion of data representation from one architecture to another in the case of heterogeneous computing environments; additionally, it allows the representation of non-contiguous data layouts and user-defined datatypes, thus avoiding the overhead of (otherwise unavoidable) packing/unpacking operations. The tag information allows selectivity of messages at the receiving end. Blocking Communications ^^^^^^^^^^^^^^^^^^^^^^^ MPI provides basic send and receive functions that are *blocking*. These functions block the caller until the data buffers involved in the communication can be safely reused by the application program. In *MPI for Python*, the `Comm.Send`, `Comm.Recv` and `Comm.Sendrecv` methods of communicator objects provide support for blocking point-to-point communications within `Intracomm` and `Intercomm` instances. These methods can communicate memory buffers.
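For example, a minimal sketch of blocking buffer communication between two processes (an illustration only, assuming NumPy is available and the communicator has at least two ranks) could look like::

    from mpi4py import MPI
    import numpy

    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()

    if rank == 0:
        buf = numpy.arange(10, dtype='i')
        # blocking send of the memory buffer, with an explicit MPI datatype
        comm.Send([buf, MPI.INT], dest=1, tag=0)
    elif rank == 1:
        buf = numpy.empty(10, dtype='i')
        # blocking receive into a preallocated buffer
        comm.Recv([buf, MPI.INT], source=0, tag=0)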
The variants `Comm.send`, `Comm.recv` and `Comm.sendrecv` can communicate general Python objects. Nonblocking Communications ^^^^^^^^^^^^^^^^^^^^^^^^^^ On many systems, performance can be significantly increased by overlapping communication and computation. This is particularly true on systems where communication can be executed autonomously by an intelligent, dedicated communication controller. MPI provides *nonblocking* send and receive functions. They allow the possible overlap of communication and computation. Nonblocking communication always comes in two parts: posting functions, which begin the requested operation; and test-for-completion functions, which allow one to discover whether the requested operation has completed. In *MPI for Python*, the `Comm.Isend` and `Comm.Irecv` methods initiate send and receive operations, respectively. These methods return a `Request` instance, uniquely identifying the started operation. Its completion can be managed using the `Request.Test`, `Request.Wait` and `Request.Cancel` methods. The management of `Request` objects and associated memory buffers involved in communication requires careful, rather low-level coordination. Users must ensure that objects exposing their memory buffers are not accessed at the Python level while they are involved in nonblocking message-passing operations. Persistent Communications ^^^^^^^^^^^^^^^^^^^^^^^^^ Often a communication with the same argument list is repeatedly executed within an inner loop. In such cases, communication can be further optimized by using persistent communication, a particular case of nonblocking communication allowing the reduction of the overhead between processes and communication controllers. Furthermore, this kind of optimization can also alleviate the extra call overheads associated with interpreted, dynamic languages like Python. In *MPI for Python*, the `Comm.Send_init` and `Comm.Recv_init` methods create persistent requests for a send and receive operation, respectively. These methods return an instance of the `Prequest` class, a subclass of the `Request` class. The actual communication can be effectively started using the `Prequest.Start` method, and its completion can be managed as previously described. Collective Communications ------------------------- Collective communications allow the transmittal of data between multiple processes of a group simultaneously. The syntax and semantics of collective functions are consistent with point-to-point communication. Collective functions communicate *typed* data, but messages are not paired with an associated *tag*; selectivity of messages is implied in the calling order. Additionally, collective functions come in blocking versions only. The more commonly used collective communication operations are the following. * Barrier synchronization across all group members. * Global communication functions + Broadcast data from one member to all members of a group. + Gather data from all members to one member of a group. + Scatter data from one member to all members of a group. * Global reduction operations such as sum, maximum, minimum, etc. In *MPI for Python*, the `Comm.Bcast`, `Comm.Scatter`, `Comm.Gather`, `Comm.Allgather`, and `Comm.Alltoall` methods provide support for collective communications of memory buffers. The lower-case variants `Comm.bcast`, `Comm.scatter`, `Comm.gather`, `Comm.allgather` and `Comm.alltoall` can communicate general Python objects.
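As an illustration (a sketch only, not tied to any particular application), broadcasting a small Python dictionary with the lower-case variant could be written as::

    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    if comm.Get_rank() == 0:
        data = {'answer': 42}   # only the root holds the object initially
    else:
        data = None
    # after the call, every process holds an equal copy of the dictionary
    data = comm.bcast(data, root=0)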
The vector variants (which can communicate different amounts of data to each process) `Comm.Scatterv`, `Comm.Gatherv`, `Comm.Allgatherv`, `Comm.Alltoallv` and `Comm.Alltoallw` are also supported; however, they can only communicate objects exposing memory buffers. Global reduction operations on memory buffers are accessible through the `Comm.Reduce`, `Comm.Reduce_scatter`, `Comm.Allreduce`, `Intracomm.Scan` and `Intracomm.Exscan` methods. The lower-case variants `Comm.reduce`, `Comm.allreduce`, `Intracomm.scan` and `Intracomm.exscan` can communicate general Python objects; however, the actual required reduction computations are performed sequentially at some process. All the predefined (i.e., `SUM`, `PROD`, `MAX`, etc.) reduction operations can be applied. Support for GPU-aware MPI ------------------------- Several MPI implementations, including Open MPI and MVAPICH, support passing GPU pointers to MPI calls to avoid explicit data movement between host and device. On the Python side, support for handling GPU arrays has been implemented in many libraries related to GPU computation such as `CuPy`_, `Numba`_, `PyTorch`_, and `PyArrow`_. To maximize interoperability across library boundaries, two kinds of zero-copy data exchange protocols have been defined and agreed upon: `DLPack ` and `CUDA Array Interface (CAI) `. .. _CuPy: https://cupy.dev/ .. _Numba: https://numba.pydata.org/ .. _PyTorch: https://pytorch.org/ .. _PyArrow: https://arrow.apache.org/docs/python/ *MPI for Python* provides experimental support for GPU-aware MPI. This feature requires: 1. mpi4py is built against a GPU-aware MPI library. 2. The Python GPU arrays are compliant with either of the protocols. See the :doc:`tutorial` section for further information. We note that * Whether or not an MPI call can work for GPU arrays depends on the underlying MPI implementation, not on mpi4py. * This support is currently experimental and subject to change in the future. Dynamic Process Management -------------------------- In the context of the MPI-1 specification, a parallel application is static; that is, no processes can be added to or deleted from a running application after it has been started. Fortunately, this limitation was addressed in MPI-2. The new specification added a process management model providing a basic interface between an application and external resources and process managers. This MPI-2 extension can be very useful, especially for sequential applications built on top of parallel modules, or parallel applications with a client/server model. The MPI-2 process model provides a mechanism to create new processes and establish communication between them and the existing MPI application. It also provides mechanisms to establish communication between two existing MPI applications, even when one did not *start* the other. In *MPI for Python*, new independent process groups can be created by calling the `Intracomm.Spawn` method within an intracommunicator. This call returns a new intercommunicator (i.e., an `Intercomm` instance) at the parent process group. The child process group can retrieve the matching intercommunicator by calling the `Comm.Get_parent` class method. At each side, the new intercommunicator can be used to perform point-to-point and collective communications between the parent and child groups of processes. Alternatively, disjoint groups of processes can establish communication using a client/server approach.
Any server application must first call the `Open_port` function to open a *port* and the `Publish_name` function to publish a provided *service*, and next call the `Intracomm.Accept` method. Any client application can first find a published *service* by calling the `Lookup_name` function, which returns the *port* where a server can be contacted; and next call the `Intracomm.Connect` method. Both the `Intracomm.Accept` and `Intracomm.Connect` methods return an `Intercomm` instance. When the connection between client and server processes is no longer needed, all of them must cooperatively call the `Comm.Disconnect` method. Additionally, server applications should release resources by calling the `Unpublish_name` and `Close_port` functions. One-Sided Communications ------------------------ One-sided communications (also called *Remote Memory Access*, *RMA*) supplement the traditional two-sided, send/receive based MPI communication model with a one-sided, put/get based interface. One-sided communication can take advantage of the capabilities of highly specialized network hardware. Additionally, this extension lowers latency and software overhead in applications written using a shared-memory-like paradigm. The MPI specification revolves around the use of objects called *windows*; they intuitively specify regions of a process's memory that have been made available for remote read and write operations. The published memory blocks can be accessed through three functions for put (remote write), get (remote read), and accumulate (remote update or reduction) data items. A much larger number of functions support different synchronization styles; the semantics of these synchronization operations are fairly complex. In *MPI for Python*, one-sided operations are available by using instances of the `Win` class. New window objects are created by calling the `Win.Create` method at all processes within a communicator and specifying a memory buffer. When a window instance is no longer needed, the `Win.Free` method should be called. The three one-sided MPI operations for remote write, read and reduction are available by calling the methods `Win.Put`, `Win.Get`, and `Win.Accumulate`, respectively, within a `Win` instance. These methods need an integer rank identifying the target process and an integer offset relative to the base address of the remote memory block being accessed. The one-sided operations read, write, and reduction are implicitly nonblocking, and must be synchronized using one of two primary modes. Active target synchronization requires the origin process to call the `Win.Start` and `Win.Complete` methods, while the target process cooperates by calling the `Win.Post` and `Win.Wait` methods. There is also a collective variant provided by the `Win.Fence` method. Passive target synchronization is more lenient: only the origin process calls the `Win.Lock` and `Win.Unlock` methods. Locks are used to protect remote accesses to the locked remote window and to protect local load/store accesses to a locked local window. Parallel Input/Output --------------------- The POSIX standard provides a model of a widely portable file system. However, the optimization needed for parallel input/output cannot be achieved with this generic interface.
In order to ensure efficiency and scalability, the underlying parallel input/output system must provide a high-level interface supporting partitioning of file data among processes and a collective interface supporting complete transfers of global data structures between process memories and files. Additionally, further efficiencies can be gained via support for asynchronous input/output, strided accesses to data, and control over physical file layout on storage devices. This scenario motivated the inclusion in the MPI-2 standard of a custom interface in order to support more elaborated parallel input/output operations. The MPI specification for parallel input/output revolves around the use objects called *files*. As defined by MPI, files are not just contiguous byte streams. Instead, they are regarded as ordered collections of *typed* data items. MPI supports sequential or random access to any integral set of these items. Furthermore, files are opened collectively by a group of processes. The common patterns for accessing a shared file (broadcast, scatter, gather, reduction) is expressed by using user-defined datatypes. Compared to the communication patterns of point-to-point and collective communications, this approach has the advantage of added flexibility and expressiveness. Data access operations (read and write) are defined for different kinds of positioning (using explicit offsets, individual file pointers, and shared file pointers), coordination (non-collective and collective), and synchronism (blocking, nonblocking, and split collective with begin/end phases). In *MPI for Python*, all MPI input/output operations are performed through instances of the `File` class. File handles are obtained by calling the `File.Open` method at all processes within a communicator and providing a file name and the intended access mode. After use, they must be closed by calling the `File.Close` method. Files even can be deleted by calling method `File.Delete`. After creation, files are typically associated with a per-process *view*. The view defines the current set of data visible and accessible from an open file as an ordered set of elementary datatypes. This data layout can be set and queried with the `File.Set_view` and `File.Get_view` methods respectively. Actual input/output operations are achieved by many methods combining read and write calls with different behavior regarding positioning, coordination, and synchronism. Summing up, *MPI for Python* provides the thirty (30) methods defined in MPI-2 for reading from or writing to files using explicit offsets or file pointers (individual or shared), in blocking or nonblocking and collective or noncollective versions. Environmental Management ------------------------ Initialization and Exit ^^^^^^^^^^^^^^^^^^^^^^^ Module functions `Init` or `Init_thread` and `Finalize` provide MPI initialization and finalization respectively. Module functions `Is_initialized` and `Is_finalized` provide the respective tests for initialization and finalization. .. note:: :c:func:`MPI_Init` or :c:func:`MPI_Init_thread` is actually called when you import the :mod:`~mpi4py.MPI` module from the :mod:`mpi4py` package, but only if MPI is not already initialized. In such case, calling `Init` or `Init_thread` from Python is expected to generate an MPI error, and in turn an exception will be raised. .. 
note:: :c:func:`MPI_Finalize` is registered (by using Python C/API function :c:func:`Py_AtExit`) for being automatically called when Python processes exit, but only if :mod:`mpi4py` actually initialized MPI. Therefore, there is no need to call `Finalize` from Python to ensure MPI finalization. Implementation Information ^^^^^^^^^^^^^^^^^^^^^^^^^^ * The MPI version number can be retrieved from module function `Get_version`. It returns a two-integer tuple ``(version, subversion)``. * The `Get_processor_name` function can be used to access the processor name. * The values of predefined attributes attached to the world communicator can be obtained by calling the `Comm.Get_attr` method within the `COMM_WORLD` instance. Timers ^^^^^^ MPI timer functionalities are available through the `Wtime` and `Wtick` functions. Error Handling ^^^^^^^^^^^^^^ In order to facilitate handle sharing with other Python modules interfacing MPI-based parallel libraries, the predefined MPI error handlers `ERRORS_RETURN` and `ERRORS_ARE_FATAL` can be assigned to and retrieved from communicators using methods `Comm.Set_errhandler` and `Comm.Get_errhandler`, and similarly for windows and files. New custom error handlers can be created with `Comm.Create_errhandler`. When the predefined error handler `ERRORS_RETURN` is set, errors returned from MPI calls within Python code will raise an instance of the exception class `Exception`, which is a subclass of the standard Python exception `python:RuntimeError`. .. note:: After import, mpi4py overrides the default MPI rules governing inheritance of error handlers. The `ERRORS_RETURN` error handler is set in the predefined `COMM_SELF` and `COMM_WORLD` communicators, as well as any new `Comm`, `Win`, or `File` instance created through mpi4py. If you ever pass such handles to C/C++/Fortran library code, it is recommended to set the `ERRORS_ARE_FATAL` error handler on them to ensure MPI errors do not pass silently. .. warning:: Importing with ``from mpi4py.MPI import *`` will cause a name clashing with the standard Python `python:Exception` base class. mpi4py-4.0.3/docs/source/reference.rst000066400000000000000000000001351475341043600176660ustar00rootroot00000000000000.. _reference: Reference ========= .. autosummary:: :toctree: reference/ mpi4py.MPI mpi4py-4.0.3/docs/source/tutorial.rst000066400000000000000000000350161475341043600176010ustar00rootroot00000000000000.. _tutorial: Tutorial ======== .. currentmodule:: mpi4py.MPI .. warning:: Under construction. Contributions very welcome! .. tip:: `Rolf Rabenseifner`_ at `HLRS`_ developed a comprehensive MPI-3.1/4.0 course with slides and a large set of exercises including solutions. This material is `available online `_ for self-study. The slides and exercises show the C, Fortran, and Python (mpi4py) interfaces. For performance reasons, most Python exercises use NumPy arrays and communication routines involving buffer-like objects. .. _Rolf Rabenseifner: https://www.hlrs.de/people/rolf-rabenseifner .. _HLRS: https://www.hlrs.de/ .. _hlrs-mpi: https://www.hlrs.de/training/self-study-materials/mpi-course-material .. tip:: `Victor Eijkhout`_ at `TACC`_ authored the book *Parallel Programming for Science and Engineering*. This book is `available online `_ in PDF and `HTML `_ formats. The book covers parallel programming with MPI and OpenMP in C/C++ and Fortran, and MPI in Python using mpi4py. .. _Victor Eijkhout: https://tacc.utexas.edu/about/staff-directory/victor-eijkhout .. _TACC: https://tacc.utexas.edu/ .. 
_ppse-book: https://theartofhpc.com/pcse.html .. _ppse-html: https://theartofhpc.com/pcse/index.html *MPI for Python* supports convenient, *pickle*-based communication of generic Python object as well as fast, near C-speed, direct array data communication of buffer-provider objects (e.g., NumPy arrays). * Communication of generic Python objects You have to use methods with **all-lowercase** names, like `Comm.send`, `Comm.recv`, `Comm.bcast`, `Comm.scatter`, `Comm.gather` . An object to be sent is passed as a parameter to the communication call, and the received object is simply the return value. The `Comm.isend` and `Comm.irecv` methods return `Request` instances; completion of these methods can be managed using the `Request.test` and `Request.wait` methods. The `Comm.recv` and `Comm.irecv` methods may be passed a buffer object that can be repeatedly used to receive messages avoiding internal memory allocation. This buffer must be sufficiently large to accommodate the transmitted messages; hence, any buffer passed to `Comm.recv` or `Comm.irecv` must be at least as long as the *pickled* data transmitted to the receiver. Collective calls like `Comm.scatter`, `Comm.gather`, `Comm.allgather`, `Comm.alltoall` expect a single value or a sequence of `Comm.size` elements at the root or all process. They return a single value, a list of `Comm.size` elements, or `None`. .. note:: *MPI for Python* uses the **highest** :ref:`protocol version ` available in the Python runtime (see the :data:`~pickle.HIGHEST_PROTOCOL` constant in the :mod:`pickle` module). The default protocol can be changed at import time by setting the :envvar:`MPI4PY_PICKLE_PROTOCOL` environment variable, or at runtime by assigning a different value to the :attr:`~mpi4py.MPI.Pickle.PROTOCOL` attribute of the :obj:`~mpi4py.MPI.pickle` object within the :mod:`~mpi4py.MPI` module. * Communication of buffer-like objects You have to use method names starting with an **upper-case** letter, like `Comm.Send`, `Comm.Recv`, `Comm.Bcast`, `Comm.Scatter`, `Comm.Gather`. In general, buffer arguments to these calls must be explicitly specified by using a 2/3-list/tuple like ``[data, MPI.DOUBLE]``, or ``[data, count, MPI.DOUBLE]`` (the former one uses the byte-size of ``data`` and the extent of the MPI datatype to define ``count``). For vector collectives communication operations like `Comm.Scatterv` and `Comm.Gatherv`, buffer arguments are specified as ``[data, count, displ, datatype]``, where ``count`` and ``displ`` are sequences of integral values. Automatic MPI datatype discovery for NumPy/GPU arrays and PEP-3118 buffers is supported, but limited to basic C types (all C/C99-native signed/unsigned integral types and single/double precision real/complex floating types) and availability of matching datatypes in the underlying MPI implementation. In this case, the buffer-provider object can be passed directly as a buffer argument, the count and MPI datatype will be inferred. If mpi4py is built against a GPU-aware MPI implementation, GPU arrays can be passed to upper-case methods as long as they have either the ``__dlpack__`` and ``__dlpack_device__`` methods or the ``__cuda_array_interface__`` attribute that are compliant with the respective standard specifications. Moreover, only C-contiguous or Fortran-contiguous GPU arrays are supported. It is important to note that GPU buffers must be fully ready before any MPI routines operate on them to avoid race conditions. This can be ensured by using the synchronization API of your array library. 
mpi4py does not have access to any GPU-specific functionality and thus cannot perform this operation automatically for users. Running Python scripts with MPI ------------------------------- Most MPI programs can be run with the command :program:`mpiexec`. In practice, running Python programs looks like:: $ mpiexec -n 4 python script.py to run the program with 4 processors. Point-to-Point Communication ---------------------------- * Python objects (:mod:`pickle` under the hood):: from mpi4py import MPI comm = MPI.COMM_WORLD rank = comm.Get_rank() if rank == 0: data = {'a': 7, 'b': 3.14} comm.send(data, dest=1, tag=11) elif rank == 1: data = comm.recv(source=0, tag=11) * Python objects with non-blocking communication:: from mpi4py import MPI comm = MPI.COMM_WORLD rank = comm.Get_rank() if rank == 0: data = {'a': 7, 'b': 3.14} req = comm.isend(data, dest=1, tag=11) req.wait() elif rank == 1: req = comm.irecv(source=0, tag=11) data = req.wait() * NumPy arrays (the fast way!):: from mpi4py import MPI import numpy comm = MPI.COMM_WORLD rank = comm.Get_rank() # passing MPI datatypes explicitly if rank == 0: data = numpy.arange(1000, dtype='i') comm.Send([data, MPI.INT], dest=1, tag=77) elif rank == 1: data = numpy.empty(1000, dtype='i') comm.Recv([data, MPI.INT], source=0, tag=77) # automatic MPI datatype discovery if rank == 0: data = numpy.arange(100, dtype=numpy.float64) comm.Send(data, dest=1, tag=13) elif rank == 1: data = numpy.empty(100, dtype=numpy.float64) comm.Recv(data, source=0, tag=13) Collective Communication ------------------------ * Broadcasting a Python dictionary:: from mpi4py import MPI comm = MPI.COMM_WORLD rank = comm.Get_rank() if rank == 0: data = {'key1' : [7, 2.72, 2+3j], 'key2' : ( 'abc', 'xyz')} else: data = None data = comm.bcast(data, root=0) * Scattering Python objects:: from mpi4py import MPI comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() if rank == 0: data = [(i+1)**2 for i in range(size)] else: data = None data = comm.scatter(data, root=0) assert data == (rank+1)**2 * Gathering Python objects:: from mpi4py import MPI comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() data = (rank+1)**2 data = comm.gather(data, root=0) if rank == 0: for i in range(size): assert data[i] == (i+1)**2 else: assert data is None * Broadcasting a NumPy array:: from mpi4py import MPI import numpy as np comm = MPI.COMM_WORLD rank = comm.Get_rank() if rank == 0: data = np.arange(100, dtype='i') else: data = np.empty(100, dtype='i') comm.Bcast(data, root=0) for i in range(100): assert data[i] == i * Scattering NumPy arrays:: from mpi4py import MPI import numpy as np comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() sendbuf = None if rank == 0: sendbuf = np.empty([size, 100], dtype='i') sendbuf.T[:,:] = range(size) recvbuf = np.empty(100, dtype='i') comm.Scatter(sendbuf, recvbuf, root=0) assert np.allclose(recvbuf, rank) * Gathering NumPy arrays:: from mpi4py import MPI import numpy as np comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() sendbuf = np.zeros(100, dtype='i') + rank recvbuf = None if rank == 0: recvbuf = np.empty([size, 100], dtype='i') comm.Gather(sendbuf, recvbuf, root=0) if rank == 0: for i in range(size): assert np.allclose(recvbuf[i,:], i) * Parallel matrix-vector product:: from mpi4py import MPI import numpy def matvec(comm, A, x): m = A.shape[0] # local rows p = comm.Get_size() xg = numpy.zeros(m*p, dtype='d') comm.Allgather([x, MPI.DOUBLE], [xg, MPI.DOUBLE]) y = numpy.dot(A, xg) return y 
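The routine above can be driven as sketched below; this driver is illustrative only and assumes every process owns ``m`` rows of the matrix and an ``m``-element slice of the input vector, as ``matvec`` requires::

    import numpy
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    m = 4                            # rows owned by this process
    p = comm.Get_size()
    A = numpy.random.rand(m, m * p)  # local block of rows (float64)
    x = numpy.random.rand(m)         # local slice of the input vector
    y = matvec(comm, A, x)           # local slice of the result
    assert y.shape == (m,)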
Input/Output (MPI-IO) --------------------- * Collective I/O with NumPy arrays:: from mpi4py import MPI import numpy as np amode = MPI.MODE_WRONLY|MPI.MODE_CREATE comm = MPI.COMM_WORLD fh = MPI.File.Open(comm, "./datafile.contig", amode) buffer = np.empty(10, dtype=np.int) buffer[:] = comm.Get_rank() offset = comm.Get_rank()*buffer.nbytes fh.Write_at_all(offset, buffer) fh.Close() * Non-contiguous Collective I/O with NumPy arrays and datatypes:: from mpi4py import MPI import numpy as np comm = MPI.COMM_WORLD rank = comm.Get_rank() size = comm.Get_size() amode = MPI.MODE_WRONLY|MPI.MODE_CREATE fh = MPI.File.Open(comm, "./datafile.noncontig", amode) item_count = 10 buffer = np.empty(item_count, dtype='i') buffer[:] = rank filetype = MPI.INT.Create_vector(item_count, 1, size) filetype.Commit() displacement = MPI.INT.Get_size()*rank fh.Set_view(displacement, filetype=filetype) fh.Write_all(buffer) filetype.Free() fh.Close() Dynamic Process Management -------------------------- * Compute Pi - Master (or parent, or client) side:: #!/usr/bin/env python from mpi4py import MPI import numpy import sys comm = MPI.COMM_SELF.Spawn(sys.executable, args=['cpi.py'], maxprocs=5) N = numpy.array(100, 'i') comm.Bcast([N, MPI.INT], root=MPI.ROOT) PI = numpy.array(0.0, 'd') comm.Reduce(None, [PI, MPI.DOUBLE], op=MPI.SUM, root=MPI.ROOT) print(PI) comm.Disconnect() * Compute Pi - Worker (or child, or server) side:: #!/usr/bin/env python from mpi4py import MPI import numpy comm = MPI.Comm.Get_parent() size = comm.Get_size() rank = comm.Get_rank() N = numpy.array(0, dtype='i') comm.Bcast([N, MPI.INT], root=0) h = 1.0 / N; s = 0.0 for i in range(rank, N, size): x = h * (i + 0.5) s += 4.0 / (1.0 + x**2) PI = numpy.array(s * h, dtype='d') comm.Reduce([PI, MPI.DOUBLE], None, op=MPI.SUM, root=0) comm.Disconnect() GPU-aware MPI + Python GPU arrays --------------------------------- * Reduce-to-all CuPy arrays:: from mpi4py import MPI import cupy as cp comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() sendbuf = cp.arange(10, dtype='i') recvbuf = cp.empty_like(sendbuf) cp.cuda.get_current_stream().synchronize() comm.Allreduce(sendbuf, recvbuf) assert cp.allclose(recvbuf, sendbuf*size) One-Sided Communication (RMA) ----------------------------- * Read from (write to) the entire RMA window:: import numpy as np from mpi4py import MPI from mpi4py.util import dtlib comm = MPI.COMM_WORLD rank = comm.Get_rank() datatype = MPI.FLOAT np_dtype = dtlib.to_numpy_dtype(datatype) itemsize = datatype.Get_size() N = 10 win_size = N * itemsize if rank == 0 else 0 win = MPI.Win.Allocate(win_size, comm=comm) buf = np.empty(N, dtype=np_dtype) if rank == 0: buf.fill(42) win.Lock(rank=0) win.Put(buf, target_rank=0) win.Unlock(rank=0) comm.Barrier() else: comm.Barrier() win.Lock(rank=0) win.Get(buf, target_rank=0) win.Unlock(rank=0) assert np.all(buf == 42) * Accessing a part of the RMA window using the ``target`` argument, which is defined as ``(offset, count, datatype)``:: import numpy as np from mpi4py import MPI from mpi4py.util import dtlib comm = MPI.COMM_WORLD rank = comm.Get_rank() datatype = MPI.FLOAT np_dtype = dtlib.to_numpy_dtype(datatype) itemsize = datatype.Get_size() N = comm.Get_size() + 1 win_size = N * itemsize if rank == 0 else 0 win = MPI.Win.Allocate( size=win_size, disp_unit=itemsize, comm=comm, ) if rank == 0: mem = np.frombuffer(win, dtype=np_dtype) mem[:] = np.arange(len(mem), dtype=np_dtype) comm.Barrier() buf = np.zeros(3, dtype=np_dtype) target = (rank, 2, datatype) win.Lock(rank=0) win.Get(buf, 
target_rank=0, target=target) win.Unlock(rank=0) assert np.all(buf == [rank, rank+1, 0]) Wrapping with SWIG ------------------ * C source: .. sourcecode:: c /* file: helloworld.c */ void sayhello(MPI_Comm comm) { int size, rank; MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank); printf("Hello, World! " "I am process %d of %d.\n", rank, size); } * SWIG interface file: .. sourcecode:: c // file: helloworld.i %module helloworld %{ #include #include "helloworld.c" }% %include mpi4py/mpi4py.i %mpi4py_typemap(Comm, MPI_Comm); void sayhello(MPI_Comm comm); * Try it in the Python prompt:: >>> from mpi4py import MPI >>> import helloworld >>> helloworld.sayhello(MPI.COMM_WORLD) Hello, World! I am process 0 of 1. Wrapping with F2Py ------------------ * Fortran 90 source: .. sourcecode:: fortran ! file: helloworld.f90 subroutine sayhello(comm) use mpi implicit none integer :: comm, rank, size, ierr call MPI_Comm_size(comm, size, ierr) call MPI_Comm_rank(comm, rank, ierr) print *, 'Hello, World! I am process ',rank,' of ',size,'.' end subroutine sayhello * Compiling example using f2py :: $ f2py -c --f90exec=mpif90 helloworld.f90 -m helloworld * Try it in the Python prompt:: >>> from mpi4py import MPI >>> import helloworld >>> fcomm = MPI.COMM_WORLD.py2f() >>> helloworld.sayhello(fcomm) Hello, World! I am process 0 of 1. mpi4py-4.0.3/makefile000066400000000000000000000020201475341043600144410ustar00rootroot00000000000000.PHONY: default default: build default: opt=--inplace PYTHON = python$(py) MPIEXEC = mpiexec # ---- .PHONY: config build test config: $(PYTHON) setup.py config $(opt) build: $(PYTHON) setup.py build $(opt) test: $(VALGRIND) $(PYTHON) $(PWD)/test/main.py $(opt) test-%: $(MPIEXEC) -n $* $(VALGRIND) $(PYTHON) $(PWD)/test/main.py $(opt) .PHONY: srcbuild srcclean srcbuild: $(PYTHON) setup.py build_src $(opt) srcclean: $(RM) src/mpi4py/MPI.c $(RM) src/mpi4py/MPI.h $(RM) src/mpi4py/MPI_api.h .PHONY: clean distclean fullclean clean: $(PYTHON) setup.py clean --all distclean: clean srcclean $(RM) -r build _configtest* $(RM) -r .*_cache .eggs .tox $(RM) -r htmlcov .coverage .coverage.* $(RM) src/mpi4py/MPI.*.so find . -name __pycache__ | xargs $(RM) -r fullclean: distclean find . -name '*~' -exec $(RM) -f {} ';' # ---- .PHONY: install editable uninstall install: $(PYTHON) -m pip install $(opt) . editable: $(PYTHON) -m pip install --editable $(opt) . 
uninstall: $(PYTHON) -m pip uninstall $(opt) mpi4py # ---- mpi4py-4.0.3/meson.build000066400000000000000000000074441475341043600151220ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com project( 'mpi4py', 'c', # 'cython', version: run_command( find_program('python3'), files('conf' / 'metadata.py'), 'version', check: true, ).stdout().strip(), license: 'BSD-3-Clause', meson_version: '>=1.0.0', ) # --- fs = import('fs') py = import('python').find_installation('python3', pure: false) srcdir = 'src' dstdir = py.get_install_dir(pure: false) # --- if host_machine.system() == 'windows' mpi = dependency('mpi', required: true) else mpi = dependency('', required: false) endif pyembed = py.dependency(embed: true, disabler: true) compiler = meson.get_compiler('c') configtest_c = ''' #include int main(int argc, char *argv[]) { MPI_Init(&argc, &argv); MPI_Finalize(); return 0; } ''' advise = ''' Set environment variable `CC=mpicc` and try again.''' if not compiler.has_header('mpi.h', dependencies: [mpi]) error('MPI C header file "mpi.h" not found.', advise) endif if not compiler.has_function('MPI_Init', dependencies: [mpi]) error('MPI C function MPI_Init not found.', advise) endif if not compiler.has_function('MPI_Finalize', dependencies: [mpi]) error('MPI C function MPI_Finalize not found.', advise) endif if not compiler.compiles(configtest_c, dependencies: [mpi]) error('Cannot compile basic MPI program.', advise) endif if not compiler.links(configtest_c, dependencies: [mpi]) error('Cannot link basic MPI program.', advise) endif # --- echo = [py, '-c', 'import sys; print(*sys.argv[1:], sep=chr(10))'] copy = [py, '-c', 'import shutil, sys; shutil.copy(*sys.argv[1:])'] # --- cython = [py, files('conf' / 'cythonize.py')] cython_flags = ['--3str', '--cleanup', '3'] MPI_ch = custom_target( 'MPI.[ch]', input: srcdir / 'mpi4py' / 'MPI.pyx', output: ['MPI.c', 'MPI.h', 'MPI_api.h'], command: [cython, cython_flags, '@INPUT@', '--output-file', '@OUTPUT0@'], install: true, install_dir: [false, dstdir / 'mpi4py', dstdir / 'mpi4py'] ) py.extension_module( 'MPI', sources: MPI_ch[0], include_directories: [srcdir], implicit_include_directories: false, dependencies: [mpi], subdir: 'mpi4py', install: true, ) executable( 'python-mpi', sources: srcdir / 'python.c', implicit_include_directories: false, dependencies: [pyembed, mpi], install: true, install_dir: dstdir / 'mpi4py' / 'bin', ) package = { 'mpi4py': [ '__init__.py', '__main__.py', 'bench.py', 'run.py', 'typing.py', '__init__.pxd', 'libmpi.pxd', 'MPI.pxd', ], 'mpi4py.futures': [ '__init__.py', '__main__.py', '_base.py', '_core.py', 'pool.py', 'util.py', 'server.py', 'aplus.py', ], 'mpi4py.util': [ '__init__.py', 'pkl5.py', 'dtlib.py', 'pool.py', 'sync.py', ], } foreach pkg, src : package subdir = join_paths(pkg.split('.')) sources = [] foreach fn : src sources += srcdir / subdir / fn endforeach py.install_sources( sources, pure: false, subdir: subdir, ) endforeach install_subdir( srcdir / 'mpi4py' / 'include', install_dir: dstdir / 'mpi4py', ) if py.language_version().version_compare('>=3.8') foreach pkg, src : package subdir = join_paths(pkg.split('.')) sources = [] if not pkg.contains('.') sources += srcdir / subdir / 'py.typed' sources += srcdir / subdir / 'MPI.pyi' endif foreach fn : src if fn.endswith('.py') sources += srcdir / subdir / fn + 'i' endif endforeach py.install_sources( sources, pure: false, subdir: subdir, ) endforeach endif if host_machine.system() == 'windows' mpidllpath = ['mpi.pth'] foreach fn : mpidllpath if 
fs.exists(srcdir / fn) py.install_sources( srcdir / fn, pure: false, subdir: '', ) endif endforeach endif # --- mpi4py-4.0.3/mpi.cfg000066400000000000000000000064721475341043600142260ustar00rootroot00000000000000# Some Linux distributions have RPM's for some MPI implementations. # In such a case, headers and libraries usually are in default system # locations, and you should not need any special configuration. # If you do not have MPI distribution in a default location, please # uncomment and fill-in appropriately the following lines. Yo can use # as examples the [mpich], [openmpi], and [msmpi] sections # below the [mpi] section (which is the one used by default). # If you specify multiple locations for includes and libraries, # please separate them with the path separator for your platform, # i.e., ':' on Unix-like systems and ';' on Windows # Default configuration # --------------------- [mpi] ## mpi_dir = /usr ## mpi_dir = /usr/local ## mpi_dir = /usr/local/mpi ## mpi_dir = /opt ## mpi_dir = /opt/mpi ## mpi_dir = = $ProgramFiles\MPI ## mpicc = %(mpi_dir)s/bin/mpicc ## mpicxx = %(mpi_dir)s/bin/mpicxx ## define_macros = ## undef_macros = ## include_dirs = %(mpi_dir)s/include ## libraries = mpi ## library_dirs = %(mpi_dir)s/lib ## runtime_library_dirs = %(mpi_dir)s/lib ## extra_compile_args = ## extra_link_args = ## extra_objects = # MPICH example # ------------- [mpich] mpi_dir = /home/devel/mpi/mpich/4.2.2 mpicc = %(mpi_dir)s/bin/mpicc mpicxx = %(mpi_dir)s/bin/mpicxx #include_dirs = %(mpi_dir)s/include #libraries = mpi #library_dirs = %(mpi_dir)s/lib #runtime_library_dirs = %(library_dirs)s # Open MPI example # ---------------- [openmpi] mpi_dir = /home/devel/mpi/openmpi/5.0.2 mpicc = %(mpi_dir)s/bin/mpicc mpicxx = %(mpi_dir)s/bin/mpicxx #include_dirs = %(mpi_dir)s/include #libraries = mpi #library_dirs = %(mpi_dir)s/lib #runtime_library_dirs = %(library_dirs)s # Fujitsu MPI example # ------------------- [fujitsu-mpi] mpicc = mpifcc mpicxx = mpiFCC define_macros = OPENMPI_DLOPEN_LIBMPI=1 extra_compile_args = -Nclang extra_link_args = -Knolargepage # Intel MPI example # ----------------- [impi-linux] mpi_dir = /opt/intel/oneapi/mpi/latest mpicc = %(mpi_dir)s/bin/mpicc mpicxx = %(mpi_dir)s/bin/mpicxx #include_dirs = %(mpi_dir)s/include #libraries = mpi #library_dirs = %(mpi_dir)s/lib/release #runtime_library_dirs = %(library_dirs)s [impi-windows] mpi_dir = $ProgramFiles\Intel\oneAPI\mpi\latest include_dirs = %(mpi_dir)s\include libraries = impi library_dirs = %(mpi_dir)s\lib\release # Microsoft MPI example # --------------------- [msmpi-64bit] mpi_dir = $ProgramFiles\Microsoft SDKs\MPI include_dirs = %(mpi_dir)s\Include libraries = msmpi library_dirs = %(mpi_dir)s\Lib\x64 [msmpi-32bit] mpi_dir = $ProgramFiles\Microsoft SDKs\MPI include_dirs = %(mpi_dir)s\Include libraries = msmpi library_dirs = %(mpi_dir)s\Lib\x86 # MPI stubs # --------- [stubs] mpi_dir = $MPI_STUBS_ROOT include_dirs = %(mpi_dir)s/include libraries = mpi_abi library_dirs = %(mpi_dir)s/lib # MPIUNI (PETSc) # -------------- [mpiuni] include_dirs = conf/mpiuni:$PETSC_DIR/include:$PETSC_DIR/$PETSC_ARCH/include # NoMPI # ----- [nompi] include_dirs = conf/nompi [nompi-fast] include_dirs = conf/nompi define_macros = HAVE_PYMPICONF_H=1 mpi4py-4.0.3/pyproject.toml000066400000000000000000000001521475341043600156610ustar00rootroot00000000000000[build-system] requires = ["setuptools >= 42", "build"] build-backend = "builder" backend-path = ["conf"] 
mpi4py-4.0.3/setup.cfg000066400000000000000000000001301475341043600145620ustar00rootroot00000000000000[config] # mpicc = mpicc # mpicxx = mpicxx [build] # debug = 0 # compiler = mingw32 mpi4py-4.0.3/setup.py000066400000000000000000000125631475341043600144700ustar00rootroot00000000000000#!/usr/bin/env python # Author: Lisandro Dalcin # Contact: dalcinl@gmail.com """mpi4py: Python bindings for MPI.""" # ruff: noqa: C408 # ruff: noqa: D103 # ruff: noqa: S101 import os import sys import glob topdir = os.path.dirname(os.path.abspath(__file__)) sys.path.insert(0, os.path.join(topdir, 'conf')) # -------------------------------------------------------------------- # Metadata # -------------------------------------------------------------------- require_python = (3, 6) maxknow_python = (3, 13) def get_metadata(): import metadata as md req_py = '>={}.{}'.format(*require_python) assert req_py == md.requires_python author = md.authors[0] readme = md.get_readme() return { # distutils 'name' : md.get_name(), 'version' : md.get_version(), 'description' : md.description, 'long_description' : readme['text'], 'classifiers' : md.classifiers, 'keywords' : md.keywords, 'license' : md.license, 'author' : author['name'], 'author_email' : author['email'], # setuptools 'project_urls': md.urls, 'python_requires': md.requires_python, 'long_description_content_type': readme['content-type'], } # -------------------------------------------------------------------- # Extension modules # -------------------------------------------------------------------- def sources(): # mpi4py.MPI MPI = dict( source='src/mpi4py/MPI.pyx', depends=[ 'src/mpi4py/*.pyx', 'src/mpi4py/*.pxd', 'src/mpi4py/MPI.src/*.pyx', 'src/mpi4py/MPI.src/*.pxi', ], ) # return [MPI] def extensions(): import mpidistutils # MPI extension module MPI = dict( name='mpi4py.MPI', sources=['src/mpi4py/MPI.c'], depends=( glob.glob('src/*.h') + glob.glob('src/lib-mpi/*.h') + glob.glob('src/lib-mpi/config/*.h') + glob.glob('src/lib-mpi/compat/*.h') ), include_dirs=['src'], define_macros=[], configure=mpidistutils.configure_mpi, ) if sys.version_info[:2] > maxknow_python: api = '0x{:02x}{:02x}0000'.format(*maxknow_python) MPI['define_macros'].extend([ ('CYTHON_LIMITED_API', api), ]) if os.environ.get('CIBUILDWHEEL') == '1': MPI['define_macros'].extend([ ('CIBUILDWHEEL', 1), ]) # return [MPI] def executables(): import mpidistutils # MPI-enabled Python interpreter pyexe = dict( name='python-mpi', optional=True, package='mpi4py', dest_dir='bin', sources=['src/python.c'], configure=mpidistutils.configure_pyexe, ) # return [pyexe] # -------------------------------------------------------------------- # Setup # -------------------------------------------------------------------- package_info = dict( packages = [ 'mpi4py', 'mpi4py.futures', 'mpi4py.util', ], package_data = { 'mpi4py' : [ '*.pxd', 'MPI*.h', 'include/mpi4py/*.h', 'include/mpi4py/*.i', 'include/mpi4py/*.pxi', 'py.typed', '*.pyi', '*/*.pyi', ], }, package_dir = {'' : 'src'}, ) if sys.version_info < (3, 8): del package_info['package_data']['mpi4py'][-3:] def run_setup(): """Call setuptools.setup(*args, **kwargs).""" try: import setuptools except ImportError as exc: setuptools = None if sys.version_info >= (3, 12): sys.exit(exc) from mpidistutils import setup from mpidistutils import Extension as Ext from mpidistutils import Executable as Exe # from mpidistutils import build_src build_src.sources = sources() # metadata = get_metadata() builder_args = dict( ext_modules = [Ext(**ext) for ext in extensions()], 
executables = [Exe(**exe) for exe in executables()], ) if setuptools: builder_args['zip_safe'] = False else: metadata.pop('project_urls') metadata.pop('python_requires') metadata.pop('long_description_content_type') # setup_args = dict(i for d in ( metadata, package_info, builder_args, ) for i in d.items()) # setup(**setup_args) def run_skbuild(): """Call setuptools.setup(*args, **kwargs).""" from setuptools import setup # metadata = get_metadata() builder_args = dict( cmake_source_dir = '.', ) # setup_args = dict(i for d in ( metadata, package_info, builder_args, ) for i in d.items()) # setup(**setup_args) # -------------------------------------------------------------------- def main(): try: import builder name = builder.get_build_backend_name() except RuntimeError as exc: sys.exit(exc) if name == 'setuptools': run_setup() if name == 'skbuild': run_skbuild() if __name__ == '__main__': if sys.version_info < require_python: raise SystemExit( "error: requires Python version " + ".".join(map(str, require_python)) ) main() # -------------------------------------------------------------------- mpi4py-4.0.3/src/000077500000000000000000000000001475341043600135365ustar00rootroot00000000000000mpi4py-4.0.3/src/lib-mpi/000077500000000000000000000000001475341043600150675ustar00rootroot00000000000000mpi4py-4.0.3/src/lib-mpi/compat.h000066400000000000000000000010621475341043600165220ustar00rootroot00000000000000#if defined(PyMPI_ABI) #include "compat/mpiabi.h" #elif defined(I_MPI_NUMVERSION) #include "compat/impi.h" #elif defined(MSMPI_VER) #include "compat/msmpi.h" #elif defined(MPICH_NAME) && (MPICH_NAME >= 4) #include "compat/mpich.h" #elif defined(MPICH_NAME) && (MPICH_NAME == 3) #include "compat/mpich3.h" #elif defined(MPICH_NAME) && (MPICH_NAME == 2) #include "compat/mpich2.h" #elif defined(MPICH_NAME) && (MPICH_NAME == 1) #include "compat/mpich1.h" #elif defined(OPEN_MPI) #include "compat/openmpi.h" #elif defined(LAM_MPI) #include "compat/lammpi.h" #endif mpi4py-4.0.3/src/lib-mpi/compat/000077500000000000000000000000001475341043600163525ustar00rootroot00000000000000mpi4py-4.0.3/src/lib-mpi/compat/impi.h000066400000000000000000000015451475341043600174660ustar00rootroot00000000000000#ifndef PyMPI_COMPAT_IMPI_H #define PyMPI_COMPAT_IMPI_H /* -------------------------------------------------------------------------- */ static int PyMPI_IMPI_MPI_Initialized(int *flag) { int ierr; ierr = MPI_Initialized(flag); if (ierr) return ierr; if (!flag || *flag) return MPI_SUCCESS; ierr = MPI_Finalized(flag); if (ierr) return ierr; return MPI_SUCCESS; } #define MPI_Initialized PyMPI_IMPI_MPI_Initialized /* -------------------------------------------------------------------------- */ /* https://github.com/mpi4py/mpi4py/issues/418#issuecomment-2026805886 */ #if I_MPI_NUMVERSION == 20211200300 #undef MPI_Status_c2f #define MPI_Status_c2f PMPI_Status_c2f #undef MPI_Status_f2c #define MPI_Status_f2c PMPI_Status_f2c #endif /* -------------------------------------------------------------------------- */ #endif /* !PyMPI_COMPAT_IMPI_H */ mpi4py-4.0.3/src/lib-mpi/compat/lammpi.h000066400000000000000000000271221475341043600200060ustar00rootroot00000000000000#ifndef PyMPI_COMPAT_LAMMPI_H #define PyMPI_COMPAT_LAMMPI_H /* ---------------------------------------------------------------- */ static int PyMPI_LAMMPI_MPI_Info_free(MPI_Info *info) { if (info == 0) return MPI_ERR_ARG; if (*info == MPI_INFO_NULL) return MPI_ERR_ARG; return MPI_Info_free(info); } #undef MPI_Info_free #define MPI_Info_free PyMPI_LAMMPI_MPI_Info_free 
/* ---------------------------------------------------------------- */ static int PyMPI_LAMMPI_MPI_Cancel(MPI_Request *request) { int ierr = MPI_SUCCESS; ierr = MPI_Cancel(request); if (ierr == MPI_ERR_ARG) { if (request != 0 && *request == MPI_REQUEST_NULL) ierr = MPI_ERR_REQUEST; } return ierr; } #undef MPI_Cancel #define MPI_Cancel PyMPI_LAMMPI_MPI_Cancel static int PyMPI_LAMMPI_MPI_Comm_disconnect(MPI_Comm *comm) { if (comm == 0) return MPI_ERR_ARG; if (*comm == MPI_COMM_NULL) return MPI_ERR_COMM; if (*comm == MPI_COMM_SELF) return MPI_ERR_COMM; if (*comm == MPI_COMM_WORLD) return MPI_ERR_COMM; return MPI_Comm_disconnect(comm); } #undef MPI_Comm_disconnect #define MPI_Comm_disconnect PyMPI_LAMMPI_MPI_Comm_disconnect /* ---------------------------------------------------------------- */ #if defined(__cplusplus) extern "C" { #endif struct _errhdl { void (*eh_func)(void); int eh_refcount; int eh_f77handle; int eh_flags; }; #if defined(__cplusplus) } #endif static int PyMPI_LAMMPI_Errhandler_free(MPI_Errhandler *errhandler) { if (errhandler == 0) return MPI_ERR_ARG; if (*errhandler == MPI_ERRORS_RETURN || *errhandler == MPI_ERRORS_ARE_FATAL) { struct _errhdl *eh = (struct _errhdl *) (*errhandler); eh->eh_refcount--; *errhandler = MPI_ERRHANDLER_NULL; return MPI_SUCCESS; } else { return MPI_Errhandler_free(errhandler); } } #undef MPI_Errhandler_free #define MPI_Errhandler_free PyMPI_LAMMPI_Errhandler_free /* -- */ static int PyMPI_LAMMPI_MPI_Comm_get_errhandler(MPI_Comm comm, MPI_Errhandler *errhandler) { int ierr = MPI_SUCCESS; if (comm == MPI_COMM_NULL) return MPI_ERR_COMM; if (errhandler == 0) return MPI_ERR_ARG; /* get error handler */ ierr = MPI_Errhandler_get(comm, errhandler); if (ierr != MPI_SUCCESS) return ierr; return MPI_SUCCESS; } #undef MPI_Errhandler_get #define MPI_Errhandler_get PyMPI_LAMMPI_MPI_Comm_get_errhandler #undef MPI_Comm_get_errhandler #define MPI_Comm_get_errhandler PyMPI_LAMMPI_MPI_Comm_get_errhandler static int PyMPI_LAMMPI_MPI_Comm_set_errhandler(MPI_Comm comm, MPI_Errhandler errhandler) { int ierr = MPI_SUCCESS, ierr2 = MPI_SUCCESS; MPI_Errhandler previous = MPI_ERRHANDLER_NULL; if (comm == MPI_COMM_NULL) return MPI_ERR_COMM; if (errhandler == MPI_ERRHANDLER_NULL) return MPI_ERR_ARG; /* get previous error handler*/ ierr2 = MPI_Errhandler_get(comm, &previous); if (ierr2 != MPI_SUCCESS) return ierr2; /* increment reference counter */ if (errhandler != MPI_ERRHANDLER_NULL) { struct _errhdl *eh = (struct _errhdl *) (errhandler); eh->eh_refcount++; } /* set error handler */ ierr = MPI_Errhandler_set(comm, errhandler); /* decrement reference counter */ if (errhandler != MPI_ERRHANDLER_NULL) { struct _errhdl *eh = (struct _errhdl *) (errhandler); eh->eh_refcount--; } /* free previous error handler*/ if (previous != MPI_ERRHANDLER_NULL) { ierr2 = MPI_Errhandler_free(&previous); } if (ierr != MPI_SUCCESS) return ierr; if (ierr2 != MPI_SUCCESS) return ierr2; return MPI_SUCCESS; } #undef MPI_Errhandler_set #define MPI_Errhandler_set PyMPI_LAMMPI_MPI_Comm_set_errhandler #undef MPI_Comm_set_errhandler #define MPI_Comm_set_errhandler PyMPI_LAMMPI_MPI_Comm_set_errhandler /* -- */ static int PyMPI_LAMMPI_MPI_Win_get_errhandler(MPI_Win win, MPI_Errhandler *errhandler) { int ierr = MPI_SUCCESS; if (win == MPI_WIN_NULL) return MPI_ERR_WIN; if (errhandler == 0) return MPI_ERR_ARG; /* get error handler */ ierr = MPI_Win_get_errhandler(win, errhandler); if (ierr != MPI_SUCCESS) return ierr; /* increment reference counter */ if (*errhandler != MPI_ERRHANDLER_NULL) { struct _errhdl 
*eh = (struct _errhdl *) (*errhandler); eh->eh_refcount++; } return MPI_SUCCESS; } #undef MPI_Win_get_errhandler #define MPI_Win_get_errhandler PyMPI_LAMMPI_MPI_Win_get_errhandler static int PyMPI_LAMMPI_MPI_Win_set_errhandler(MPI_Win win, MPI_Errhandler errhandler) { int ierr = MPI_SUCCESS, ierr2 = MPI_SUCCESS; MPI_Errhandler previous = MPI_ERRHANDLER_NULL; if (win == MPI_WIN_NULL) return MPI_ERR_WIN; if (errhandler == MPI_ERRHANDLER_NULL) return MPI_ERR_ARG; /* get previous error handler*/ ierr2 = MPI_Win_get_errhandler(win, &previous); if (ierr2 != MPI_SUCCESS) return ierr2; /* increment reference counter */ if (errhandler != MPI_ERRHANDLER_NULL) { struct _errhdl *eh = (struct _errhdl *) (errhandler); eh->eh_refcount++; } /* set error handler */ ierr = MPI_Win_set_errhandler(win, errhandler); /* decrement reference counter */ if (errhandler != MPI_ERRHANDLER_NULL) { struct _errhdl *eh = (struct _errhdl *) (errhandler); eh->eh_refcount--; } /* free previous error handler*/ if (previous != MPI_ERRHANDLER_NULL) { ierr2 = MPI_Errhandler_free(&previous); } if (ierr != MPI_SUCCESS) return ierr; if (ierr2 != MPI_SUCCESS) return ierr2; return MPI_SUCCESS; } #undef MPI_Win_set_errhandler #define MPI_Win_set_errhandler PyMPI_LAMMPI_MPI_Win_set_errhandler static int PyMPI_LAMMPI_MPI_Win_create(void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, MPI_Win *win) { int ierr = MPI_SUCCESS; MPI_Errhandler errhandler = MPI_ERRHANDLER_NULL; ierr = MPI_Win_create(base, size, disp_unit, info, comm, win); if (ierr != MPI_SUCCESS) return ierr; ierr = MPI_Win_get_errhandler(*win, &errhandler); if (ierr != MPI_SUCCESS) return ierr; return MPI_SUCCESS; } #undef MPI_Win_create #define MPI_Win_create PyMPI_LAMMPI_MPI_Win_create static int PyMPI_LAMMPI_MPI_Win_free(MPI_Win *win) { int ierr = MPI_SUCCESS, ierr2 = MPI_SUCCESS; MPI_Errhandler errhandler = MPI_ERRHANDLER_NULL; if (win != 0 && *win != MPI_WIN_NULL ) { MPI_Errhandler previous; ierr2 = MPI_Win_get_errhandler(*win, &previous); if (ierr2 != MPI_SUCCESS) return ierr2; errhandler = previous; if (previous != MPI_ERRHANDLER_NULL) { ierr2 = MPI_Errhandler_free(&previous); if (ierr2 != MPI_SUCCESS) return ierr2; } } ierr = MPI_Win_free(win); if (errhandler != MPI_ERRHANDLER_NULL) { ierr2 = MPI_Errhandler_free(&errhandler); if (ierr2 != MPI_SUCCESS) return ierr2; } if (ierr != MPI_SUCCESS) return ierr; return MPI_SUCCESS; } #undef MPI_Win_free #define MPI_Win_free PyMPI_LAMMPI_MPI_Win_free /* -- */ #if defined(ROMIO_VERSION) #if defined(__cplusplus) extern "C" { #endif #define ADIOI_FILE_COOKIE 2487376 #define FDTYPE int #define ADIO_Offset MPI_Offset #define ADIOI_Fns struct ADIOI_Fns_struct #define ADIOI_Hints struct ADIOI_Hints_struct extern MPI_Errhandler ADIOI_DFLT_ERR_HANDLER; struct ADIOI_FileD { int cookie; /* for error checking */ FDTYPE fd_sys; /* system file descriptor */ #ifdef XFS int fd_direct; /* On XFS, this is used for direct I/O; fd_sys is used for buffered I/O */ int direct_read; /* flag; 1 means use direct read */ int direct_write; /* flag; 1 means use direct write */ /* direct I/O attributes */ unsigned d_mem; /* data buffer memory alignment */ unsigned d_miniosz; /* min xfer size, xfer size multiple, and file seek offset alignment */ unsigned d_maxiosz; /* max xfer size */ #endif ADIO_Offset fp_ind; /* individual file pointer in MPI-IO (in bytes)*/ ADIO_Offset fp_sys_posn; /* current location of the system file-pointer in bytes */ ADIOI_Fns *fns; /* struct of I/O functions to use */ MPI_Comm comm; /* communicator indicating 
who called open */ char *filename; int file_system; /* type of file system */ int access_mode; ADIO_Offset disp; /* reqd. for MPI-IO */ MPI_Datatype etype; /* reqd. for MPI-IO */ MPI_Datatype filetype; /* reqd. for MPI-IO */ int etype_size; /* in bytes */ ADIOI_Hints *hints; /* structure containing fs-indep. info values */ MPI_Info info; int split_coll_count; /* count of outstanding split coll. ops. */ char *shared_fp_fname; /* name of file containing shared file pointer */ struct ADIOI_FileD *shared_fp_fd; /* file handle of file containing shared fp */ int async_count; /* count of outstanding nonblocking operations */ int perm; int atomicity; /* true=atomic, false=nonatomic */ int iomode; /* reqd. to implement Intel PFS modes */ MPI_Errhandler err_handler; }; #if defined(__cplusplus) } #endif static int PyMPI_LAMMPI_MPI_File_get_errhandler(MPI_File file, MPI_Errhandler *errhandler) { /* check arguments */ if (file != MPI_FILE_NULL) { struct ADIOI_FileD * fh = (struct ADIOI_FileD *) file; if (fh->cookie != ADIOI_FILE_COOKIE) return MPI_ERR_ARG; } if (errhandler == 0) return MPI_ERR_ARG; /* get error handler */ if (file == MPI_FILE_NULL) { *errhandler = ADIOI_DFLT_ERR_HANDLER; } else { struct ADIOI_FileD * fh = (struct ADIOI_FileD *) file; *errhandler = fh->err_handler; } /* increment reference counter */ if (*errhandler != MPI_ERRHANDLER_NULL) { struct _errhdl *eh = (struct _errhdl *) (*errhandler); eh->eh_refcount++; } return MPI_SUCCESS; } #undef MPI_File_get_errhandler #define MPI_File_get_errhandler PyMPI_LAMMPI_MPI_File_get_errhandler static int PyMPI_LAMMPI_MPI_File_set_errhandler(MPI_File file, MPI_Errhandler errhandler) { /* check arguments */ if (file != MPI_FILE_NULL) { struct ADIOI_FileD * fh = (struct ADIOI_FileD *) file; if (fh->cookie != ADIOI_FILE_COOKIE) return MPI_ERR_ARG; } if (errhandler == MPI_ERRHANDLER_NULL) return MPI_ERR_ARG; if (errhandler != MPI_ERRORS_RETURN && errhandler != MPI_ERRORS_ARE_FATAL) return MPI_ERR_ARG; /* increment reference counter */ if (errhandler != MPI_ERRHANDLER_NULL ) { struct _errhdl *eh = (struct _errhdl *) errhandler; eh->eh_refcount++; } /* set error handler */ if (file == MPI_FILE_NULL) { MPI_Errhandler tmp = ADIOI_DFLT_ERR_HANDLER; ADIOI_DFLT_ERR_HANDLER = errhandler; errhandler = tmp; } else { struct ADIOI_FileD *fh = (struct ADIOI_FileD *) file; MPI_Errhandler tmp = fh->err_handler; fh->err_handler = errhandler; errhandler = tmp; } /* decrement reference counter */ if (errhandler != MPI_ERRHANDLER_NULL ) { struct _errhdl *eh = (struct _errhdl *) errhandler; eh->eh_refcount--; } return MPI_SUCCESS; } #undef MPI_File_set_errhandler #define MPI_File_set_errhandler PyMPI_LAMMPI_MPI_File_set_errhandler #endif /* ---------------------------------------------------------------- */ #endif /* !PyMPI_COMPAT_LAMMPI_H */ /* Local variables: c-basic-offset: 2 indent-tabs-mode: nil End: */ mpi4py-4.0.3/src/lib-mpi/compat/mpiabi.h000066400000000000000000000002631475341043600177650ustar00rootroot00000000000000#ifndef PyMPI_COMPAT_MPIABI_H #define PyMPI_COMPAT_MPIABI_H /* -------------------------------------------------------------------------- */ #endif /* !PyMPI_COMPAT_MPIABI_H */ mpi4py-4.0.3/src/lib-mpi/compat/mpich.h000066400000000000000000000127671475341043600176400ustar00rootroot00000000000000#ifndef PyMPI_COMPAT_MPICH_H #define PyMPI_COMPAT_MPICH_H #if defined(MPICH_NUMVERSION) /* -------------------------------------------------------------------------- */ /* https://github.com/pmodels/mpich/pull/5467 */ #undef MPI_MAX_PORT_NAME #define 
MPI_MAX_PORT_NAME 1024 static int PyMPI_MPICH_port_info(MPI_Info info, MPI_Info *port_info) { int ierr; # define pympi_str_(s) #s # define pympi_str(s) pympi_str_(s) const char *key = "port_name_size"; const char *val = pympi_str(MPI_MAX_PORT_NAME); # undef pympi_str_ # undef pympi_str if (info == MPI_INFO_NULL) { ierr = MPI_Info_create(port_info); if (ierr) return ierr; } else { ierr = MPI_Info_dup(info, port_info); if (ierr) return ierr; } ierr = MPI_Info_set(*port_info, key, val); if (ierr) (void) MPI_Info_free(port_info); return ierr; } static int PyMPI_MPICH_MPI_Open_port(MPI_Info info, char *port_name) { int ierr; ierr = PyMPI_MPICH_port_info(info, &info); if (ierr) return ierr; ierr = MPI_Open_port(info, port_name); (void) MPI_Info_free(&info); return ierr; } #undef MPI_Open_port #define MPI_Open_port PyMPI_MPICH_MPI_Open_port static int PyMPI_MPICH_MPI_Lookup_name(const char *service_name, MPI_Info info, char *port_name) { int ierr; ierr = PyMPI_MPICH_port_info(info, &info); if (ierr) return ierr; ierr = MPI_Lookup_name(service_name, info, port_name); (void) MPI_Info_free(&info); return ierr; } #undef MPI_Lookup_name #define MPI_Lookup_name PyMPI_MPICH_MPI_Lookup_name /* -------------------------------------------------------------------------- */ /* https://github.com/pmodels/mpich/issues/6981 */ #if MPI_VERSION == 4 && MPI_SUBVERSION <= 1 #if (MPICH_NUMVERSION < 40300300) || defined(CIBUILDWHEEL) static int PyMPI_MPICH_MPI_Info_free(MPI_Info *info) { if (info && *info == MPI_INFO_ENV) { (void) MPI_Comm_call_errhandler(MPI_COMM_SELF, MPI_ERR_INFO); return MPI_ERR_INFO; } return MPI_Info_free(info); } #undef MPI_Info_free #define MPI_Info_free PyMPI_MPICH_MPI_Info_free #endif #endif /* -------------------------------------------------------------------------- */ /* https://github.com/pmodels/mpich/issues/5413 */ /* https://github.com/pmodels/mpich/pull/6146 */ #if MPI_VERSION == 4 && MPI_SUBVERSION == 0 #if (MPICH_NUMVERSION < 40003300) || defined(CIBUILDWHEEL) static int PyMPI_MPICH_MPI_Status_set_elements_c(MPI_Status *status, MPI_Datatype datatype, MPI_Count elements) { return MPI_Status_set_elements_x(status, datatype, elements); } #undef MPI_Status_set_elements_c #define MPI_Status_set_elements_c PyMPI_MPICH_MPI_Status_set_elements_c #endif #if defined(CIBUILDWHEEL) && defined(__linux__) #undef MPI_Status_set_elements_c extern int MPI_Status_set_elements_c(MPI_Status *, MPI_Datatype, MPI_Count) __attribute__((weak, alias("PyMPI_MPICH_MPI_Status_set_elements_c"))); #endif #endif /* -------------------------------------------------------------------------- */ /* https://github.com/pmodels/mpich/issues/6351 */ /* https://github.com/pmodels/mpich/pull/6354 */ #if MPI_VERSION == 4 && MPI_SUBVERSION == 0 #if (MPICH_NUMVERSION < 40100300) || defined(CIBUILDWHEEL) static int PyMPI_MPICH_MPI_Reduce_c(const void *sendbuf, void *recvbuf, MPI_Count count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm) { const char dummy[1] = {0}; if (!sendbuf && (root == MPI_ROOT || root == MPI_PROC_NULL)) sendbuf = dummy; return MPI_Reduce_c(sendbuf, recvbuf, count, datatype, op, root, comm); } #undef MPI_Reduce_c #define MPI_Reduce_c PyMPI_MPICH_MPI_Reduce_c #endif #endif /* -------------------------------------------------------------------------- */ #if defined(CIBUILDWHEEL) #define PyMPI_MPICH_CALL_WEAK_SYMBOL(function, ...) 
\ if (function) return function(__VA_ARGS__); \ return PyMPI_UNAVAILABLE(#function, __VA_ARGS__); \ #undef MPI_Type_create_f90_integer #pragma weak MPI_Type_create_f90_integer static int PyMPI_MPICH_MPI_Type_create_f90_integer(int r, MPI_Datatype *t) { PyMPI_MPICH_CALL_WEAK_SYMBOL(MPI_Type_create_f90_integer, r, t); } #define MPI_Type_create_f90_integer PyMPI_MPICH_MPI_Type_create_f90_integer #undef MPI_Type_create_f90_real #pragma weak MPI_Type_create_f90_real static int PyMPI_MPICH_MPI_Type_create_f90_real(int p, int r, MPI_Datatype *t) { PyMPI_MPICH_CALL_WEAK_SYMBOL(MPI_Type_create_f90_real, p, r, t); } #define MPI_Type_create_f90_real PyMPI_MPICH_MPI_Type_create_f90_real #undef MPI_Type_create_f90_complex #pragma weak MPI_Type_create_f90_complex static int PyMPI_MPICH_MPI_Type_create_f90_complex(int p, int r, MPI_Datatype *t) { PyMPI_MPICH_CALL_WEAK_SYMBOL(MPI_Type_create_f90_complex, p, r, t); } #define MPI_Type_create_f90_complex PyMPI_MPICH_MPI_Type_create_f90_complex #undef MPI_Status_c2f #pragma weak MPI_Status_c2f static int PyMPI_MPICH_MPI_Status_c2f(const MPI_Status *cs, MPI_Fint *fs) { PyMPI_MPICH_CALL_WEAK_SYMBOL(MPI_Status_c2f, cs, fs); } #define MPI_Status_c2f PyMPI_MPICH_MPI_Status_c2f #undef MPI_Status_f2c #pragma weak MPI_Status_f2c static int PyMPI_MPICH_MPI_Status_f2c(const MPI_Fint *fs, MPI_Status *cs) { PyMPI_MPICH_CALL_WEAK_SYMBOL(MPI_Status_f2c, fs, cs); } #define MPI_Status_f2c PyMPI_MPICH_MPI_Status_f2c #endif /* -------------------------------------------------------------------------- */ #endif /* !MPICH_NUMVERSION */ #endif /* !PyMPI_COMPAT_MPICH_H */ mpi4py-4.0.3/src/lib-mpi/compat/mpich1.h000066400000000000000000000136161475341043600177130ustar00rootroot00000000000000#ifndef PyMPI_COMPAT_MPICH1_H #define PyMPI_COMPAT_MPICH1_H /* ---------------------------------------------------------------- */ static int PyMPI_MPICH1_argc = 0; static char *PyMPI_MPICH1_argv[1] = {(char*)0}; static void PyMPI_MPICH1_FixArgs(int **argc, char ****argv) { if (argc[0] && argv[0]) return; argc[0] = (int *) &PyMPI_MPICH1_argc; argv[0] = (char ***) &PyMPI_MPICH1_argv; } static int PyMPI_MPICH1_MPI_Init(int *argc, char ***argv) { PyMPI_MPICH1_FixArgs(&argc, &argv); return MPI_Init(argc, argv); } #undef MPI_Init #define MPI_Init PyMPI_MPICH1_MPI_Init static int PyMPI_MPICH1_MPI_Init_thread(int *argc, char ***argv, int required, int *provided) { PyMPI_MPICH1_FixArgs(&argc, &argv); return MPI_Init_thread(argc, argv, required, provided); } #undef MPI_Init_thread #define MPI_Init_thread PyMPI_MPICH1_MPI_Init_thread /* ---------------------------------------------------------------- */ #undef MPI_SIGNED_CHAR #define MPI_SIGNED_CHAR MPI_CHAR /* ---------------------------------------------------------------- */ static int PyMPI_MPICH1_MPI_Status_set_elements(MPI_Status *status, MPI_Datatype datatype, int count) { if (datatype == MPI_DATATYPE_NULL) return MPI_ERR_TYPE; return MPI_Status_set_elements(status, datatype, count); } #undef MPI_Status_set_elements #define MPI_Status_set_elements PyMPI_MPICH1_MPI_Status_set_elements /* ---------------------------------------------------------------- */ static int PyMPI_MPICH1_MPI_Sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int dest, int sendtag, void *recvbuf, int recvcount, MPI_Datatype recvtype, int source, int recvtag, MPI_Comm comm, MPI_Status *status) { MPI_Status dummy; if (status == MPI_STATUS_IGNORE) status = &dummy; return MPI_Sendrecv(sendbuf, sendcount, sendtype, dest, sendtag, recvbuf, recvcount, recvtype, source, 
recvtag, comm, status); } #undef MPI_Sendrecv #define MPI_Sendrecv PyMPI_MPICH1_MPI_Sendrecv static int PyMPI_MPICH1_MPI_Sendrecv_replace(void *buf, int count, MPI_Datatype datatype, int dest, int sendtag, int source, int recvtag, MPI_Comm comm, MPI_Status *status) { MPI_Status dummy; if (status == MPI_STATUS_IGNORE) status = &dummy; return MPI_Sendrecv_replace(buf, count, datatype, dest, sendtag, source, recvtag, comm, status); } #undef MPI_Sendrecv_replace #define MPI_Sendrecv_replace PyMPI_MPICH1_MPI_Sendrecv_replace /* ---------------------------------------------------------------- */ #ifndef PyMPI_HAVE_MPI_Win #undef MPI_Win_c2f #define MPI_Win_c2f(win) ((MPI_Fint)0) #undef MPI_Win_f2c #define MPI_Win_f2c(win) MPI_WIN_NULL #endif /* ---------------------------------------------------------------- */ #if defined(__cplusplus) extern "C" { #endif extern void *MPIR_ToPointer(int); #if defined(__cplusplus) } #endif #if defined(ROMIO_VERSION) #if defined(__cplusplus) extern "C" { #endif struct MPIR_Errhandler { unsigned long cookie; MPI_Handler_function *routine; int ref_count; }; #if defined(__cplusplus) } #endif static int PyMPI_MPICH1_MPI_File_get_errhandler(MPI_File file, MPI_Errhandler *errhandler) { int ierr = MPI_SUCCESS; ierr = MPI_File_get_errhandler(file, errhandler); if (ierr != MPI_SUCCESS) return ierr; if (errhandler == 0) return ierr; /* just in case */ /* manage reference counting */ if (*errhandler != MPI_ERRHANDLER_NULL) { struct MPIR_Errhandler *eh = (struct MPIR_Errhandler *) MPIR_ToPointer(*errhandler); if (eh) eh->ref_count++; } return MPI_SUCCESS; } static int PyMPI_MPICH1_MPI_File_set_errhandler(MPI_File file, MPI_Errhandler errhandler) { int ierr = MPI_SUCCESS; MPI_Errhandler previous = MPI_ERRHANDLER_NULL; ierr = MPI_File_get_errhandler(file, &previous); if (ierr != MPI_SUCCESS) return ierr; ierr = MPI_File_set_errhandler(file, errhandler); if (ierr != MPI_SUCCESS) return ierr; /* manage reference counting */ if (previous != MPI_ERRHANDLER_NULL) { struct MPIR_Errhandler *eh = (struct MPIR_Errhandler *) MPIR_ToPointer(previous); if (eh) eh->ref_count--; } if (errhandler != MPI_ERRHANDLER_NULL) { struct MPIR_Errhandler *eh = (struct MPIR_Errhandler *) MPIR_ToPointer(errhandler); if (eh) eh->ref_count++; } return MPI_SUCCESS; } #undef MPI_File_get_errhandler #define MPI_File_get_errhandler PyMPI_MPICH1_MPI_File_get_errhandler #undef MPI_File_set_errhandler #define MPI_File_set_errhandler PyMPI_MPICH1_MPI_File_set_errhandler #endif /* !ROMIO_VERSION */ /* ---------------------------------------------------------------- */ #undef MPI_ERR_KEYVAL #define MPI_ERR_KEYVAL MPI_ERR_OTHER #undef MPI_MAX_OBJECT_NAME #define MPI_MAX_OBJECT_NAME MPI_MAX_NAME_STRING /* ---------------------------------------------------------------- */ #endif /* !PyMPI_COMPAT_MPICH1_H */ /* Local variables: c-basic-offset: 2 indent-tabs-mode: nil End: */ mpi4py-4.0.3/src/lib-mpi/compat/mpich2.h000066400000000000000000000014011475341043600177010ustar00rootroot00000000000000#ifndef PyMPI_COMPAT_MPICH2_H #define PyMPI_COMPAT_MPICH2_H static int PyMPI_MPICH2_MPI_Add_error_class(int *errorclass) { int ierr; char errstr[1] = {0}; ierr = MPI_Add_error_class(errorclass); if (ierr) return ierr; return MPI_Add_error_string(*errorclass,errstr); } #undef MPI_Add_error_class #define MPI_Add_error_class PyMPI_MPICH2_MPI_Add_error_class static int PyMPI_MPICH2_MPI_Add_error_code(int errorclass, int *errorcode) { int ierr; char errstr[1] = {0}; ierr = MPI_Add_error_code(errorclass,errorcode); if (ierr) return 
ierr; return MPI_Add_error_string(*errorcode,errstr); } #undef MPI_Add_error_code #define MPI_Add_error_code PyMPI_MPICH2_MPI_Add_error_code #endif /* !PyMPI_COMPAT_MPICH2_H */ mpi4py-4.0.3/src/lib-mpi/compat/mpich3.h000066400000000000000000000202301475341043600177030ustar00rootroot00000000000000#ifndef PyMPI_COMPAT_MPICH3_H #define PyMPI_COMPAT_MPICH3_H #if defined(MPICH_NUMVERSION) #include "mpich.h" /* -------------------------------------------------------------------------- */ #if (MPICH_NUMVERSION >= 30000000) || defined(CIBUILDWHEEL) static int PyMPI_MPICH3_MPI_Type_get_extent_x(MPI_Datatype datatype, MPI_Count *lb, MPI_Count *extent) { int ierr; MPI_Aint lb_a = MPI_UNDEFINED; MPI_Aint extent_a = MPI_UNDEFINED; if (sizeof(MPI_Count) == sizeof(MPI_Aint)) { ierr = MPI_Type_get_extent(datatype, &lb_a, &extent_a); if (ierr) goto fn_exit; if (lb) *lb = lb_a; if (extent) *extent = extent_a; } else { ierr = MPI_Type_get_extent(datatype, &lb_a, &extent_a); if (ierr) goto fn_exit; if (lb_a != MPI_UNDEFINED && extent_a != MPI_UNDEFINED) { if (lb) *lb = lb_a; if (extent) *extent = extent_a; } else { ierr = MPI_Type_get_extent_x(datatype, lb, extent); if (ierr) goto fn_exit; } } fn_exit: return ierr; } #define MPI_Type_get_extent_x PyMPI_MPICH3_MPI_Type_get_extent_x #endif #if (MPICH_NUMVERSION >= 30000000) || defined(CIBUILDWHEEL) static int PyMPI_MPICH3_MPI_Type_get_true_extent_x(MPI_Datatype datatype, MPI_Count *lb, MPI_Count *extent) { int ierr; MPI_Aint lb_a = MPI_UNDEFINED; MPI_Aint extent_a = MPI_UNDEFINED; if (sizeof(MPI_Count) == sizeof(MPI_Aint)) { ierr = MPI_Type_get_true_extent(datatype, &lb_a, &extent_a); if (ierr) goto fn_exit; if (lb) *lb = lb_a; if (extent) *extent = extent_a; } else { ierr = MPI_Type_get_true_extent(datatype, &lb_a, &extent_a); if (ierr) goto fn_exit; if (lb_a != MPI_UNDEFINED && extent_a != MPI_UNDEFINED) { if (lb) *lb = lb_a; if (extent) *extent = extent_a; } else { ierr = MPI_Type_get_true_extent_x(datatype, lb, extent); if (ierr) goto fn_exit; } } fn_exit: return ierr; } #define MPI_Type_get_true_extent_x PyMPI_MPICH3_MPI_Type_get_true_extent_x #endif #if (MPICH_NUMVERSION >= 30400000) || defined(CIBUILDWHEEL) static int PyMPI_MPICH3_MPI_Initialized(int *flag) { int ierr; ierr = MPI_Initialized(flag); if (ierr) return ierr; if (!flag || *flag) return MPI_SUCCESS; ierr = MPI_Finalized(flag); if (ierr) return ierr; return MPI_SUCCESS; } #define MPI_Initialized PyMPI_MPICH3_MPI_Initialized #endif #if (MPICH_NUMVERSION >= 30400000) || defined(CIBUILDWHEEL) static int PyMPI_MPICH3_MPI_Win_get_attr(MPI_Win win, int keyval, void *attrval, int *flag) { int ierr; static MPI_Aint zero[1] = {0}; zero[0] = 0; ierr = MPI_Win_get_attr(win, keyval, attrval, flag); if (ierr) return ierr; if (keyval == MPI_WIN_SIZE && flag && *flag && attrval) if (**((MPI_Aint**)attrval) == -1) *((void**)attrval) = zero; return MPI_SUCCESS; } #define MPI_Win_get_attr PyMPI_MPICH3_MPI_Win_get_attr #endif /* -------------------------------------------------------------------------- */ #if (MPICH_NUMVERSION == 30101300) static int PyMPI_MPICH_MPI_Status_c2f(const MPI_Status *c_status, MPI_Fint *f_status) { if (c_status == MPI_STATUS_IGNORE || c_status == MPI_STATUSES_IGNORE) return MPI_ERR_OTHER; *(MPI_Status *)f_status = *c_status; return MPI_SUCCESS; } #define MPI_Status_c2f PyMPI_MPICH_MPI_Status_c2f #endif #if (MPICH_NUMVERSION < 30100301) static int PyMPI_MPICH_MPI_Add_error_class(int *errorclass) { int ierr; char errstr[1] = {0}; ierr = MPI_Add_error_class(errorclass); if (ierr) return 
ierr; return MPI_Add_error_string(*errorclass,errstr); } #undef MPI_Add_error_class #define MPI_Add_error_class PyMPI_MPICH_MPI_Add_error_class static int PyMPI_MPICH_MPI_Add_error_code(int errorclass, int *errorcode) { int ierr; char errstr[1] = {0}; ierr = MPI_Add_error_code(errorclass,errorcode); if (ierr) return ierr; return MPI_Add_error_string(*errorcode,errstr); } #undef MPI_Add_error_code #define MPI_Add_error_code PyMPI_MPICH_MPI_Add_error_code #endif #if (MPICH_NUMVERSION < 30100000) static int PyMPI_MPICH_MPI_Type_size_x(MPI_Datatype datatype, MPI_Count *size) { int ierr = MPI_Type_commit(&datatype); if (ierr) return ierr; return MPI_Type_size_x(datatype,size); } #undef MPI_Type_size_x #define MPI_Type_size_x PyMPI_MPICH_MPI_Type_size_x static int PyMPI_MPICH_MPI_Type_get_extent_x(MPI_Datatype datatype, MPI_Count *lb, MPI_Count *extent) { int ierr = MPI_Type_commit(&datatype); if (ierr) return ierr; return MPI_Type_get_extent_x(datatype,lb,extent); } #undef MPI_Type_get_extent_x #define MPI_Type_get_extent_x PyMPI_MPICH_MPI_Type_get_extent_x static int PyMPI_MPICH_MPI_Type_get_true_extent_x(MPI_Datatype datatype, MPI_Count *lb, MPI_Count *extent) { int ierr = MPI_Type_commit(&datatype); if (ierr) return ierr; return MPI_Type_get_true_extent_x(datatype,lb,extent); } #undef MPI_Type_get_true_extent_x #define MPI_Type_get_true_extent_x PyMPI_MPICH_MPI_Type_get_true_extent_x static int PyMPI_MPICH_MPI_Get_accumulate(const void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr, int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win) { double origin_buf, result_buf; if (!origin_addr && !origin_count) origin_addr = (const void *)&origin_buf; if (!result_addr && !result_count) result_addr = (void *)&result_buf; return MPI_Get_accumulate(origin_addr, origin_count, origin_datatype, result_addr, result_count, result_datatype, target_rank, target_disp, target_count, target_datatype, op, win); } #undef MPI_Get_accumulate #define MPI_Get_accumulate PyMPI_MPICH_MPI_Get_accumulate static int PyMPI_MPICH_MPI_Rget_accumulate(const void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr, int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win, MPI_Request *request) { double origin_buf, result_buf; if (!origin_addr && !origin_count) origin_addr = (const void *)&origin_buf; if (!result_addr && !result_count) result_addr = (void *)&result_buf; return MPI_Rget_accumulate(origin_addr, origin_count, origin_datatype, result_addr, result_count, result_datatype, target_rank, target_disp, target_count, target_datatype, op, win, request); } #undef MPI_Rget_accumulate #define MPI_Rget_accumulate PyMPI_MPICH_MPI_Rget_accumulate #endif /* -------------------------------------------------------------------------- */ #endif /* !MPICH_NUMVERSION */ #endif /* !PyMPI_COMPAT_MPICH3_H */ mpi4py-4.0.3/src/lib-mpi/compat/msmpi.h000066400000000000000000000016121475341043600176500ustar00rootroot00000000000000#ifndef PyMPI_COMPAT_MSMPI_H #define PyMPI_COMPAT_MSMPI_H static int PyMPI_MSMPI_MPI_Add_error_class(int *errorclass) { int ierr; char errstr[1] = {0}; ierr = MPI_Add_error_class(errorclass); if (ierr) return ierr; return MPI_Add_error_string(*errorclass,errstr); } #undef MPI_Add_error_class #define MPI_Add_error_class PyMPI_MSMPI_MPI_Add_error_class static 
int PyMPI_MSMPI_MPI_Add_error_code(int errorclass, int *errorcode) { int ierr; char errstr[1] = {0}; ierr = MPI_Add_error_code(errorclass,errorcode); if (ierr) return ierr; return MPI_Add_error_string(*errorcode,errstr); } #undef MPI_Add_error_code #define MPI_Add_error_code PyMPI_MSMPI_MPI_Add_error_code #if defined(MPICH_NAME) #undef MPI_File_c2f #define MPI_File_c2f PMPI_File_c2f #undef MPI_File_f2c #define MPI_File_f2c PMPI_File_f2c #endif #endif /* !PyMPI_COMPAT_MSMPI_H */ mpi4py-4.0.3/src/lib-mpi/compat/openmpi.h000066400000000000000000000253501475341043600201770ustar00rootroot00000000000000#ifndef PyMPI_COMPAT_OPENMPI_H #define PyMPI_COMPAT_OPENMPI_H /* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */ /* * The hackery below redefines the actual calls to 'MPI_Init()' and * 'MPI_Init_thread()' in order to preload the main MPI dynamic * library with appropriate flags to 'dlopen()' ensuring global * availability of library symbols. */ #if !defined(OPENMPI_DLOPEN_LIBMPI) && defined(OMPI_MAJOR_VERSION) #if OMPI_MAJOR_VERSION >= 3 #define OPENMPI_DLOPEN_LIBMPI 0 #endif #endif #ifndef OPENMPI_DLOPEN_LIBMPI #define OPENMPI_DLOPEN_LIBMPI 1 #endif #if OPENMPI_DLOPEN_LIBMPI #if HAVE_DLOPEN #include "../dynload.h" /* static void * my_dlopen(const char *name, int mode) { void *handle; static int called = 0; if (!called) { called = 1; #if HAVE_DLFCN_H printf("HAVE_DLFCN_H: yes\n"); #else printf("HAVE_DLFCN_H: no\n"); #endif printf("\n"); printf("RTLD_LAZY: 0x%X\n", RTLD_LAZY ); printf("RTLD_NOW: 0x%X\n", RTLD_NOW ); printf("RTLD_LOCAL: 0x%X\n", RTLD_LOCAL ); printf("RTLD_GLOBAL: 0x%X\n", RTLD_GLOBAL ); #ifdef RTLD_NOLOAD printf("RTLD_NOLOAD: 0x%X\n", RTLD_NOLOAD ); #endif printf("\n"); } handle = dlopen(name, mode); printf("dlopen(\"%s\",0x%X) -> %p\n", name, mode, handle); printf("dlerror() -> %s\n\n", dlerror()); return handle; } #define dlopen my_dlopen */ static void PyMPI_OPENMPI_dlopen_libmpi(void) { void *handle = 0; int mode = RTLD_NOW | RTLD_GLOBAL; #if defined(__APPLE__) /* macOS */ #ifdef RTLD_NOLOAD mode |= RTLD_NOLOAD; #endif #if defined(OMPI_MAJOR_VERSION) #if OMPI_MAJOR_VERSION >= 5 if (!handle) handle = dlopen("libmpi.40.dylib", mode); #elif OMPI_MAJOR_VERSION == 4 if (!handle) handle = dlopen("libmpi.40.dylib", mode); #elif OMPI_MAJOR_VERSION == 3 if (!handle) handle = dlopen("libmpi.40.dylib", mode); #elif OMPI_MAJOR_VERSION == 2 if (!handle) handle = dlopen("libmpi.20.dylib", mode); #elif OMPI_MAJOR_VERSION == 1 && OMPI_MINOR_VERSION >= 10 if (!handle) handle = dlopen("libmpi.12.dylib", mode); #elif OMPI_MAJOR_VERSION == 1 && OMPI_MINOR_VERSION >= 6 if (!handle) handle = dlopen("libmpi.1.dylib", mode); #elif OMPI_MAJOR_VERSION == 1 if (!handle) handle = dlopen("libmpi.0.dylib", mode); #endif #endif if (!handle) handle = dlopen("libmpi.dylib", mode); #else /* GNU/Linux and others */ #ifdef RTLD_NOLOAD mode |= RTLD_NOLOAD; #endif #if defined(OMPI_MAJOR_VERSION) #if OMPI_MAJOR_VERSION >= 5 if (!handle) handle = dlopen("libmpi.so.40", mode); #elif OMPI_MAJOR_VERSION == 4 if (!handle) handle = dlopen("libmpi.so.40", mode); #elif OMPI_MAJOR_VERSION == 3 if (!handle) handle = dlopen("libmpi.so.40", mode); #elif OMPI_MAJOR_VERSION == 2 if (!handle) handle = dlopen("libmpi.so.20", mode); #elif OMPI_MAJOR_VERSION == 1 && OMPI_MINOR_VERSION >= 10 if (!handle) handle = dlopen("libmpi.so.12", mode); #elif OMPI_MAJOR_VERSION == 1 && OMPI_MINOR_VERSION >= 6 if (!handle) handle = 
dlopen("libmpi.so.1", mode); #elif OMPI_MAJOR_VERSION == 1 if (!handle) handle = dlopen("libmpi.so.0", mode); #endif #endif if (!handle) handle = dlopen("libmpi.so", mode); #endif } static int PyMPI_OPENMPI_MPI_Init(int *argc, char ***argv) { PyMPI_OPENMPI_dlopen_libmpi(); return MPI_Init(argc, argv); } #undef MPI_Init #define MPI_Init PyMPI_OPENMPI_MPI_Init static int PyMPI_OPENMPI_MPI_Init_thread(int *argc, char ***argv, int required, int *provided) { PyMPI_OPENMPI_dlopen_libmpi(); return MPI_Init_thread(argc, argv, required, provided); } #undef MPI_Init_thread #define MPI_Init_thread PyMPI_OPENMPI_MPI_Init_thread #endif /* !HAVE_DLOPEN */ #endif /* !OPENMPI_DLOPEN_LIBMPI */ /* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */ #if (defined(OMPI_MAJOR_VERSION) && \ defined(OMPI_MINOR_VERSION) && \ defined(OMPI_RELEASE_VERSION)) #define PyMPI_OPENMPI_VERSION ((OMPI_MAJOR_VERSION * 10000) + \ (OMPI_MINOR_VERSION * 100) + \ (OMPI_RELEASE_VERSION * 1)) #else #define PyMPI_OPENMPI_VERSION 10000 #endif /* ------------------------------------------------------------------------- */ /* * Open MPI < 1.1.3 generates an error when MPI_File_get_errhandler() * is called with the predefined error handlers MPI_ERRORS_RETURN and * MPI_ERRORS_ARE_FATAL. */ #if PyMPI_OPENMPI_VERSION < 10103 static int PyMPI_OPENMPI_Errhandler_free(MPI_Errhandler *errhandler) { if (errhandler && ((*errhandler == MPI_ERRORS_RETURN) || (*errhandler == MPI_ERRORS_ARE_FATAL))) { *errhandler = MPI_ERRHANDLER_NULL; return MPI_SUCCESS; } return MPI_Errhandler_free(errhandler); } #undef MPI_Errhandler_free #define MPI_Errhandler_free PyMPI_OPENMPI_Errhandler_free #endif /* !(PyMPI_OPENMPI_VERSION < 10103) */ /* ------------------------------------------------------------------------- */ /* * Open MPI 1.1 generates an error when MPI_File_get_errhandler() is * called with the MPI_FILE_NULL handle. The code below try to fix * this bug by intercepting the calls to the functions setting and * getting the error handlers for MPI_File's. 
*/ #if PyMPI_OPENMPI_VERSION < 10200 static MPI_Errhandler PyMPI_OPENMPI_FILE_NULL_ERRHANDLER = (MPI_Errhandler)0; static int PyMPI_OPENMPI_File_get_errhandler(MPI_File file, MPI_Errhandler *errhandler) { if (file == MPI_FILE_NULL) { if (PyMPI_OPENMPI_FILE_NULL_ERRHANDLER == (MPI_Errhandler)0) { PyMPI_OPENMPI_FILE_NULL_ERRHANDLER = MPI_ERRORS_RETURN; } *errhandler = PyMPI_OPENMPI_FILE_NULL_ERRHANDLER; return MPI_SUCCESS; } return MPI_File_get_errhandler(file, errhandler); } #undef MPI_File_get_errhandler #define MPI_File_get_errhandler PyMPI_OPENMPI_File_get_errhandler static int PyMPI_OPENMPI_File_set_errhandler(MPI_File file, MPI_Errhandler errhandler) { int ierr = MPI_File_set_errhandler(file, errhandler); if (ierr != MPI_SUCCESS) return ierr; if (file == MPI_FILE_NULL) { PyMPI_OPENMPI_FILE_NULL_ERRHANDLER = errhandler; } return ierr; } #undef MPI_File_set_errhandler #define MPI_File_set_errhandler PyMPI_OPENMPI_File_set_errhandler #endif /* !(PyMPI_OPENMPI_VERSION < 10200) */ /* ---------------------------------------------------------------- */ #if PyMPI_OPENMPI_VERSION < 10301 static MPI_Fint PyMPI_OPENMPI_File_c2f(MPI_File file) { if (file == MPI_FILE_NULL) return (MPI_Fint)0; return MPI_File_c2f(file); } #define MPI_File_c2f PyMPI_OPENMPI_File_c2f #endif /* !(PyMPI_OPENMPI_VERSION < 10301) */ /* ------------------------------------------------------------------------- */ #if PyMPI_OPENMPI_VERSION < 10402 static int PyMPI_OPENMPI_MPI_Cancel(MPI_Request *request) { if (request && *request == MPI_REQUEST_NULL) { MPI_Comm_call_errhandler(MPI_COMM_WORLD, MPI_ERR_REQUEST); return MPI_ERR_REQUEST; } return MPI_Cancel(request); } #undef MPI_Cancel #define MPI_Cancel PyMPI_OPENMPI_MPI_Cancel static int PyMPI_OPENMPI_MPI_Request_free(MPI_Request *request) { if (request && *request == MPI_REQUEST_NULL) { MPI_Comm_call_errhandler(MPI_COMM_WORLD, MPI_ERR_REQUEST); return MPI_ERR_REQUEST; } return MPI_Request_free(request); } #undef MPI_Request_free #define MPI_Request_free PyMPI_OPENMPI_MPI_Request_free static int PyMPI_OPENMPI_MPI_Win_get_errhandler(MPI_Win win, MPI_Errhandler *errhandler) { if (win == MPI_WIN_NULL) { MPI_Comm_call_errhandler(MPI_COMM_WORLD, MPI_ERR_WIN); return MPI_ERR_WIN; } return MPI_Win_get_errhandler(win, errhandler); } #undef MPI_Win_get_errhandler #define MPI_Win_get_errhandler PyMPI_OPENMPI_MPI_Win_get_errhandler static int PyMPI_OPENMPI_MPI_Win_set_errhandler(MPI_Win win, MPI_Errhandler errhandler) { if (win == MPI_WIN_NULL) { MPI_Comm_call_errhandler(MPI_COMM_WORLD, MPI_ERR_WIN); return MPI_ERR_WIN; } return MPI_Win_set_errhandler(win, errhandler); } #undef MPI_Win_set_errhandler #define MPI_Win_set_errhandler PyMPI_OPENMPI_MPI_Win_set_errhandler #endif /* !(PyMPI_OPENMPI_VERSION < 10402) */ /* ------------------------------------------------------------------------- */ /* * Open MPI 1.7 tries to set status even in the case of MPI_STATUS_IGNORE. */ #if PyMPI_OPENMPI_VERSION >= 10700 && PyMPI_OPENMPI_VERSION < 10800 static int PyMPI_OPENMPI_MPI_Mrecv(void *buf, int count, MPI_Datatype type, MPI_Message *message, MPI_Status *status) { MPI_Status sts; if (status == MPI_STATUS_IGNORE) status = &sts; return MPI_Mrecv(buf, count, type, message, status); } #undef MPI_Mrecv #define MPI_Mrecv PyMPI_OPENMPI_MPI_Mrecv #endif /* !(PyMPI_OPENMPI_VERSION > 10700) */ /* ------------------------------------------------------------------------- */ /* * Open MPI < 1.10.3 errors with MPI_Get_address(MPI_BOTTOM, &address). 
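 * The wrapper below short-circuits the MPI_BOTTOM case, storing an address
 * of zero and returning MPI_SUCCESS, and defers to MPI_Get_address for every
 * other location.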
*/ #if PyMPI_OPENMPI_VERSION < 11003 static int PyMPI_OPENMPI_Get_address(const void *location, MPI_Aint *address) { if (location == MPI_BOTTOM && address) { *address = 0; return MPI_SUCCESS; } return MPI_Get_address(location, address); } #undef MPI_Get_address #define MPI_Get_address PyMPI_OPENMPI_Get_address #endif /* ------------------------------------------------------------------------- */ /* * Open MPI < 2.0.0 matched probes do not return MPI_MESSAGE_NO_PROC * for source=MPI_PROC_NULL if status=MPI_STATUS_IGNORE. */ #if PyMPI_OPENMPI_VERSION < 20000 static int PyMPI_OPENMPI_Mprobe(int source, int tag, MPI_Comm comm, MPI_Message *message, MPI_Status *status) { MPI_Status _pympi_status; if (source == MPI_PROC_NULL && status == MPI_STATUS_IGNORE) status = &_pympi_status; return MPI_Mprobe(source, tag, comm, message, status); } #undef MPI_Mprobe #define MPI_Mprobe PyMPI_OPENMPI_Mprobe static int PyMPI_OPENMPI_Improbe(int source, int tag, MPI_Comm comm, int *flag, MPI_Message *message, MPI_Status *status) { MPI_Status _pympi_status; if (source == MPI_PROC_NULL && status == MPI_STATUS_IGNORE) status = &_pympi_status; return MPI_Improbe(source, tag, comm, flag, message, status); } #undef MPI_Improbe #define MPI_Improbe PyMPI_OPENMPI_Improbe #endif /* ------------------------------------------------------------------------- */ #endif /* !PyMPI_COMPAT_OPENMPI_H */ /* Local Variables: c-basic-offset: 2 indent-tabs-mode: nil End: */ mpi4py-4.0.3/src/lib-mpi/config.h000066400000000000000000000024171475341043600165110ustar00rootroot00000000000000#if defined(MPI_ABI_VERSION) # if MPI_ABI_VERSION >= 1 # define PyMPI_ABI 1 # endif #endif #if defined(MS_WINDOWS) # if !defined(MSMPI_VER) # if defined(MPICH2) && defined(MPIAPI) # define MSMPI_VER 0x100 # endif # endif #endif #if !defined(MPIAPI) # define MPIAPI #endif #if defined(HAVE_PYMPICONF_H) #include "pympiconf.h" #elif defined(PyMPI_ABI) #include "config/mpiapi.h" #elif defined(I_MPI_NUMVERSION) #include "config/impi.h" #elif defined(MSMPI_VER) #include "config/msmpi.h" #elif defined(MPICH_NAME) && (MPICH_NAME >= 4) #include "config/mpich.h" #elif defined(MPICH_NAME) && (MPICH_NAME == 3) #include "config/mpich3.h" #elif defined(MPICH_NAME) && (MPICH_NAME == 2) #include "config/mpich2.h" #elif defined(OPEN_MPI) #include "config/openmpi.h" #else /* Unknown MPI*/ #include "config/unknown.h" #endif #ifdef PyMPI_MISSING_MPI_Type_create_f90_integer #undef PyMPI_HAVE_MPI_Type_create_f90_integer #endif #ifdef PyMPI_MISSING_MPI_Type_create_f90_real #undef PyMPI_HAVE_MPI_Type_create_f90_real #endif #ifdef PyMPI_MISSING_MPI_Type_create_f90_complex #undef PyMPI_HAVE_MPI_Type_create_f90_complex #endif #ifdef PyMPI_MISSING_MPI_Status_c2f #undef PyMPI_HAVE_MPI_Status_c2f #endif #ifdef PyMPI_MISSING_MPI_Status_f2c #undef PyMPI_HAVE_MPI_Status_f2c #endif mpi4py-4.0.3/src/lib-mpi/config/000077500000000000000000000000001475341043600163345ustar00rootroot00000000000000mpi4py-4.0.3/src/lib-mpi/config/impi.h000066400000000000000000000233751475341043600174550ustar00rootroot00000000000000#ifndef PyMPI_CONFIG_IMPI_H #define PyMPI_CONFIG_IMPI_H #include "mpiapi.h" /* These types may not be available */ #ifndef MPI_REAL2 #undef PyMPI_HAVE_MPI_REAL2 #endif #ifndef MPI_MPI_COMPLEX4 #undef PyMPI_HAVE_MPI_COMPLEX4 #endif #if !defined(CIBUILDWHEEL) #if I_MPI_NUMVERSION >= 20210900300 #define PyMPI_HAVE_MPI_Bcast_c 1 #define PyMPI_HAVE_MPI_Gather_c 1 #define PyMPI_HAVE_MPI_Scatter_c 1 #define PyMPI_HAVE_MPI_Allgather_c 1 #define PyMPI_HAVE_MPI_Alltoall_c 1 #define 
PyMPI_HAVE_MPI_Reduce_c 1 #define PyMPI_HAVE_MPI_Allreduce_c 1 #define PyMPI_HAVE_MPI_Reduce_scatter_block_c 1 #define PyMPI_HAVE_MPI_Scan_c 1 #define PyMPI_HAVE_MPI_Exscan_c 1 #define PyMPI_HAVE_MPI_Neighbor_allgather_c 1 #define PyMPI_HAVE_MPI_Neighbor_alltoall_c 1 #define PyMPI_HAVE_MPI_Ibcast_c 1 #define PyMPI_HAVE_MPI_Igather_c 1 #define PyMPI_HAVE_MPI_Iscatter_c 1 #define PyMPI_HAVE_MPI_Iallgather_c 1 #define PyMPI_HAVE_MPI_Ialltoall_c 1 #define PyMPI_HAVE_MPI_Ireduce_c 1 #define PyMPI_HAVE_MPI_Iallreduce_c 1 #define PyMPI_HAVE_MPI_Ireduce_scatter_block_c 1 #define PyMPI_HAVE_MPI_Iscan_c 1 #define PyMPI_HAVE_MPI_Iexscan_c 1 #define PyMPI_HAVE_MPI_Ineighbor_allgather_c 1 #define PyMPI_HAVE_MPI_Ineighbor_alltoall_c 1 #endif #if I_MPI_NUMVERSION >= 20211000300 #define PyMPI_HAVE_MPI_Buffer_attach_c 1 #define PyMPI_HAVE_MPI_Buffer_detach_c 1 #define PyMPI_HAVE_MPI_Send_c 1 #define PyMPI_HAVE_MPI_Recv_c 1 #define PyMPI_HAVE_MPI_Sendrecv_c 1 #define PyMPI_HAVE_MPI_Sendrecv_replace_c 1 #define PyMPI_HAVE_MPI_Bsend_c 1 #define PyMPI_HAVE_MPI_Ssend_c 1 #define PyMPI_HAVE_MPI_Rsend_c 1 #define PyMPI_HAVE_MPI_Isend_c 1 #define PyMPI_HAVE_MPI_Irecv_c 1 #define PyMPI_HAVE_MPI_Ibsend_c 1 #define PyMPI_HAVE_MPI_Issend_c 1 #define PyMPI_HAVE_MPI_Irsend_c 1 #define PyMPI_HAVE_MPI_Send_init_c 1 #define PyMPI_HAVE_MPI_Recv_init_c 1 #define PyMPI_HAVE_MPI_Bsend_init_c 1 #define PyMPI_HAVE_MPI_Ssend_init_c 1 #define PyMPI_HAVE_MPI_Rsend_init_c 1 #define PyMPI_HAVE_MPI_Mrecv_c 1 #define PyMPI_HAVE_MPI_Imrecv_c 1 #define PyMPI_HAVE_MPI_Gatherv_c 1 #define PyMPI_HAVE_MPI_Scatterv_c 1 #define PyMPI_HAVE_MPI_Allgatherv_c 1 #define PyMPI_HAVE_MPI_Alltoallv_c 1 #define PyMPI_HAVE_MPI_Alltoallw_c 1 #define PyMPI_HAVE_MPI_Reduce_scatter_c 1 #define PyMPI_HAVE_MPI_Neighbor_allgatherv_c 1 #define PyMPI_HAVE_MPI_Neighbor_alltoallv_c 1 #define PyMPI_HAVE_MPI_Neighbor_alltoallw_c 1 #define PyMPI_HAVE_MPI_Igatherv_c 1 #define PyMPI_HAVE_MPI_Iscatterv_c 1 #define PyMPI_HAVE_MPI_Iallgatherv_c 1 #define PyMPI_HAVE_MPI_Ialltoallv_c 1 #define PyMPI_HAVE_MPI_Ialltoallw_c 1 #define PyMPI_HAVE_MPI_Ireduce_scatter_c 1 #define PyMPI_HAVE_MPI_Ineighbor_allgatherv_c 1 #define PyMPI_HAVE_MPI_Ineighbor_alltoallv_c 1 #define PyMPI_HAVE_MPI_Ineighbor_alltoallw_c 1 #endif #if I_MPI_NUMVERSION >= 20211100300 #define PyMPI_HAVE_MPI_Session 1 #define PyMPI_HAVE_MPI_ERRORS_ABORT 1 #define PyMPI_HAVE_MPI_SESSION_NULL 1 #define PyMPI_HAVE_MPI_MAX_PSET_NAME_LEN 1 #define PyMPI_HAVE_MPI_Session_init 1 #define PyMPI_HAVE_MPI_Session_finalize 1 #define PyMPI_HAVE_MPI_Session_get_num_psets 1 #define PyMPI_HAVE_MPI_Session_get_nth_pset 1 #define PyMPI_HAVE_MPI_Session_get_info 1 #define PyMPI_HAVE_MPI_Session_get_pset_info 1 #define PyMPI_HAVE_MPI_Group_from_session_pset 1 #define PyMPI_HAVE_MPI_Session_errhandler_function 1 #define PyMPI_HAVE_MPI_Session_create_errhandler 1 #define PyMPI_HAVE_MPI_Session_get_errhandler 1 #define PyMPI_HAVE_MPI_Session_set_errhandler 1 #define PyMPI_HAVE_MPI_Session_call_errhandler 1 #define PyMPI_HAVE_MPI_MAX_STRINGTAG_LEN 1 #define PyMPI_HAVE_MPI_Comm_create_from_group 1 #define PyMPI_HAVE_MPI_COMM_TYPE_HW_GUIDED 1 #define PyMPI_HAVE_MPI_ERR_SESSION 1 #define PyMPI_HAVE_MPI_Session_c2f 1 #define PyMPI_HAVE_MPI_Session_f2c 1 #endif #if I_MPI_NUMVERSION >= 20211200300 #define PyMPI_HAVE_MPI_Type_contiguous_c 1 #define PyMPI_HAVE_MPI_Type_vector_c 1 #define PyMPI_HAVE_MPI_Type_indexed_c 1 #define PyMPI_HAVE_MPI_Type_create_indexed_block_c 1 #define PyMPI_HAVE_MPI_Type_create_subarray_c 1 #define 
PyMPI_HAVE_MPI_Type_create_darray_c 1 #define PyMPI_HAVE_MPI_Type_create_hvector_c 1 #define PyMPI_HAVE_MPI_Type_create_hindexed_c 1 #define PyMPI_HAVE_MPI_Type_create_hindexed_block_c 1 #define PyMPI_HAVE_MPI_Type_create_struct_c 1 #define PyMPI_HAVE_MPI_Type_create_resized_c 1 #define PyMPI_HAVE_MPI_Type_size_c 1 #define PyMPI_HAVE_MPI_Type_get_extent_c 1 #define PyMPI_HAVE_MPI_Type_get_true_extent_c 1 #define PyMPI_HAVE_MPI_Type_get_envelope_c 1 #define PyMPI_HAVE_MPI_Type_get_contents_c 1 #define PyMPI_HAVE_MPI_Pack_c 1 #define PyMPI_HAVE_MPI_Unpack_c 1 #define PyMPI_HAVE_MPI_Pack_size_c 1 #define PyMPI_HAVE_MPI_Pack_external_c 1 #define PyMPI_HAVE_MPI_Unpack_external_c 1 #define PyMPI_HAVE_MPI_Pack_external_size_c 1 #define PyMPI_HAVE_MPI_Get_count_c 1 #define PyMPI_HAVE_MPI_Get_elements_c 1 #define PyMPI_HAVE_MPI_Status_set_elements_c 1 #define PyMPI_HAVE_MPI_Barrier_init 1 #define PyMPI_HAVE_MPI_Bcast_init 1 #define PyMPI_HAVE_MPI_Gather_init 1 #define PyMPI_HAVE_MPI_Gatherv_init 1 #define PyMPI_HAVE_MPI_Scatter_init 1 #define PyMPI_HAVE_MPI_Scatterv_init 1 #define PyMPI_HAVE_MPI_Allgather_init 1 #define PyMPI_HAVE_MPI_Allgatherv_init 1 #define PyMPI_HAVE_MPI_Alltoall_init 1 #define PyMPI_HAVE_MPI_Alltoallv_init 1 #define PyMPI_HAVE_MPI_Alltoallw_init 1 #define PyMPI_HAVE_MPI_Reduce_init 1 #define PyMPI_HAVE_MPI_Allreduce_init 1 #define PyMPI_HAVE_MPI_Reduce_scatter_block_init 1 #define PyMPI_HAVE_MPI_Reduce_scatter_init 1 #define PyMPI_HAVE_MPI_Scan_init 1 #define PyMPI_HAVE_MPI_Exscan_init 1 #define PyMPI_HAVE_MPI_Neighbor_allgather_init 1 #define PyMPI_HAVE_MPI_Neighbor_allgatherv_init 1 #define PyMPI_HAVE_MPI_Neighbor_alltoall_init 1 #define PyMPI_HAVE_MPI_Neighbor_alltoallv_init 1 #define PyMPI_HAVE_MPI_Neighbor_alltoallw_init 1 #if defined(__linux__) #define PyMPI_HAVE_MPI_Bcast_init_c 1 #define PyMPI_HAVE_MPI_Gather_init_c 1 #define PyMPI_HAVE_MPI_Gatherv_init_c 1 #define PyMPI_HAVE_MPI_Scatter_init_c 1 #define PyMPI_HAVE_MPI_Scatterv_init_c 1 #define PyMPI_HAVE_MPI_Allgather_init_c 1 #define PyMPI_HAVE_MPI_Allgatherv_init_c 1 #define PyMPI_HAVE_MPI_Alltoall_init_c 1 #define PyMPI_HAVE_MPI_Alltoallv_init_c 1 #define PyMPI_HAVE_MPI_Alltoallw_init_c 1 #define PyMPI_HAVE_MPI_Reduce_init_c 1 #define PyMPI_HAVE_MPI_Allreduce_init_c 1 #define PyMPI_HAVE_MPI_Reduce_scatter_block_init_c 1 #define PyMPI_HAVE_MPI_Reduce_scatter_init_c 1 #define PyMPI_HAVE_MPI_Scan_init_c 1 #define PyMPI_HAVE_MPI_Exscan_init_c 1 #define PyMPI_HAVE_MPI_Neighbor_allgather_init_c 1 #define PyMPI_HAVE_MPI_Neighbor_allgatherv_init_c 1 #define PyMPI_HAVE_MPI_Neighbor_alltoall_init_c 1 #define PyMPI_HAVE_MPI_Neighbor_alltoallv_init_c 1 #define PyMPI_HAVE_MPI_Neighbor_alltoallw_init_c 1 #endif #define PyMPI_HAVE_MPI_Win_create_c 1 #define PyMPI_HAVE_MPI_Win_allocate_c 1 #define PyMPI_HAVE_MPI_Win_allocate_shared_c 1 #define PyMPI_HAVE_MPI_Win_shared_query_c 1 #define PyMPI_HAVE_MPI_Get_c 1 #define PyMPI_HAVE_MPI_Put_c 1 #define PyMPI_HAVE_MPI_Accumulate_c 1 #define PyMPI_HAVE_MPI_Get_accumulate_c 1 #define PyMPI_HAVE_MPI_Rget_c 1 #define PyMPI_HAVE_MPI_Rput_c 1 #define PyMPI_HAVE_MPI_Raccumulate_c 1 #define PyMPI_HAVE_MPI_Rget_accumulate_c 1 #if defined(__linux__) #define PyMPI_HAVE_MPI_File_read_at_c 1 #define PyMPI_HAVE_MPI_File_read_at_all_c 1 #define PyMPI_HAVE_MPI_File_write_at_c 1 #define PyMPI_HAVE_MPI_File_write_at_all_c 1 #define PyMPI_HAVE_MPI_File_iread_at_c 1 #define PyMPI_HAVE_MPI_File_iread_at_all_c 1 #define PyMPI_HAVE_MPI_File_iwrite_at_c 1 #define PyMPI_HAVE_MPI_File_iwrite_at_all_c 1 #define 
PyMPI_HAVE_MPI_File_read_c 1 #define PyMPI_HAVE_MPI_File_read_all_c 1 #define PyMPI_HAVE_MPI_File_write_c 1 #define PyMPI_HAVE_MPI_File_write_all_c 1 #define PyMPI_HAVE_MPI_File_iread_c 1 #define PyMPI_HAVE_MPI_File_iread_all_c 1 #define PyMPI_HAVE_MPI_File_iwrite_c 1 #define PyMPI_HAVE_MPI_File_iwrite_all_c 1 #define PyMPI_HAVE_MPI_File_read_shared_c 1 #define PyMPI_HAVE_MPI_File_write_shared_c 1 #define PyMPI_HAVE_MPI_File_iread_shared_c 1 #define PyMPI_HAVE_MPI_File_iwrite_shared_c 1 #define PyMPI_HAVE_MPI_File_read_ordered_c 1 #define PyMPI_HAVE_MPI_File_write_ordered_c 1 #define PyMPI_HAVE_MPI_File_read_at_all_begin_c 1 #define PyMPI_HAVE_MPI_File_write_at_all_begin_c 1 #define PyMPI_HAVE_MPI_File_read_all_begin_c 1 #define PyMPI_HAVE_MPI_File_write_all_begin_c 1 #define PyMPI_HAVE_MPI_File_read_ordered_begin_c 1 #define PyMPI_HAVE_MPI_File_write_ordered_begin_c 1 #define PyMPI_HAVE_MPI_File_get_type_extent_c 1 #define PyMPI_HAVE_MPI_Datarep_conversion_function_c 1 #define PyMPI_HAVE_MPI_CONVERSION_FN_NULL_C 1 #define PyMPI_HAVE_MPI_Register_datarep_c 1 #endif #endif #if I_MPI_NUMVERSION >= 20211300300 #define PyMPI_HAVE_MPI_User_function_c 1 #define PyMPI_HAVE_MPI_Op_create_c 1 #define PyMPI_HAVE_MPI_Reduce_local_c 1 #endif #if I_MPI_NUMVERSION >= 20211400300 #define PyMPI_HAVE_MPI_Status_get_source 1 #define PyMPI_HAVE_MPI_Status_set_source 1 #define PyMPI_HAVE_MPI_Status_get_tag 1 #define PyMPI_HAVE_MPI_Status_set_tag 1 #define PyMPI_HAVE_MPI_Status_get_error 1 #define PyMPI_HAVE_MPI_Status_set_error 1 #define PyMPI_HAVE_MPI_Request_get_status_any 1 #define PyMPI_HAVE_MPI_Request_get_status_all 1 #define PyMPI_HAVE_MPI_Request_get_status_some 1 #define PyMPI_HAVE_MPI_Pready 1 #define PyMPI_HAVE_MPI_Pready_range 1 #define PyMPI_HAVE_MPI_Pready_list 1 #define PyMPI_HAVE_MPI_Parrived 1 #define PyMPI_HAVE_MPI_BUFFER_AUTOMATIC 1 #define PyMPI_HAVE_MPI_Psend_init 1 #define PyMPI_HAVE_MPI_Precv_init 1 #define PyMPI_HAVE_MPI_Comm_idup_with_info 1 #define PyMPI_HAVE_MPI_COMM_TYPE_HW_UNGUIDED 1 #define PyMPI_HAVE_MPI_COMM_TYPE_RESOURCE_GUIDED 1 #define PyMPI_HAVE_MPI_Intercomm_create_from_groups 1 #define PyMPI_HAVE_MPI_ERR_PROC_ABORTED 1 #define PyMPI_HAVE_MPI_ERR_VALUE_TOO_LARGE 1 #define PyMPI_HAVE_MPI_F_SOURCE 1 #define PyMPI_HAVE_MPI_F_TAG 1 #define PyMPI_HAVE_MPI_F_ERROR 1 #define PyMPI_HAVE_MPI_F_STATUS_SIZE 1 #endif #endif #endif /* !PyMPI_CONFIG_IMPI_H */ mpi4py-4.0.3/src/lib-mpi/config/mpi-11.h000066400000000000000000000203451475341043600175150ustar00rootroot00000000000000#define PyMPI_HAVE_MPI_UNDEFINED 1 #define PyMPI_HAVE_MPI_ANY_SOURCE 1 #define PyMPI_HAVE_MPI_ANY_TAG 1 #define PyMPI_HAVE_MPI_PROC_NULL 1 #define PyMPI_HAVE_MPI_Aint 1 #define PyMPI_HAVE_MPI_Datatype 1 #define PyMPI_HAVE_MPI_DATATYPE_NULL 1 #define PyMPI_HAVE_MPI_UB 1 #define PyMPI_HAVE_MPI_LB 1 #define PyMPI_HAVE_MPI_PACKED 1 #define PyMPI_HAVE_MPI_BYTE 1 #define PyMPI_HAVE_MPI_CHAR 1 #define PyMPI_HAVE_MPI_SHORT 1 #define PyMPI_HAVE_MPI_INT 1 #define PyMPI_HAVE_MPI_LONG 1 #define PyMPI_HAVE_MPI_LONG_LONG_INT 1 #define PyMPI_HAVE_MPI_UNSIGNED_CHAR 1 #define PyMPI_HAVE_MPI_UNSIGNED_SHORT 1 #define PyMPI_HAVE_MPI_UNSIGNED 1 #define PyMPI_HAVE_MPI_UNSIGNED_LONG 1 #define PyMPI_HAVE_MPI_FLOAT 1 #define PyMPI_HAVE_MPI_DOUBLE 1 #define PyMPI_HAVE_MPI_LONG_DOUBLE 1 #define PyMPI_HAVE_MPI_SHORT_INT 1 #define PyMPI_HAVE_MPI_2INT 1 #define PyMPI_HAVE_MPI_LONG_INT 1 #define PyMPI_HAVE_MPI_FLOAT_INT 1 #define PyMPI_HAVE_MPI_DOUBLE_INT 1 #define PyMPI_HAVE_MPI_LONG_DOUBLE_INT 1 #define PyMPI_HAVE_MPI_CHARACTER 1 #define 
PyMPI_HAVE_MPI_LOGICAL 1 #define PyMPI_HAVE_MPI_INTEGER 1 #define PyMPI_HAVE_MPI_REAL 1 #define PyMPI_HAVE_MPI_DOUBLE_PRECISION 1 #define PyMPI_HAVE_MPI_COMPLEX 1 #define PyMPI_HAVE_MPI_DOUBLE_COMPLEX 1 #define PyMPI_HAVE_MPI_BOTTOM 1 #define PyMPI_HAVE_MPI_Address 1 #define PyMPI_HAVE_MPI_Type_size 1 #define PyMPI_HAVE_MPI_Type_extent 1 #define PyMPI_HAVE_MPI_Type_lb 1 #define PyMPI_HAVE_MPI_Type_ub 1 #define PyMPI_HAVE_MPI_Type_dup 1 #define PyMPI_HAVE_MPI_Type_contiguous 1 #define PyMPI_HAVE_MPI_Type_vector 1 #define PyMPI_HAVE_MPI_Type_indexed 1 #define PyMPI_HAVE_MPI_Type_hvector 1 #define PyMPI_HAVE_MPI_Type_hindexed 1 #define PyMPI_HAVE_MPI_Type_struct 1 #define PyMPI_HAVE_MPI_Type_commit 1 #define PyMPI_HAVE_MPI_Type_free 1 #define PyMPI_HAVE_MPI_Pack 1 #define PyMPI_HAVE_MPI_Unpack 1 #define PyMPI_HAVE_MPI_Pack_size 1 #define PyMPI_HAVE_MPI_Status 1 #define PyMPI_HAVE_MPI_Get_count 1 #define PyMPI_HAVE_MPI_Get_elements 1 #define PyMPI_HAVE_MPI_Test_cancelled 1 #define PyMPI_HAVE_MPI_Request 1 #define PyMPI_HAVE_MPI_REQUEST_NULL 1 #define PyMPI_HAVE_MPI_Request_free 1 #define PyMPI_HAVE_MPI_Wait 1 #define PyMPI_HAVE_MPI_Test 1 #define PyMPI_HAVE_MPI_Request_get_status 1 #define PyMPI_HAVE_MPI_Cancel 1 #define PyMPI_HAVE_MPI_Waitany 1 #define PyMPI_HAVE_MPI_Testany 1 #define PyMPI_HAVE_MPI_Waitall 1 #define PyMPI_HAVE_MPI_Testall 1 #define PyMPI_HAVE_MPI_Waitsome 1 #define PyMPI_HAVE_MPI_Testsome 1 #define PyMPI_HAVE_MPI_Start 1 #define PyMPI_HAVE_MPI_Startall 1 #define PyMPI_HAVE_MPI_Op 1 #define PyMPI_HAVE_MPI_OP_NULL 1 #define PyMPI_HAVE_MPI_MAX 1 #define PyMPI_HAVE_MPI_MIN 1 #define PyMPI_HAVE_MPI_SUM 1 #define PyMPI_HAVE_MPI_PROD 1 #define PyMPI_HAVE_MPI_LAND 1 #define PyMPI_HAVE_MPI_BAND 1 #define PyMPI_HAVE_MPI_LOR 1 #define PyMPI_HAVE_MPI_BOR 1 #define PyMPI_HAVE_MPI_LXOR 1 #define PyMPI_HAVE_MPI_BXOR 1 #define PyMPI_HAVE_MPI_MAXLOC 1 #define PyMPI_HAVE_MPI_MINLOC 1 #define PyMPI_HAVE_MPI_REPLACE 1 #define PyMPI_HAVE_MPI_Op_free 1 #define PyMPI_HAVE_MPI_User_function 1 #define PyMPI_HAVE_MPI_Op_create 1 #define PyMPI_HAVE_MPI_Group 1 #define PyMPI_HAVE_MPI_GROUP_NULL 1 #define PyMPI_HAVE_MPI_GROUP_EMPTY 1 #define PyMPI_HAVE_MPI_Group_free 1 #define PyMPI_HAVE_MPI_Group_size 1 #define PyMPI_HAVE_MPI_Group_rank 1 #define PyMPI_HAVE_MPI_Group_translate_ranks 1 #define PyMPI_HAVE_MPI_IDENT 1 #define PyMPI_HAVE_MPI_CONGRUENT 1 #define PyMPI_HAVE_MPI_SIMILAR 1 #define PyMPI_HAVE_MPI_UNEQUAL 1 #define PyMPI_HAVE_MPI_Group_compare 1 #define PyMPI_HAVE_MPI_Group_union 1 #define PyMPI_HAVE_MPI_Group_intersection 1 #define PyMPI_HAVE_MPI_Group_difference 1 #define PyMPI_HAVE_MPI_Group_incl 1 #define PyMPI_HAVE_MPI_Group_excl 1 #define PyMPI_HAVE_MPI_Group_range_incl 1 #define PyMPI_HAVE_MPI_Group_range_excl 1 #define PyMPI_HAVE_MPI_Comm 1 #define PyMPI_HAVE_MPI_COMM_NULL 1 #define PyMPI_HAVE_MPI_COMM_SELF 1 #define PyMPI_HAVE_MPI_COMM_WORLD 1 #define PyMPI_HAVE_MPI_Comm_free 1 #define PyMPI_HAVE_MPI_Comm_group 1 #define PyMPI_HAVE_MPI_Comm_size 1 #define PyMPI_HAVE_MPI_Comm_rank 1 #define PyMPI_HAVE_MPI_Comm_compare 1 #define PyMPI_HAVE_MPI_Topo_test 1 #define PyMPI_HAVE_MPI_Comm_test_inter 1 #define PyMPI_HAVE_MPI_Abort 1 #define PyMPI_HAVE_MPI_Send 1 #define PyMPI_HAVE_MPI_Recv 1 #define PyMPI_HAVE_MPI_Sendrecv 1 #define PyMPI_HAVE_MPI_Sendrecv_replace 1 #define PyMPI_HAVE_MPI_BSEND_OVERHEAD 1 #define PyMPI_HAVE_MPI_Buffer_attach 1 #define PyMPI_HAVE_MPI_Buffer_detach 1 #define PyMPI_HAVE_MPI_Bsend 1 #define PyMPI_HAVE_MPI_Ssend 1 #define PyMPI_HAVE_MPI_Rsend 1 #define 
PyMPI_HAVE_MPI_Isend 1 #define PyMPI_HAVE_MPI_Ibsend 1 #define PyMPI_HAVE_MPI_Issend 1 #define PyMPI_HAVE_MPI_Irsend 1 #define PyMPI_HAVE_MPI_Irecv 1 #define PyMPI_HAVE_MPI_Send_init 1 #define PyMPI_HAVE_MPI_Bsend_init 1 #define PyMPI_HAVE_MPI_Ssend_init 1 #define PyMPI_HAVE_MPI_Rsend_init 1 #define PyMPI_HAVE_MPI_Recv_init 1 #define PyMPI_HAVE_MPI_Probe 1 #define PyMPI_HAVE_MPI_Iprobe 1 #define PyMPI_HAVE_MPI_Barrier 1 #define PyMPI_HAVE_MPI_Bcast 1 #define PyMPI_HAVE_MPI_Gather 1 #define PyMPI_HAVE_MPI_Gatherv 1 #define PyMPI_HAVE_MPI_Scatter 1 #define PyMPI_HAVE_MPI_Scatterv 1 #define PyMPI_HAVE_MPI_Allgather 1 #define PyMPI_HAVE_MPI_Allgatherv 1 #define PyMPI_HAVE_MPI_Alltoall 1 #define PyMPI_HAVE_MPI_Alltoallv 1 #define PyMPI_HAVE_MPI_Reduce 1 #define PyMPI_HAVE_MPI_Allreduce 1 #define PyMPI_HAVE_MPI_Reduce_scatter 1 #define PyMPI_HAVE_MPI_Scan 1 #define PyMPI_HAVE_MPI_Comm_dup 1 #define PyMPI_HAVE_MPI_Comm_create 1 #define PyMPI_HAVE_MPI_Comm_split 1 #define PyMPI_HAVE_MPI_CART 1 #define PyMPI_HAVE_MPI_Cart_create 1 #define PyMPI_HAVE_MPI_Cartdim_get 1 #define PyMPI_HAVE_MPI_Cart_get 1 #define PyMPI_HAVE_MPI_Cart_rank 1 #define PyMPI_HAVE_MPI_Cart_coords 1 #define PyMPI_HAVE_MPI_Cart_shift 1 #define PyMPI_HAVE_MPI_Cart_sub 1 #define PyMPI_HAVE_MPI_Cart_map 1 #define PyMPI_HAVE_MPI_Dims_create 1 #define PyMPI_HAVE_MPI_GRAPH 1 #define PyMPI_HAVE_MPI_Graph_create 1 #define PyMPI_HAVE_MPI_Graphdims_get 1 #define PyMPI_HAVE_MPI_Graph_get 1 #define PyMPI_HAVE_MPI_Graph_map 1 #define PyMPI_HAVE_MPI_Graph_neighbors_count 1 #define PyMPI_HAVE_MPI_Graph_neighbors 1 #define PyMPI_HAVE_MPI_Intercomm_create 1 #define PyMPI_HAVE_MPI_Comm_remote_group 1 #define PyMPI_HAVE_MPI_Comm_remote_size 1 #define PyMPI_HAVE_MPI_Intercomm_merge 1 #define PyMPI_HAVE_MPI_Errhandler_get 1 #define PyMPI_HAVE_MPI_Errhandler_set 1 #define PyMPI_HAVE_MPI_Handler_function 1 #define PyMPI_HAVE_MPI_Errhandler_create 1 #define PyMPI_HAVE_MPI_Init 1 #define PyMPI_HAVE_MPI_Finalize 1 #define PyMPI_HAVE_MPI_Initialized 1 #define PyMPI_HAVE_MPI_Finalized 1 #define PyMPI_HAVE_MPI_MAX_PROCESSOR_NAME 1 #define PyMPI_HAVE_MPI_Get_processor_name 1 #define PyMPI_HAVE_MPI_Wtime 1 #define PyMPI_HAVE_MPI_Wtick 1 #define PyMPI_HAVE_MPI_Pcontrol 1 #define PyMPI_HAVE_MPI_Errhandler 1 #define PyMPI_HAVE_MPI_ERRHANDLER_NULL 1 #define PyMPI_HAVE_MPI_ERRORS_RETURN 1 #define PyMPI_HAVE_MPI_ERRORS_ARE_FATAL 1 #define PyMPI_HAVE_MPI_Errhandler_free 1 #define PyMPI_HAVE_MPI_KEYVAL_INVALID 1 #define PyMPI_HAVE_MPI_TAG_UB 1 #define PyMPI_HAVE_MPI_IO 1 #define PyMPI_HAVE_MPI_WTIME_IS_GLOBAL 1 #define PyMPI_HAVE_MPI_HOST 1 #define PyMPI_HAVE_MPI_Attr_get 1 #define PyMPI_HAVE_MPI_Attr_put 1 #define PyMPI_HAVE_MPI_Attr_delete 1 #define PyMPI_HAVE_MPI_Copy_function 1 #define PyMPI_HAVE_MPI_Delete_function 1 #define PyMPI_HAVE_MPI_DUP_FN 1 #define PyMPI_HAVE_MPI_NULL_COPY_FN 1 #define PyMPI_HAVE_MPI_NULL_DELETE_FN 1 #define PyMPI_HAVE_MPI_Keyval_create 1 #define PyMPI_HAVE_MPI_Keyval_free 1 #define PyMPI_HAVE_MPI_SUCCESS 1 #define PyMPI_HAVE_MPI_ERR_LASTCODE 1 #define PyMPI_HAVE_MPI_ERR_COMM 1 #define PyMPI_HAVE_MPI_ERR_GROUP 1 #define PyMPI_HAVE_MPI_ERR_TYPE 1 #define PyMPI_HAVE_MPI_ERR_REQUEST 1 #define PyMPI_HAVE_MPI_ERR_OP 1 #define PyMPI_HAVE_MPI_ERR_BUFFER 1 #define PyMPI_HAVE_MPI_ERR_COUNT 1 #define PyMPI_HAVE_MPI_ERR_TAG 1 #define PyMPI_HAVE_MPI_ERR_RANK 1 #define PyMPI_HAVE_MPI_ERR_ROOT 1 #define PyMPI_HAVE_MPI_ERR_TRUNCATE 1 #define PyMPI_HAVE_MPI_ERR_IN_STATUS 1 #define PyMPI_HAVE_MPI_ERR_PENDING 1 #define PyMPI_HAVE_MPI_ERR_TOPOLOGY 1 
#define PyMPI_HAVE_MPI_ERR_DIMS 1 #define PyMPI_HAVE_MPI_ERR_ARG 1 #define PyMPI_HAVE_MPI_ERR_OTHER 1 #define PyMPI_HAVE_MPI_ERR_UNKNOWN 1 #define PyMPI_HAVE_MPI_ERR_INTERN 1 #define PyMPI_HAVE_MPI_MAX_ERROR_STRING 1 #define PyMPI_HAVE_MPI_Error_class 1 #define PyMPI_HAVE_MPI_Error_string 1 mpi4py-4.0.3/src/lib-mpi/config/mpi-12.h000066400000000000000000000003261475341043600175130ustar00rootroot00000000000000#if defined(MPI_VERSION) #if (MPI_VERSION > 1) || (MPI_VERSION == 1 && MPI_SUBVERSION >= 2) #define PyMPI_HAVE_MPI_VERSION 1 #define PyMPI_HAVE_MPI_SUBVERSION 1 #define PyMPI_HAVE_MPI_Get_version 1 #endif #endif mpi4py-4.0.3/src/lib-mpi/config/mpi-20.h000066400000000000000000000314741475341043600175220ustar00rootroot00000000000000#if defined(MPI_VERSION) #if (MPI_VERSION >= 2) #define PyMPI_HAVE_MPI_ERR_KEYVAL 1 #define PyMPI_HAVE_MPI_MAX_OBJECT_NAME 1 #define PyMPI_HAVE_MPI_WCHAR 1 #define PyMPI_HAVE_MPI_SIGNED_CHAR 1 #define PyMPI_HAVE_MPI_LONG_LONG 1 #define PyMPI_HAVE_MPI_UNSIGNED_LONG_LONG 1 #define PyMPI_HAVE_MPI_INTEGER1 1 #define PyMPI_HAVE_MPI_INTEGER2 1 #define PyMPI_HAVE_MPI_INTEGER4 1 #define PyMPI_HAVE_MPI_INTEGER8 1 #define PyMPI_HAVE_MPI_INTEGER16 1 #define PyMPI_HAVE_MPI_REAL4 1 #define PyMPI_HAVE_MPI_REAL8 1 #define PyMPI_HAVE_MPI_REAL16 1 #define PyMPI_HAVE_MPI_COMPLEX8 1 #define PyMPI_HAVE_MPI_COMPLEX16 1 #define PyMPI_HAVE_MPI_COMPLEX32 1 #define PyMPI_HAVE_MPI_Type_dup 1 #define PyMPI_HAVE_MPI_Type_create_indexed_block 1 #define PyMPI_HAVE_MPI_ORDER_C 1 #define PyMPI_HAVE_MPI_ORDER_FORTRAN 1 #define PyMPI_HAVE_MPI_Type_create_subarray 1 #define PyMPI_HAVE_MPI_DISTRIBUTE_NONE 1 #define PyMPI_HAVE_MPI_DISTRIBUTE_BLOCK 1 #define PyMPI_HAVE_MPI_DISTRIBUTE_CYCLIC 1 #define PyMPI_HAVE_MPI_DISTRIBUTE_DFLT_DARG 1 #define PyMPI_HAVE_MPI_Type_create_darray 1 #define PyMPI_HAVE_MPI_Get_address 1 #define PyMPI_HAVE_MPI_Type_create_hvector 1 #define PyMPI_HAVE_MPI_Type_create_hindexed 1 #define PyMPI_HAVE_MPI_Type_create_struct 1 #define PyMPI_HAVE_MPI_Type_get_extent 1 #define PyMPI_HAVE_MPI_Type_create_resized 1 #define PyMPI_HAVE_MPI_Type_get_true_extent 1 #define PyMPI_HAVE_MPI_Type_create_f90_integer 1 #define PyMPI_HAVE_MPI_Type_create_f90_real 1 #define PyMPI_HAVE_MPI_Type_create_f90_complex 1 #define PyMPI_HAVE_MPI_TYPECLASS_INTEGER 1 #define PyMPI_HAVE_MPI_TYPECLASS_REAL 1 #define PyMPI_HAVE_MPI_TYPECLASS_COMPLEX 1 #define PyMPI_HAVE_MPI_Type_match_size 1 #define PyMPI_HAVE_MPI_Pack_external 1 #define PyMPI_HAVE_MPI_Unpack_external 1 #define PyMPI_HAVE_MPI_Pack_external_size 1 #define PyMPI_HAVE_MPI_COMBINER_NAMED 1 #define PyMPI_HAVE_MPI_COMBINER_DUP 1 #define PyMPI_HAVE_MPI_COMBINER_CONTIGUOUS 1 #define PyMPI_HAVE_MPI_COMBINER_VECTOR 1 #define PyMPI_HAVE_MPI_COMBINER_HVECTOR_INTEGER 1 #define PyMPI_HAVE_MPI_COMBINER_HVECTOR 1 #define PyMPI_HAVE_MPI_COMBINER_INDEXED 1 #define PyMPI_HAVE_MPI_COMBINER_HINDEXED_INTEGER 1 #define PyMPI_HAVE_MPI_COMBINER_HINDEXED 1 #define PyMPI_HAVE_MPI_COMBINER_INDEXED_BLOCK 1 #define PyMPI_HAVE_MPI_COMBINER_STRUCT_INTEGER 1 #define PyMPI_HAVE_MPI_COMBINER_STRUCT 1 #define PyMPI_HAVE_MPI_COMBINER_SUBARRAY 1 #define PyMPI_HAVE_MPI_COMBINER_DARRAY 1 #define PyMPI_HAVE_MPI_COMBINER_F90_REAL 1 #define PyMPI_HAVE_MPI_COMBINER_F90_COMPLEX 1 #define PyMPI_HAVE_MPI_COMBINER_F90_INTEGER 1 #define PyMPI_HAVE_MPI_COMBINER_RESIZED 1 #define PyMPI_HAVE_MPI_Type_get_envelope 1 #define PyMPI_HAVE_MPI_Type_get_contents 1 #define PyMPI_HAVE_MPI_Type_get_name 1 #define PyMPI_HAVE_MPI_Type_set_name 1 #define PyMPI_HAVE_MPI_Type_get_attr 1 #define 
PyMPI_HAVE_MPI_Type_set_attr 1 #define PyMPI_HAVE_MPI_Type_delete_attr 1 #define PyMPI_HAVE_MPI_Type_copy_attr_function 1 #define PyMPI_HAVE_MPI_Type_delete_attr_function 1 #define PyMPI_HAVE_MPI_TYPE_NULL_COPY_FN 1 #define PyMPI_HAVE_MPI_TYPE_DUP_FN 1 #define PyMPI_HAVE_MPI_TYPE_NULL_DELETE_FN 1 #define PyMPI_HAVE_MPI_Type_create_keyval 1 #define PyMPI_HAVE_MPI_Type_free_keyval 1 #define PyMPI_HAVE_MPI_STATUS_IGNORE 1 #define PyMPI_HAVE_MPI_STATUSES_IGNORE 1 #define PyMPI_HAVE_MPI_Status_set_elements 1 #define PyMPI_HAVE_MPI_Status_set_cancelled 1 #define PyMPI_HAVE_MPI_Request_get_status 1 #define PyMPI_HAVE_MPI_Grequest_cancel_function 1 #define PyMPI_HAVE_MPI_Grequest_free_function 1 #define PyMPI_HAVE_MPI_Grequest_query_function 1 #define PyMPI_HAVE_MPI_Grequest_start 1 #define PyMPI_HAVE_MPI_Grequest_complete 1 #define PyMPI_HAVE_MPI_ROOT 1 #define PyMPI_HAVE_MPI_IN_PLACE 1 #define PyMPI_HAVE_MPI_Alltoallw 1 #define PyMPI_HAVE_MPI_Exscan 1 #define PyMPI_HAVE_MPI_Comm_get_errhandler 1 #define PyMPI_HAVE_MPI_Comm_set_errhandler 1 #define PyMPI_HAVE_MPI_Comm_errhandler_fn 1 #define PyMPI_HAVE_MPI_Comm_create_errhandler 1 #define PyMPI_HAVE_MPI_Comm_call_errhandler 1 #define PyMPI_HAVE_MPI_Comm_get_name 1 #define PyMPI_HAVE_MPI_Comm_set_name 1 #define PyMPI_HAVE_MPI_Comm_get_attr 1 #define PyMPI_HAVE_MPI_Comm_set_attr 1 #define PyMPI_HAVE_MPI_Comm_delete_attr 1 #define PyMPI_HAVE_MPI_Comm_copy_attr_function 1 #define PyMPI_HAVE_MPI_Comm_delete_attr_function 1 #define PyMPI_HAVE_MPI_COMM_DUP_FN 1 #define PyMPI_HAVE_MPI_COMM_NULL_COPY_FN 1 #define PyMPI_HAVE_MPI_COMM_NULL_DELETE_FN 1 #define PyMPI_HAVE_MPI_Comm_create_keyval 1 #define PyMPI_HAVE_MPI_Comm_free_keyval 1 #define PyMPI_HAVE_MPI_MAX_PORT_NAME 1 #define PyMPI_HAVE_MPI_Open_port 1 #define PyMPI_HAVE_MPI_Close_port 1 #define PyMPI_HAVE_MPI_Publish_name 1 #define PyMPI_HAVE_MPI_Unpublish_name 1 #define PyMPI_HAVE_MPI_Lookup_name 1 #define PyMPI_HAVE_MPI_Comm_accept 1 #define PyMPI_HAVE_MPI_Comm_connect 1 #define PyMPI_HAVE_MPI_Comm_join 1 #define PyMPI_HAVE_MPI_Comm_disconnect 1 #define PyMPI_HAVE_MPI_ARGV_NULL 1 #define PyMPI_HAVE_MPI_ARGVS_NULL 1 #define PyMPI_HAVE_MPI_ERRCODES_IGNORE 1 #define PyMPI_HAVE_MPI_Comm_spawn 1 #define PyMPI_HAVE_MPI_Comm_spawn_multiple 1 #define PyMPI_HAVE_MPI_Comm_get_parent 1 #define PyMPI_HAVE_MPI_UNIVERSE_SIZE 1 #define PyMPI_HAVE_MPI_APPNUM 1 #define PyMPI_HAVE_MPI_ERR_SPAWN 1 #define PyMPI_HAVE_MPI_ERR_PORT 1 #define PyMPI_HAVE_MPI_ERR_SERVICE 1 #define PyMPI_HAVE_MPI_ERR_NAME 1 #define PyMPI_HAVE_MPI_Alloc_mem 1 #define PyMPI_HAVE_MPI_Free_mem 1 #define PyMPI_HAVE_MPI_ERR_NO_MEM 1 #define PyMPI_HAVE_MPI_Info 1 #define PyMPI_HAVE_MPI_INFO_NULL 1 #define PyMPI_HAVE_MPI_Info_free 1 #define PyMPI_HAVE_MPI_Info_create 1 #define PyMPI_HAVE_MPI_Info_dup 1 #define PyMPI_HAVE_MPI_MAX_INFO_KEY 1 #define PyMPI_HAVE_MPI_MAX_INFO_VAL 1 #define PyMPI_HAVE_MPI_Info_get 1 #define PyMPI_HAVE_MPI_Info_set 1 #define PyMPI_HAVE_MPI_Info_delete 1 #define PyMPI_HAVE_MPI_Info_get_nkeys 1 #define PyMPI_HAVE_MPI_Info_get_nthkey 1 #define PyMPI_HAVE_MPI_Info_get_valuelen 1 #define PyMPI_HAVE_MPI_ERR_INFO 1 #define PyMPI_HAVE_MPI_ERR_INFO_KEY 1 #define PyMPI_HAVE_MPI_ERR_INFO_VALUE 1 #define PyMPI_HAVE_MPI_ERR_INFO_NOKEY 1 #define PyMPI_HAVE_MPI_Win 1 #define PyMPI_HAVE_MPI_WIN_NULL 1 #define PyMPI_HAVE_MPI_Win_free 1 #define PyMPI_HAVE_MPI_Win_create 1 #define PyMPI_HAVE_MPI_Win_get_group 1 #define PyMPI_HAVE_MPI_Get 1 #define PyMPI_HAVE_MPI_Put 1 #define PyMPI_HAVE_MPI_REPLACE 1 #define PyMPI_HAVE_MPI_Accumulate 1 
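/* one-sided communication (RMA): synchronization assertions, active/passive target epochs, and locks */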
#define PyMPI_HAVE_MPI_MODE_NOCHECK 1 #define PyMPI_HAVE_MPI_MODE_NOSTORE 1 #define PyMPI_HAVE_MPI_MODE_NOPUT 1 #define PyMPI_HAVE_MPI_MODE_NOPRECEDE 1 #define PyMPI_HAVE_MPI_MODE_NOSUCCEED 1 #define PyMPI_HAVE_MPI_Win_fence 1 #define PyMPI_HAVE_MPI_Win_post 1 #define PyMPI_HAVE_MPI_Win_start 1 #define PyMPI_HAVE_MPI_Win_complete 1 #define PyMPI_HAVE_MPI_Win_wait 1 #define PyMPI_HAVE_MPI_Win_test 1 #define PyMPI_HAVE_MPI_LOCK_EXCLUSIVE 1 #define PyMPI_HAVE_MPI_LOCK_SHARED 1 #define PyMPI_HAVE_MPI_Win_lock 1 #define PyMPI_HAVE_MPI_Win_unlock 1 #define PyMPI_HAVE_MPI_Win_get_errhandler 1 #define PyMPI_HAVE_MPI_Win_set_errhandler 1 #define PyMPI_HAVE_MPI_Win_errhandler_fn 1 #define PyMPI_HAVE_MPI_Win_create_errhandler 1 #define PyMPI_HAVE_MPI_Win_call_errhandler 1 #define PyMPI_HAVE_MPI_Win_get_name 1 #define PyMPI_HAVE_MPI_Win_set_name 1 #define PyMPI_HAVE_MPI_WIN_BASE 1 #define PyMPI_HAVE_MPI_WIN_SIZE 1 #define PyMPI_HAVE_MPI_WIN_DISP_UNIT 1 #define PyMPI_HAVE_MPI_Win_get_attr 1 #define PyMPI_HAVE_MPI_Win_set_attr 1 #define PyMPI_HAVE_MPI_Win_delete_attr 1 #define PyMPI_HAVE_MPI_Win_copy_attr_function 1 #define PyMPI_HAVE_MPI_Win_delete_attr_function 1 #define PyMPI_HAVE_MPI_WIN_DUP_FN 1 #define PyMPI_HAVE_MPI_WIN_NULL_COPY_FN 1 #define PyMPI_HAVE_MPI_WIN_NULL_DELETE_FN 1 #define PyMPI_HAVE_MPI_Win_create_keyval 1 #define PyMPI_HAVE_MPI_Win_free_keyval 1 #define PyMPI_HAVE_MPI_ERR_WIN 1 #define PyMPI_HAVE_MPI_ERR_BASE 1 #define PyMPI_HAVE_MPI_ERR_SIZE 1 #define PyMPI_HAVE_MPI_ERR_DISP 1 #define PyMPI_HAVE_MPI_ERR_ASSERT 1 #define PyMPI_HAVE_MPI_ERR_LOCKTYPE 1 #define PyMPI_HAVE_MPI_ERR_RMA_CONFLICT 1 #define PyMPI_HAVE_MPI_ERR_RMA_SYNC 1 #define PyMPI_HAVE_MPI_Offset 1 #define PyMPI_HAVE_MPI_File 1 #define PyMPI_HAVE_MPI_FILE_NULL 1 #define PyMPI_HAVE_MPI_MODE_RDONLY 1 #define PyMPI_HAVE_MPI_MODE_RDWR 1 #define PyMPI_HAVE_MPI_MODE_WRONLY 1 #define PyMPI_HAVE_MPI_MODE_CREATE 1 #define PyMPI_HAVE_MPI_MODE_EXCL 1 #define PyMPI_HAVE_MPI_MODE_DELETE_ON_CLOSE 1 #define PyMPI_HAVE_MPI_MODE_UNIQUE_OPEN 1 #define PyMPI_HAVE_MPI_MODE_APPEND 1 #define PyMPI_HAVE_MPI_MODE_SEQUENTIAL 1 #define PyMPI_HAVE_MPI_File_open 1 #define PyMPI_HAVE_MPI_File_close 1 #define PyMPI_HAVE_MPI_File_delete 1 #define PyMPI_HAVE_MPI_File_set_size 1 #define PyMPI_HAVE_MPI_File_preallocate 1 #define PyMPI_HAVE_MPI_File_get_size 1 #define PyMPI_HAVE_MPI_File_get_group 1 #define PyMPI_HAVE_MPI_File_get_amode 1 #define PyMPI_HAVE_MPI_File_set_info 1 #define PyMPI_HAVE_MPI_File_get_info 1 #define PyMPI_HAVE_MPI_File_get_view 1 #define PyMPI_HAVE_MPI_File_set_view 1 #define PyMPI_HAVE_MPI_File_read_at 1 #define PyMPI_HAVE_MPI_File_read_at_all 1 #define PyMPI_HAVE_MPI_File_write_at 1 #define PyMPI_HAVE_MPI_File_write_at_all 1 #define PyMPI_HAVE_MPI_File_iread_at 1 #define PyMPI_HAVE_MPI_File_iwrite_at 1 #define PyMPI_HAVE_MPI_SEEK_SET 1 #define PyMPI_HAVE_MPI_SEEK_CUR 1 #define PyMPI_HAVE_MPI_SEEK_END 1 #define PyMPI_HAVE_MPI_DISPLACEMENT_CURRENT 1 #define PyMPI_HAVE_MPI_File_seek 1 #define PyMPI_HAVE_MPI_File_get_position 1 #define PyMPI_HAVE_MPI_File_get_byte_offset 1 #define PyMPI_HAVE_MPI_File_read 1 #define PyMPI_HAVE_MPI_File_read_all 1 #define PyMPI_HAVE_MPI_File_write 1 #define PyMPI_HAVE_MPI_File_write_all 1 #define PyMPI_HAVE_MPI_File_iread 1 #define PyMPI_HAVE_MPI_File_iwrite 1 #define PyMPI_HAVE_MPI_File_read_shared 1 #define PyMPI_HAVE_MPI_File_write_shared 1 #define PyMPI_HAVE_MPI_File_iread_shared 1 #define PyMPI_HAVE_MPI_File_iwrite_shared 1 #define PyMPI_HAVE_MPI_File_read_ordered 1 #define 
PyMPI_HAVE_MPI_File_write_ordered 1 #define PyMPI_HAVE_MPI_File_seek_shared 1 #define PyMPI_HAVE_MPI_File_get_position_shared 1 #define PyMPI_HAVE_MPI_File_read_at_all_begin 1 #define PyMPI_HAVE_MPI_File_read_at_all_end 1 #define PyMPI_HAVE_MPI_File_write_at_all_begin 1 #define PyMPI_HAVE_MPI_File_write_at_all_end 1 #define PyMPI_HAVE_MPI_File_read_all_begin 1 #define PyMPI_HAVE_MPI_File_read_all_end 1 #define PyMPI_HAVE_MPI_File_write_all_begin 1 #define PyMPI_HAVE_MPI_File_write_all_end 1 #define PyMPI_HAVE_MPI_File_read_ordered_begin 1 #define PyMPI_HAVE_MPI_File_read_ordered_end 1 #define PyMPI_HAVE_MPI_File_write_ordered_begin 1 #define PyMPI_HAVE_MPI_File_write_ordered_end 1 #define PyMPI_HAVE_MPI_File_get_type_extent 1 #define PyMPI_HAVE_MPI_File_set_atomicity 1 #define PyMPI_HAVE_MPI_File_get_atomicity 1 #define PyMPI_HAVE_MPI_File_sync 1 #define PyMPI_HAVE_MPI_File_get_errhandler 1 #define PyMPI_HAVE_MPI_File_set_errhandler 1 #define PyMPI_HAVE_MPI_File_errhandler_fn 1 #define PyMPI_HAVE_MPI_File_create_errhandler 1 #define PyMPI_HAVE_MPI_File_call_errhandler 1 #define PyMPI_HAVE_MPI_Datarep_conversion_function 1 #define PyMPI_HAVE_MPI_Datarep_extent_function 1 #define PyMPI_HAVE_MPI_CONVERSION_FN_NULL 1 #define PyMPI_HAVE_MPI_MAX_DATAREP_STRING 1 #define PyMPI_HAVE_MPI_Register_datarep 1 #define PyMPI_HAVE_MPI_ERR_FILE 1 #define PyMPI_HAVE_MPI_ERR_NOT_SAME 1 #define PyMPI_HAVE_MPI_ERR_BAD_FILE 1 #define PyMPI_HAVE_MPI_ERR_NO_SUCH_FILE 1 #define PyMPI_HAVE_MPI_ERR_FILE_EXISTS 1 #define PyMPI_HAVE_MPI_ERR_FILE_IN_USE 1 #define PyMPI_HAVE_MPI_ERR_AMODE 1 #define PyMPI_HAVE_MPI_ERR_ACCESS 1 #define PyMPI_HAVE_MPI_ERR_READ_ONLY 1 #define PyMPI_HAVE_MPI_ERR_NO_SPACE 1 #define PyMPI_HAVE_MPI_ERR_QUOTA 1 #define PyMPI_HAVE_MPI_ERR_UNSUPPORTED_DATAREP 1 #define PyMPI_HAVE_MPI_ERR_UNSUPPORTED_OPERATION 1 #define PyMPI_HAVE_MPI_ERR_CONVERSION 1 #define PyMPI_HAVE_MPI_ERR_DUP_DATAREP 1 #define PyMPI_HAVE_MPI_ERR_IO 1 #define PyMPI_HAVE_MPI_LASTUSEDCODE 1 #define PyMPI_HAVE_MPI_Add_error_class 1 #define PyMPI_HAVE_MPI_Add_error_code 1 #define PyMPI_HAVE_MPI_Add_error_string 1 #define PyMPI_HAVE_MPI_THREAD_SINGLE 1 #define PyMPI_HAVE_MPI_THREAD_FUNNELED 1 #define PyMPI_HAVE_MPI_THREAD_SERIALIZED 1 #define PyMPI_HAVE_MPI_THREAD_MULTIPLE 1 #define PyMPI_HAVE_MPI_Init_thread 1 #define PyMPI_HAVE_MPI_Query_thread 1 #define PyMPI_HAVE_MPI_Is_thread_main 1 #define PyMPI_HAVE_MPI_Fint 1 #define PyMPI_HAVE_MPI_F_STATUS_IGNORE 1 #define PyMPI_HAVE_MPI_F_STATUSES_IGNORE 1 #define PyMPI_HAVE_MPI_Status_c2f 1 #define PyMPI_HAVE_MPI_Status_f2c 1 #define PyMPI_HAVE_MPI_Type_c2f 1 #define PyMPI_HAVE_MPI_Request_c2f 1 #define PyMPI_HAVE_MPI_Op_c2f 1 #define PyMPI_HAVE_MPI_Info_c2f 1 #define PyMPI_HAVE_MPI_Group_c2f 1 #define PyMPI_HAVE_MPI_Comm_c2f 1 #define PyMPI_HAVE_MPI_Win_c2f 1 #define PyMPI_HAVE_MPI_File_c2f 1 #define PyMPI_HAVE_MPI_Errhandler_c2f 1 #define PyMPI_HAVE_MPI_Type_f2c 1 #define PyMPI_HAVE_MPI_Request_f2c 1 #define PyMPI_HAVE_MPI_Op_f2c 1 #define PyMPI_HAVE_MPI_Info_f2c 1 #define PyMPI_HAVE_MPI_Group_f2c 1 #define PyMPI_HAVE_MPI_Comm_f2c 1 #define PyMPI_HAVE_MPI_Win_f2c 1 #define PyMPI_HAVE_MPI_File_f2c 1 #define PyMPI_HAVE_MPI_Errhandler_f2c 1 #endif #endif mpi4py-4.0.3/src/lib-mpi/config/mpi-22.h000066400000000000000000000023311475341043600175120ustar00rootroot00000000000000#if defined(MPI_VERSION) #if (MPI_VERSION > 2) || (MPI_VERSION == 2 && MPI_SUBVERSION >= 2) #define PyMPI_HAVE_MPI_AINT 1 #define PyMPI_HAVE_MPI_OFFSET 1 #define PyMPI_HAVE_MPI_C_BOOL 1 #define PyMPI_HAVE_MPI_INT8_T 1 
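/* Added example (not in the original header): every block in these config
 * headers keys on the same (MPI_VERSION, MPI_SUBVERSION) pair.  The macro
 * below -- PYMPI_DEMO_NUMVERSION is a hypothetical name -- folds the pair
 * into a single integer so the MPI-2.2 test above becomes one comparison. */
#if defined(MPI_VERSION) && defined(MPI_SUBVERSION)
#define PYMPI_DEMO_NUMVERSION (MPI_VERSION * 100 + MPI_SUBVERSION * 10)
#else
#define PYMPI_DEMO_NUMVERSION 0
#endif
/* MPI 2.2 -> 220, MPI 3.1 -> 310, MPI 4.0 -> 400; the guard opening this
 * file is then equivalent to:  #if PYMPI_DEMO_NUMVERSION >= 220  */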
#define PyMPI_HAVE_MPI_INT16_T 1 #define PyMPI_HAVE_MPI_INT32_T 1 #define PyMPI_HAVE_MPI_INT64_T 1 #define PyMPI_HAVE_MPI_UINT8_T 1 #define PyMPI_HAVE_MPI_UINT16_T 1 #define PyMPI_HAVE_MPI_UINT32_T 1 #define PyMPI_HAVE_MPI_UINT64_T 1 #define PyMPI_HAVE_MPI_C_COMPLEX 1 #define PyMPI_HAVE_MPI_C_FLOAT_COMPLEX 1 #define PyMPI_HAVE_MPI_C_DOUBLE_COMPLEX 1 #define PyMPI_HAVE_MPI_C_LONG_DOUBLE_COMPLEX 1 #define PyMPI_HAVE_MPI_REAL2 1 #define PyMPI_HAVE_MPI_COMPLEX4 1 #define PyMPI_HAVE_MPI_Op_commutative 1 #define PyMPI_HAVE_MPI_Reduce_local 1 #define PyMPI_HAVE_MPI_Reduce_scatter_block 1 #define PyMPI_HAVE_MPI_DIST_GRAPH 1 #define PyMPI_HAVE_MPI_UNWEIGHTED 1 #define PyMPI_HAVE_MPI_Dist_graph_create_adjacent 1 #define PyMPI_HAVE_MPI_Dist_graph_create 1 #define PyMPI_HAVE_MPI_Dist_graph_neighbors_count 1 #define PyMPI_HAVE_MPI_Dist_graph_neighbors 1 #define PyMPI_HAVE_MPI_Comm_errhandler_function 1 #define PyMPI_HAVE_MPI_Win_errhandler_function 1 #define PyMPI_HAVE_MPI_File_errhandler_function 1 #endif #endif mpi4py-4.0.3/src/lib-mpi/config/mpi-30.h000066400000000000000000000077731475341043600175300ustar00rootroot00000000000000#if defined(MPI_VERSION) #if (MPI_VERSION >= 3) #define PyMPI_HAVE_MPI_Count 1 #define PyMPI_HAVE_MPI_COUNT 1 #define PyMPI_HAVE_MPI_CXX_BOOL 1 #define PyMPI_HAVE_MPI_CXX_FLOAT_COMPLEX 1 #define PyMPI_HAVE_MPI_CXX_DOUBLE_COMPLEX 1 #define PyMPI_HAVE_MPI_CXX_LONG_DOUBLE_COMPLEX 1 #define PyMPI_HAVE_MPI_Type_size_x 1 #define PyMPI_HAVE_MPI_Type_get_extent_x 1 #define PyMPI_HAVE_MPI_Type_get_true_extent_x 1 #define PyMPI_HAVE_MPI_Get_elements_x 1 #define PyMPI_HAVE_MPI_Status_set_elements_x 1 #define PyMPI_HAVE_MPI_COMBINER_HINDEXED_BLOCK #define PyMPI_HAVE_MPI_Type_create_hindexed_block 1 #define PyMPI_HAVE_MPI_NO_OP 1 #define PyMPI_HAVE_MPI_Message 1 #define PyMPI_HAVE_MPI_MESSAGE_NULL 1 #define PyMPI_HAVE_MPI_MESSAGE_NO_PROC 1 #define PyMPI_HAVE_MPI_Message_c2f 1 #define PyMPI_HAVE_MPI_Message_f2c 1 #define PyMPI_HAVE_MPI_Mprobe 1 #define PyMPI_HAVE_MPI_Improbe 1 #define PyMPI_HAVE_MPI_Mrecv 1 #define PyMPI_HAVE_MPI_Imrecv 1 #define PyMPI_HAVE_MPI_Neighbor_allgather 1 #define PyMPI_HAVE_MPI_Neighbor_allgatherv 1 #define PyMPI_HAVE_MPI_Neighbor_alltoall 1 #define PyMPI_HAVE_MPI_Neighbor_alltoallv 1 #define PyMPI_HAVE_MPI_Neighbor_alltoallw 1 #define PyMPI_HAVE_MPI_Ibarrier 1 #define PyMPI_HAVE_MPI_Ibcast 1 #define PyMPI_HAVE_MPI_Igather 1 #define PyMPI_HAVE_MPI_Igatherv 1 #define PyMPI_HAVE_MPI_Iscatter 1 #define PyMPI_HAVE_MPI_Iscatterv 1 #define PyMPI_HAVE_MPI_Iallgather 1 #define PyMPI_HAVE_MPI_Iallgatherv 1 #define PyMPI_HAVE_MPI_Ialltoall 1 #define PyMPI_HAVE_MPI_Ialltoallv 1 #define PyMPI_HAVE_MPI_Ialltoallw 1 #define PyMPI_HAVE_MPI_Ireduce 1 #define PyMPI_HAVE_MPI_Iallreduce 1 #define PyMPI_HAVE_MPI_Ireduce_scatter_block 1 #define PyMPI_HAVE_MPI_Ireduce_scatter 1 #define PyMPI_HAVE_MPI_Iscan 1 #define PyMPI_HAVE_MPI_Iexscan 1 #define PyMPI_HAVE_MPI_Ineighbor_allgather 1 #define PyMPI_HAVE_MPI_Ineighbor_allgatherv 1 #define PyMPI_HAVE_MPI_Ineighbor_alltoall 1 #define PyMPI_HAVE_MPI_Ineighbor_alltoallv 1 #define PyMPI_HAVE_MPI_Ineighbor_alltoallw 1 #define PyMPI_HAVE_MPI_WEIGHTS_EMPTY 1 #define PyMPI_HAVE_MPI_Comm_dup_with_info 1 #define PyMPI_HAVE_MPI_Comm_idup 1 #define PyMPI_HAVE_MPI_Comm_create_group 1 #define PyMPI_HAVE_MPI_COMM_TYPE_SHARED 1 #define PyMPI_HAVE_MPI_Comm_split_type 1 #define PyMPI_HAVE_MPI_Comm_set_info 1 #define PyMPI_HAVE_MPI_Comm_get_info 1 #define PyMPI_HAVE_MPI_WIN_CREATE_FLAVOR 1 #define PyMPI_HAVE_MPI_WIN_FLAVOR_CREATE 1 #define 
PyMPI_HAVE_MPI_WIN_FLAVOR_ALLOCATE 1 #define PyMPI_HAVE_MPI_WIN_FLAVOR_DYNAMIC 1 #define PyMPI_HAVE_MPI_WIN_FLAVOR_SHARED 1 #define PyMPI_HAVE_MPI_WIN_MODEL 1 #define PyMPI_HAVE_MPI_WIN_SEPARATE 1 #define PyMPI_HAVE_MPI_WIN_UNIFIED 1 #define PyMPI_HAVE_MPI_Win_allocate 1 #define PyMPI_HAVE_MPI_Win_allocate_shared 1 #define PyMPI_HAVE_MPI_Win_shared_query 1 #define PyMPI_HAVE_MPI_Win_create_dynamic 1 #define PyMPI_HAVE_MPI_Win_attach 1 #define PyMPI_HAVE_MPI_Win_detach 1 #define PyMPI_HAVE_MPI_Win_set_info 1 #define PyMPI_HAVE_MPI_Win_get_info 1 #define PyMPI_HAVE_MPI_Get_accumulate 1 #define PyMPI_HAVE_MPI_Fetch_and_op 1 #define PyMPI_HAVE_MPI_Compare_and_swap 1 #define PyMPI_HAVE_MPI_Rget 1 #define PyMPI_HAVE_MPI_Rput 1 #define PyMPI_HAVE_MPI_Raccumulate 1 #define PyMPI_HAVE_MPI_Rget_accumulate 1 #define PyMPI_HAVE_MPI_Win_lock_all 1 #define PyMPI_HAVE_MPI_Win_unlock_all 1 #define PyMPI_HAVE_MPI_Win_flush 1 #define PyMPI_HAVE_MPI_Win_flush_all 1 #define PyMPI_HAVE_MPI_Win_flush_local 1 #define PyMPI_HAVE_MPI_Win_flush_local_all 1 #define PyMPI_HAVE_MPI_Win_sync 1 #define PyMPI_HAVE_MPI_ERR_RMA_RANGE 1 #define PyMPI_HAVE_MPI_ERR_RMA_ATTACH 1 #define PyMPI_HAVE_MPI_ERR_RMA_SHARED 1 #define PyMPI_HAVE_MPI_ERR_RMA_FLAVOR 1 #define PyMPI_HAVE_MPI_MAX_LIBRARY_VERSION_STRING 1 #define PyMPI_HAVE_MPI_Get_library_version 1 #define PyMPI_HAVE_MPI_INFO_ENV 1 /* #define PyMPI_HAVE_MPI_F08_status 1 #define PyMPI_HAVE_MPI_F08_STATUS_IGNORE 1 #define PyMPI_HAVE_MPI_F08_STATUSES_IGNORE 1 #define PyMPI_HAVE_MPI_Status_c2f08 1 #define PyMPI_HAVE_MPI_Status_f082c 1 #define PyMPI_HAVE_MPI_Status_f2f08 1 #define PyMPI_HAVE_MPI_Status_f082f 1 */ #endif #endif mpi4py-4.0.3/src/lib-mpi/config/mpi-31.h000066400000000000000000000005341475341043600175150ustar00rootroot00000000000000#if defined(MPI_VERSION) #if (MPI_VERSION > 3) || (MPI_VERSION == 3 && MPI_SUBVERSION >= 1) #define PyMPI_HAVE_MPI_Aint_add 1 #define PyMPI_HAVE_MPI_Aint_diff 1 #define PyMPI_HAVE_MPI_File_iread_at_all 1 #define PyMPI_HAVE_MPI_File_iwrite_at_all 1 #define PyMPI_HAVE_MPI_File_iread_all 1 #define PyMPI_HAVE_MPI_File_iwrite_all 1 #endif #endif mpi4py-4.0.3/src/lib-mpi/config/mpi-40.h000066400000000000000000000217231475341043600175200ustar00rootroot00000000000000#if defined(MPI_VERSION) #if (MPI_VERSION >= 4) #define PyMPI_HAVE_MPI_Type_contiguous_c 1 #define PyMPI_HAVE_MPI_Type_vector_c 1 #define PyMPI_HAVE_MPI_Type_indexed_c 1 #define PyMPI_HAVE_MPI_Type_create_indexed_block_c 1 #define PyMPI_HAVE_MPI_Type_create_subarray_c 1 #define PyMPI_HAVE_MPI_Type_create_darray_c 1 #define PyMPI_HAVE_MPI_Type_create_hvector_c 1 #define PyMPI_HAVE_MPI_Type_create_hindexed_c 1 #define PyMPI_HAVE_MPI_Type_create_hindexed_block_c 1 #define PyMPI_HAVE_MPI_Type_create_struct_c 1 #define PyMPI_HAVE_MPI_Type_create_resized_c 1 #define PyMPI_HAVE_MPI_Type_size_c 1 #define PyMPI_HAVE_MPI_Type_get_extent_c 1 #define PyMPI_HAVE_MPI_Type_get_true_extent_c 1 #define PyMPI_HAVE_MPI_Type_get_envelope_c 1 #define PyMPI_HAVE_MPI_Type_get_contents_c 1 #define PyMPI_HAVE_MPI_Pack_c 1 #define PyMPI_HAVE_MPI_Unpack_c 1 #define PyMPI_HAVE_MPI_Pack_size_c 1 #define PyMPI_HAVE_MPI_Pack_external_c 1 #define PyMPI_HAVE_MPI_Unpack_external_c 1 #define PyMPI_HAVE_MPI_Pack_external_size_c 1 #define PyMPI_HAVE_MPI_Get_count_c 1 #define PyMPI_HAVE_MPI_Get_elements_c 1 #define PyMPI_HAVE_MPI_Status_set_elements_c 1 #define PyMPI_HAVE_MPI_Pready 1 #define PyMPI_HAVE_MPI_Pready_range 1 #define PyMPI_HAVE_MPI_Pready_list 1 #define PyMPI_HAVE_MPI_Parrived 1 #define 
PyMPI_HAVE_MPI_User_function_c 1 #define PyMPI_HAVE_MPI_Op_create_c 1 #define PyMPI_HAVE_MPI_Info_create_env 1 #define PyMPI_HAVE_MPI_Info_get_string 1 #define PyMPI_HAVE_MPI_ERRORS_ABORT 1 #define PyMPI_HAVE_MPI_Session 1 #define PyMPI_HAVE_MPI_SESSION_NULL 1 #define PyMPI_HAVE_MPI_Session_c2f 1 #define PyMPI_HAVE_MPI_Session_f2c 1 #define PyMPI_HAVE_MPI_MAX_PSET_NAME_LEN 1 #define PyMPI_HAVE_MPI_Session_init 1 #define PyMPI_HAVE_MPI_Session_finalize 1 #define PyMPI_HAVE_MPI_Session_get_num_psets 1 #define PyMPI_HAVE_MPI_Session_get_nth_pset 1 #define PyMPI_HAVE_MPI_Session_get_info 1 #define PyMPI_HAVE_MPI_Session_get_pset_info 1 #define PyMPI_HAVE_MPI_Group_from_session_pset 1 #define PyMPI_HAVE_MPI_Session_errhandler_function 1 #define PyMPI_HAVE_MPI_Session_create_errhandler 1 #define PyMPI_HAVE_MPI_Session_get_errhandler 1 #define PyMPI_HAVE_MPI_Session_set_errhandler 1 #define PyMPI_HAVE_MPI_Session_call_errhandler 1 #define PyMPI_HAVE_MPI_Isendrecv 1 #define PyMPI_HAVE_MPI_Isendrecv_replace 1 #define PyMPI_HAVE_MPI_Psend_init 1 #define PyMPI_HAVE_MPI_Precv_init 1 #define PyMPI_HAVE_MPI_Barrier_init 1 #define PyMPI_HAVE_MPI_Bcast_init 1 #define PyMPI_HAVE_MPI_Gather_init 1 #define PyMPI_HAVE_MPI_Gatherv_init 1 #define PyMPI_HAVE_MPI_Scatter_init 1 #define PyMPI_HAVE_MPI_Scatterv_init 1 #define PyMPI_HAVE_MPI_Allgather_init 1 #define PyMPI_HAVE_MPI_Allgatherv_init 1 #define PyMPI_HAVE_MPI_Alltoall_init 1 #define PyMPI_HAVE_MPI_Alltoallv_init 1 #define PyMPI_HAVE_MPI_Alltoallw_init 1 #define PyMPI_HAVE_MPI_Reduce_init 1 #define PyMPI_HAVE_MPI_Allreduce_init 1 #define PyMPI_HAVE_MPI_Reduce_scatter_block_init 1 #define PyMPI_HAVE_MPI_Reduce_scatter_init 1 #define PyMPI_HAVE_MPI_Scan_init 1 #define PyMPI_HAVE_MPI_Exscan_init 1 #define PyMPI_HAVE_MPI_Neighbor_allgather_init 1 #define PyMPI_HAVE_MPI_Neighbor_allgatherv_init 1 #define PyMPI_HAVE_MPI_Neighbor_alltoall_init 1 #define PyMPI_HAVE_MPI_Neighbor_alltoallv_init 1 #define PyMPI_HAVE_MPI_Neighbor_alltoallw_init 1 #define PyMPI_HAVE_MPI_Comm_idup_with_info 1 #define PyMPI_HAVE_MPI_MAX_STRINGTAG_LEN 1 #define PyMPI_HAVE_MPI_Comm_create_from_group 1 #define PyMPI_HAVE_MPI_COMM_TYPE_HW_GUIDED 1 #define PyMPI_HAVE_MPI_COMM_TYPE_HW_UNGUIDED 1 #define PyMPI_HAVE_MPI_Intercomm_create_from_groups 1 #define PyMPI_HAVE_MPI_Buffer_attach_c 1 #define PyMPI_HAVE_MPI_Buffer_detach_c 1 #define PyMPI_HAVE_MPI_Send_c 1 #define PyMPI_HAVE_MPI_Recv_c 1 #define PyMPI_HAVE_MPI_Sendrecv_c 1 #define PyMPI_HAVE_MPI_Sendrecv_replace_c 1 #define PyMPI_HAVE_MPI_Bsend_c 1 #define PyMPI_HAVE_MPI_Ssend_c 1 #define PyMPI_HAVE_MPI_Rsend_c 1 #define PyMPI_HAVE_MPI_Isend_c 1 #define PyMPI_HAVE_MPI_Irecv_c 1 #define PyMPI_HAVE_MPI_Isendrecv_c 1 #define PyMPI_HAVE_MPI_Isendrecv_replace_c 1 #define PyMPI_HAVE_MPI_Ibsend_c 1 #define PyMPI_HAVE_MPI_Issend_c 1 #define PyMPI_HAVE_MPI_Irsend_c 1 #define PyMPI_HAVE_MPI_Send_init_c 1 #define PyMPI_HAVE_MPI_Recv_init_c 1 #define PyMPI_HAVE_MPI_Bsend_init_c 1 #define PyMPI_HAVE_MPI_Ssend_init_c 1 #define PyMPI_HAVE_MPI_Rsend_init_c 1 #define PyMPI_HAVE_MPI_Mrecv_c 1 #define PyMPI_HAVE_MPI_Imrecv_c 1 #define PyMPI_HAVE_MPI_Bcast_c 1 #define PyMPI_HAVE_MPI_Gather_c 1 #define PyMPI_HAVE_MPI_Gatherv_c 1 #define PyMPI_HAVE_MPI_Scatter_c 1 #define PyMPI_HAVE_MPI_Scatterv_c 1 #define PyMPI_HAVE_MPI_Allgather_c 1 #define PyMPI_HAVE_MPI_Allgatherv_c 1 #define PyMPI_HAVE_MPI_Alltoall_c 1 #define PyMPI_HAVE_MPI_Alltoallv_c 1 #define PyMPI_HAVE_MPI_Alltoallw_c 1 #define PyMPI_HAVE_MPI_Reduce_local_c 1 #define PyMPI_HAVE_MPI_Reduce_c 1 
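/* Added example (not part of mpi4py): the *_c entry points advertised above
 * are the MPI-4 large-count variants taking MPI_Count instead of int.  The
 * wrapper name PyMPI_Demo_send_bytes is hypothetical and assumes MPI_Count
 * exists (MPI-3.0 and later); it shows the usual dispatch -- use the
 * large-count call when available, otherwise require the count to fit an int. */
static int PyMPI_Demo_send_bytes(const void *buf, MPI_Count n,
                                 int dest, int tag, MPI_Comm comm)
{
#ifdef PyMPI_HAVE_MPI_Send_c
  return MPI_Send_c(buf, n, MPI_BYTE, dest, tag, comm);      /* 64-bit count */
#else
  if (n > 2147483647) return MPI_ERR_COUNT;                  /* exceeds INT_MAX */
  return MPI_Send((void *)buf, (int)n, MPI_BYTE, dest, tag, comm);
#endif
}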
#define PyMPI_HAVE_MPI_Allreduce_c 1 #define PyMPI_HAVE_MPI_Reduce_scatter_block_c 1 #define PyMPI_HAVE_MPI_Reduce_scatter_c 1 #define PyMPI_HAVE_MPI_Scan_c 1 #define PyMPI_HAVE_MPI_Exscan_c 1 #define PyMPI_HAVE_MPI_Neighbor_allgather_c 1 #define PyMPI_HAVE_MPI_Neighbor_allgatherv_c 1 #define PyMPI_HAVE_MPI_Neighbor_alltoall_c 1 #define PyMPI_HAVE_MPI_Neighbor_alltoallv_c 1 #define PyMPI_HAVE_MPI_Neighbor_alltoallw_c 1 #define PyMPI_HAVE_MPI_Ibcast_c 1 #define PyMPI_HAVE_MPI_Igather_c 1 #define PyMPI_HAVE_MPI_Igatherv_c 1 #define PyMPI_HAVE_MPI_Iscatter_c 1 #define PyMPI_HAVE_MPI_Iscatterv_c 1 #define PyMPI_HAVE_MPI_Iallgather_c 1 #define PyMPI_HAVE_MPI_Iallgatherv_c 1 #define PyMPI_HAVE_MPI_Ialltoall_c 1 #define PyMPI_HAVE_MPI_Ialltoallv_c 1 #define PyMPI_HAVE_MPI_Ialltoallw_c 1 #define PyMPI_HAVE_MPI_Ireduce_c 1 #define PyMPI_HAVE_MPI_Iallreduce_c 1 #define PyMPI_HAVE_MPI_Ireduce_scatter_block_c 1 #define PyMPI_HAVE_MPI_Ireduce_scatter_c 1 #define PyMPI_HAVE_MPI_Iscan_c 1 #define PyMPI_HAVE_MPI_Iexscan_c 1 #define PyMPI_HAVE_MPI_Ineighbor_allgather_c 1 #define PyMPI_HAVE_MPI_Ineighbor_allgatherv_c 1 #define PyMPI_HAVE_MPI_Ineighbor_alltoall_c 1 #define PyMPI_HAVE_MPI_Ineighbor_alltoallv_c 1 #define PyMPI_HAVE_MPI_Ineighbor_alltoallw_c 1 #define PyMPI_HAVE_MPI_Bcast_init_c 1 #define PyMPI_HAVE_MPI_Gather_init_c 1 #define PyMPI_HAVE_MPI_Gatherv_init_c 1 #define PyMPI_HAVE_MPI_Scatter_init_c 1 #define PyMPI_HAVE_MPI_Scatterv_init_c 1 #define PyMPI_HAVE_MPI_Allgather_init_c 1 #define PyMPI_HAVE_MPI_Allgatherv_init_c 1 #define PyMPI_HAVE_MPI_Alltoall_init_c 1 #define PyMPI_HAVE_MPI_Alltoallv_init_c 1 #define PyMPI_HAVE_MPI_Alltoallw_init_c 1 #define PyMPI_HAVE_MPI_Reduce_init_c 1 #define PyMPI_HAVE_MPI_Allreduce_init_c 1 #define PyMPI_HAVE_MPI_Reduce_scatter_block_init_c 1 #define PyMPI_HAVE_MPI_Reduce_scatter_init_c 1 #define PyMPI_HAVE_MPI_Scan_init_c 1 #define PyMPI_HAVE_MPI_Exscan_init_c 1 #define PyMPI_HAVE_MPI_Neighbor_allgather_init_c 1 #define PyMPI_HAVE_MPI_Neighbor_allgatherv_init_c 1 #define PyMPI_HAVE_MPI_Neighbor_alltoall_init_c 1 #define PyMPI_HAVE_MPI_Neighbor_alltoallv_init_c 1 #define PyMPI_HAVE_MPI_Neighbor_alltoallw_init_c 1 #define PyMPI_HAVE_MPI_Win_create_c 1 #define PyMPI_HAVE_MPI_Win_allocate_c 1 #define PyMPI_HAVE_MPI_Win_allocate_shared_c 1 #define PyMPI_HAVE_MPI_Win_shared_query_c 1 #define PyMPI_HAVE_MPI_Get_c 1 #define PyMPI_HAVE_MPI_Put_c 1 #define PyMPI_HAVE_MPI_Accumulate_c 1 #define PyMPI_HAVE_MPI_Get_accumulate_c 1 #define PyMPI_HAVE_MPI_Rget_c 1 #define PyMPI_HAVE_MPI_Rput_c 1 #define PyMPI_HAVE_MPI_Raccumulate_c 1 #define PyMPI_HAVE_MPI_Rget_accumulate_c 1 #define PyMPI_HAVE_MPI_File_read_at_c 1 #define PyMPI_HAVE_MPI_File_read_at_all_c 1 #define PyMPI_HAVE_MPI_File_write_at_c 1 #define PyMPI_HAVE_MPI_File_write_at_all_c 1 #define PyMPI_HAVE_MPI_File_iread_at_c 1 #define PyMPI_HAVE_MPI_File_iread_at_all_c 1 #define PyMPI_HAVE_MPI_File_iwrite_at_c 1 #define PyMPI_HAVE_MPI_File_iwrite_at_all_c 1 #define PyMPI_HAVE_MPI_File_read_c 1 #define PyMPI_HAVE_MPI_File_read_all_c 1 #define PyMPI_HAVE_MPI_File_write_c 1 #define PyMPI_HAVE_MPI_File_write_all_c 1 #define PyMPI_HAVE_MPI_File_iread_c 1 #define PyMPI_HAVE_MPI_File_iread_all_c 1 #define PyMPI_HAVE_MPI_File_iwrite_c 1 #define PyMPI_HAVE_MPI_File_iwrite_all_c 1 #define PyMPI_HAVE_MPI_File_read_shared_c 1 #define PyMPI_HAVE_MPI_File_write_shared_c 1 #define PyMPI_HAVE_MPI_File_iread_shared_c 1 #define PyMPI_HAVE_MPI_File_iwrite_shared_c 1 #define PyMPI_HAVE_MPI_File_read_ordered_c 1 #define 
PyMPI_HAVE_MPI_File_write_ordered_c 1 #define PyMPI_HAVE_MPI_File_read_at_all_begin_c 1 #define PyMPI_HAVE_MPI_File_write_at_all_begin_c 1 #define PyMPI_HAVE_MPI_File_read_all_begin_c 1 #define PyMPI_HAVE_MPI_File_write_all_begin_c 1 #define PyMPI_HAVE_MPI_File_read_ordered_begin_c 1 #define PyMPI_HAVE_MPI_File_write_ordered_begin_c 1 #define PyMPI_HAVE_MPI_File_get_type_extent_c 1 #define PyMPI_HAVE_MPI_Datarep_conversion_function_c 1 #define PyMPI_HAVE_MPI_CONVERSION_FN_NULL_C 1 #define PyMPI_HAVE_MPI_Register_datarep_c 1 #define PyMPI_HAVE_MPI_ERR_PROC_ABORTED 1 #define PyMPI_HAVE_MPI_ERR_VALUE_TOO_LARGE 1 #define PyMPI_HAVE_MPI_ERR_SESSION 1 #define PyMPI_HAVE_MPI_F_SOURCE 1 #define PyMPI_HAVE_MPI_F_TAG 1 #define PyMPI_HAVE_MPI_F_ERROR 1 #define PyMPI_HAVE_MPI_F_STATUS_SIZE 1 #endif #endif mpi4py-4.0.3/src/lib-mpi/config/mpi-41.h000066400000000000000000000030071475341043600175140ustar00rootroot00000000000000#if defined(MPI_VERSION) #if (MPI_VERSION > 4) || (MPI_VERSION == 4 && MPI_SUBVERSION >= 1) #define PyMPI_HAVE_MPI_ERR_ERRHANDLER 1 #define PyMPI_HAVE_MPI_Remove_error_class 1 #define PyMPI_HAVE_MPI_Remove_error_code 1 #define PyMPI_HAVE_MPI_Remove_error_string 1 #define PyMPI_HAVE_MPI_Status_get_source 1 #define PyMPI_HAVE_MPI_Status_set_source 1 #define PyMPI_HAVE_MPI_Status_get_tag 1 #define PyMPI_HAVE_MPI_Status_set_tag 1 #define PyMPI_HAVE_MPI_Status_get_error 1 #define PyMPI_HAVE_MPI_Status_set_error 1 #define PyMPI_HAVE_MPI_COMBINER_VALUE_INDEX 1 #define PyMPI_HAVE_MPI_Type_get_value_index 1 #define PyMPI_HAVE_MPI_Request_get_status_any 1 #define PyMPI_HAVE_MPI_Request_get_status_all 1 #define PyMPI_HAVE_MPI_Request_get_status_some 1 #define PyMPI_HAVE_MPI_BUFFER_AUTOMATIC 1 #define PyMPI_HAVE_MPI_Buffer_flush 1 #define PyMPI_HAVE_MPI_Buffer_iflush 1 #define PyMPI_HAVE_MPI_Comm_attach_buffer 1 #define PyMPI_HAVE_MPI_Comm_detach_buffer 1 #define PyMPI_HAVE_MPI_Comm_flush_buffer 1 #define PyMPI_HAVE_MPI_Comm_iflush_buffer 1 #define PyMPI_HAVE_MPI_Session_attach_buffer 1 #define PyMPI_HAVE_MPI_Session_detach_buffer 1 #define PyMPI_HAVE_MPI_Session_flush_buffer 1 #define PyMPI_HAVE_MPI_Session_iflush_buffer 1 #define PyMPI_HAVE_MPI_Comm_attach_buffer_c 1 #define PyMPI_HAVE_MPI_Comm_detach_buffer_c 1 #define PyMPI_HAVE_MPI_Session_attach_buffer_c 1 #define PyMPI_HAVE_MPI_Session_detach_buffer_c 1 #define PyMPI_HAVE_MPI_COMM_TYPE_RESOURCE_GUIDED 1 #define PyMPI_HAVE_MPI_Get_hw_resource_info 1 #endif #endif mpi4py-4.0.3/src/lib-mpi/config/mpi-50.h000066400000000000000000000000771475341043600175200ustar00rootroot00000000000000#if defined(MPI_VERSION) #if (MPI_VERSION >= 5) #endif #endif mpi4py-4.0.3/src/lib-mpi/config/mpi-60.h000066400000000000000000000007231475341043600175170ustar00rootroot00000000000000#if defined(MPI_VERSION) #if (MPI_VERSION >= 6) /* #define PyMPI_HAVE_MPI_ERR_REVOKED 1 #define PyMPI_HAVE_MPI_ERR_PROC_FAILED 1 #define PyMPI_HAVE_MPI_ERR_PROC_FAILED_PENDING 1 #define PyMPI_HAVE_MPI_Comm_revoke 1 #define PyMPI_HAVE_MPI_Comm_is_revoked 1 #define PyMPI_HAVE_MPI_Comm_get_failed 1 #define PyMPI_HAVE_MPI_Comm_ack_failed 1 #define PyMPI_HAVE_MPI_Comm_agree 1 #define PyMPI_HAVE_MPI_Comm_iagree 1 #define PyMPI_HAVE_MPI_Comm_shrink 1 */ #endif #endif mpi4py-4.0.3/src/lib-mpi/config/mpi-io.h000066400000000000000000000120271475341043600177010ustar00rootroot00000000000000#undef PyMPI_HAVE_MPI_File #undef PyMPI_HAVE_MPI_FILE_NULL #undef PyMPI_HAVE_MPI_MODE_RDONLY #undef PyMPI_HAVE_MPI_MODE_RDWR #undef PyMPI_HAVE_MPI_MODE_WRONLY #undef PyMPI_HAVE_MPI_MODE_CREATE #undef 
PyMPI_HAVE_MPI_MODE_EXCL #undef PyMPI_HAVE_MPI_MODE_DELETE_ON_CLOSE #undef PyMPI_HAVE_MPI_MODE_UNIQUE_OPEN #undef PyMPI_HAVE_MPI_MODE_APPEND #undef PyMPI_HAVE_MPI_MODE_SEQUENTIAL #undef PyMPI_HAVE_MPI_File_open #undef PyMPI_HAVE_MPI_File_close #undef PyMPI_HAVE_MPI_File_delete #undef PyMPI_HAVE_MPI_File_set_size #undef PyMPI_HAVE_MPI_File_preallocate #undef PyMPI_HAVE_MPI_File_get_size #undef PyMPI_HAVE_MPI_File_get_group #undef PyMPI_HAVE_MPI_File_get_amode #undef PyMPI_HAVE_MPI_File_set_info #undef PyMPI_HAVE_MPI_File_get_info #undef PyMPI_HAVE_MPI_File_get_view #undef PyMPI_HAVE_MPI_File_set_view #undef PyMPI_HAVE_MPI_File_read_at #undef PyMPI_HAVE_MPI_File_read_at_all #undef PyMPI_HAVE_MPI_File_write_at #undef PyMPI_HAVE_MPI_File_write_at_all #undef PyMPI_HAVE_MPI_File_iread_at #undef PyMPI_HAVE_MPI_File_iread_at_all #undef PyMPI_HAVE_MPI_File_iwrite_at #undef PyMPI_HAVE_MPI_File_iwrite_at_all #undef PyMPI_HAVE_MPI_SEEK_SET #undef PyMPI_HAVE_MPI_SEEK_CUR #undef PyMPI_HAVE_MPI_SEEK_END #undef PyMPI_HAVE_MPI_DISPLACEMENT_CURRENT #undef PyMPI_HAVE_MPI_File_seek #undef PyMPI_HAVE_MPI_File_get_position #undef PyMPI_HAVE_MPI_File_get_byte_offset #undef PyMPI_HAVE_MPI_File_read #undef PyMPI_HAVE_MPI_File_read_all #undef PyMPI_HAVE_MPI_File_write #undef PyMPI_HAVE_MPI_File_write_all #undef PyMPI_HAVE_MPI_File_iread #undef PyMPI_HAVE_MPI_File_iread_all #undef PyMPI_HAVE_MPI_File_iwrite #undef PyMPI_HAVE_MPI_File_iwrite_all #undef PyMPI_HAVE_MPI_File_read_shared #undef PyMPI_HAVE_MPI_File_write_shared #undef PyMPI_HAVE_MPI_File_iread_shared #undef PyMPI_HAVE_MPI_File_iwrite_shared #undef PyMPI_HAVE_MPI_File_read_ordered #undef PyMPI_HAVE_MPI_File_write_ordered #undef PyMPI_HAVE_MPI_File_seek_shared #undef PyMPI_HAVE_MPI_File_get_position_shared #undef PyMPI_HAVE_MPI_File_read_at_all_begin #undef PyMPI_HAVE_MPI_File_read_at_all_end #undef PyMPI_HAVE_MPI_File_write_at_all_begin #undef PyMPI_HAVE_MPI_File_write_at_all_end #undef PyMPI_HAVE_MPI_File_read_all_begin #undef PyMPI_HAVE_MPI_File_read_all_end #undef PyMPI_HAVE_MPI_File_write_all_begin #undef PyMPI_HAVE_MPI_File_write_all_end #undef PyMPI_HAVE_MPI_File_read_ordered_begin #undef PyMPI_HAVE_MPI_File_read_ordered_end #undef PyMPI_HAVE_MPI_File_write_ordered_begin #undef PyMPI_HAVE_MPI_File_write_ordered_end #undef PyMPI_HAVE_MPI_File_get_type_extent #undef PyMPI_HAVE_MPI_File_set_atomicity #undef PyMPI_HAVE_MPI_File_get_atomicity #undef PyMPI_HAVE_MPI_File_sync #undef PyMPI_HAVE_MPI_File_get_errhandler #undef PyMPI_HAVE_MPI_File_set_errhandler #undef PyMPI_HAVE_MPI_File_errhandler_fn #undef PyMPI_HAVE_MPI_File_errhandler_function #undef PyMPI_HAVE_MPI_File_create_errhandler #undef PyMPI_HAVE_MPI_File_call_errhandler #undef PyMPI_HAVE_MPI_MAX_DATAREP_STRING #undef PyMPI_HAVE_MPI_Datarep_extent_function #undef PyMPI_HAVE_MPI_Datarep_conversion_function #undef PyMPI_HAVE_MPI_CONVERSION_FN_NULL #undef PyMPI_HAVE_MPI_Register_datarep #undef PyMPI_HAVE_MPI_File_read_at_c #undef PyMPI_HAVE_MPI_File_read_at_all_c #undef PyMPI_HAVE_MPI_File_write_at_c #undef PyMPI_HAVE_MPI_File_write_at_all_c #undef PyMPI_HAVE_MPI_File_iread_at_c #undef PyMPI_HAVE_MPI_File_iread_at_all_c #undef PyMPI_HAVE_MPI_File_iwrite_at_c #undef PyMPI_HAVE_MPI_File_iwrite_at_all_c #undef PyMPI_HAVE_MPI_File_read_c #undef PyMPI_HAVE_MPI_File_read_all_c #undef PyMPI_HAVE_MPI_File_write_c #undef PyMPI_HAVE_MPI_File_write_all_c #undef PyMPI_HAVE_MPI_File_iread_c #undef PyMPI_HAVE_MPI_File_iread_all_c #undef PyMPI_HAVE_MPI_File_iwrite_c #undef PyMPI_HAVE_MPI_File_iwrite_all_c #undef 
PyMPI_HAVE_MPI_File_read_shared_c #undef PyMPI_HAVE_MPI_File_write_shared_c #undef PyMPI_HAVE_MPI_File_iread_shared_c #undef PyMPI_HAVE_MPI_File_iwrite_shared_c #undef PyMPI_HAVE_MPI_File_read_ordered_c #undef PyMPI_HAVE_MPI_File_write_ordered_c #undef PyMPI_HAVE_MPI_File_read_at_all_begin_c #undef PyMPI_HAVE_MPI_File_write_at_all_begin_c #undef PyMPI_HAVE_MPI_File_read_all_begin_c #undef PyMPI_HAVE_MPI_File_write_all_begin_c #undef PyMPI_HAVE_MPI_File_read_ordered_begin_c #undef PyMPI_HAVE_MPI_File_write_ordered_begin_c #undef PyMPI_HAVE_MPI_File_get_type_extent_c #undef PyMPI_HAVE_MPI_Datarep_conversion_function_c #undef PyMPI_HAVE_MPI_CONVERSION_FN_NULL_C #undef PyMPI_HAVE_MPI_Register_datarep_c #undef PyMPI_HAVE_MPI_File_c2f #undef PyMPI_HAVE_MPI_File_f2c #if !defined(MPI_ERR_FILE) #undef PyMPI_HAVE_MPI_ERR_FILE #undef PyMPI_HAVE_MPI_ERR_NOT_SAME #undef PyMPI_HAVE_MPI_ERR_BAD_FILE #undef PyMPI_HAVE_MPI_ERR_NO_SUCH_FILE #undef PyMPI_HAVE_MPI_ERR_FILE_EXISTS #undef PyMPI_HAVE_MPI_ERR_FILE_IN_USE #undef PyMPI_HAVE_MPI_ERR_AMODE #undef PyMPI_HAVE_MPI_ERR_ACCESS #undef PyMPI_HAVE_MPI_ERR_READ_ONLY #undef PyMPI_HAVE_MPI_ERR_NO_SPACE #undef PyMPI_HAVE_MPI_ERR_QUOTA #undef PyMPI_HAVE_MPI_ERR_UNSUPPORTED_DATAREP #undef PyMPI_HAVE_MPI_ERR_UNSUPPORTED_OPERATION #undef PyMPI_HAVE_MPI_ERR_CONVERSION #undef PyMPI_HAVE_MPI_ERR_DUP_DATAREP #undef PyMPI_HAVE_MPI_ERR_IO #endif mpi4py-4.0.3/src/lib-mpi/config/mpiapi.h000066400000000000000000000004521475341043600177650ustar00rootroot00000000000000#ifndef PyMPI_CONFIG_MPIAPI_H #define PyMPI_CONFIG_MPIAPI_H #include "mpi-11.h" #include "mpi-12.h" #include "mpi-20.h" #include "mpi-22.h" #include "mpi-30.h" #include "mpi-31.h" #include "mpi-40.h" #include "mpi-41.h" #include "mpi-50.h" #include "mpi-60.h" #endif /* !PyMPI_CONFIG_MPIAPI_H */ mpi4py-4.0.3/src/lib-mpi/config/mpich.h000066400000000000000000000006761475341043600176160ustar00rootroot00000000000000#ifndef PyMPI_CONFIG_MPICH_H #define PyMPI_CONFIG_MPICH_H #include "mpiapi.h" /* These types may not be available */ #ifndef MPI_REAL2 #undef PyMPI_HAVE_MPI_REAL2 #endif #ifndef MPI_MPI_COMPLEX4 #undef PyMPI_HAVE_MPI_COMPLEX4 #endif /* MPI I/O may not be available */ /* https://github.com/pmodels/mpich/issues/7278 */ #if MPICH_NUMVERSION < 40300000 #ifndef ROMIO_VERSION #include "mpi-io.h" #endif #endif #endif /* !PyMPI_CONFIG_MPICH_H */ mpi4py-4.0.3/src/lib-mpi/config/mpich2.h000066400000000000000000000214231475341043600176710ustar00rootroot00000000000000#ifndef PyMPI_CONFIG_MPICH2_H #define PyMPI_CONFIG_MPICH2_H #include "mpiapi.h" /* These types are not available */ #undef PyMPI_HAVE_MPI_REAL2 #undef PyMPI_HAVE_MPI_COMPLEX4 #if defined(MPI_UNWEIGHTED) && (MPICH2_NUMVERSION < 10300000) #undef MPI_UNWEIGHTED #define MPI_UNWEIGHTED ((int *)0) #endif /* MPICH2 < 1.3.0 */ #if !defined(MPICH2_NUMVERSION) || (MPICH2_NUMVERSION < 10100000) #undef PyMPI_HAVE_MPI_Type_create_f90_integer #undef PyMPI_HAVE_MPI_Type_create_f90_real #undef PyMPI_HAVE_MPI_Type_create_f90_complex #endif /* MPICH2 < 1.1.0 */ /* MPI I/O may not be available */ #ifndef ROMIO_VERSION #include "mpi-io.h" #endif #if MPI_VERSION < 3 && defined(MPICH2_NUMVERSION) #if MPICH2_NUMVERSION >= 10500000 && \ MPICH2_NUMVERSION < 20000000 /* #define PyMPI_HAVE_MPI_Count 1 #define PyMPI_HAVE_MPI_COUNT 1 #define PyMPI_HAVE_MPI_Type_size_x 1 #define PyMPI_HAVE_MPI_Type_get_extent_x 1 #define PyMPI_HAVE_MPI_Type_get_true_extent_x 1 #define PyMPI_HAVE_MPI_Get_elements_x 1 #define PyMPI_HAVE_MPI_Status_set_elements_x 1 #define MPI_Count MPIX_Count #define 
MPI_COUNT MPIX_COUNT #define MPI_Type_size_x MPIX_Type_size_x #define MPI_Type_get_extent_x MPIX_Type_get_extent_x #define MPI_Type_get_true_extent_x MPIX_Type_get_true_extent_x #define MPI_Get_elements_x MPIX_Get_elements_x #define MPI_Status_set_elements_x MPIX_Status_set_elements_x */ #define PyMPI_HAVE_MPI_COMBINER_HINDEXED_BLOCK 1 #define PyMPI_HAVE_MPI_Type_create_hindexed_block 1 #define MPI_COMBINER_HINDEXED_BLOCK MPIX_COMBINER_HINDEXED_BLOCK #define MPI_Type_create_hindexed_block MPIX_Type_create_hindexed_block #define PyMPI_HAVE_MPI_NO_OP 1 #define MPI_NO_OP MPIX_NO_OP #define PyMPI_HAVE_MPI_Message 1 #define PyMPI_HAVE_MPI_MESSAGE_NULL 1 #define PyMPI_HAVE_MPI_MESSAGE_NO_PROC 1 #define PyMPI_HAVE_MPI_Message_c2f 1 #define PyMPI_HAVE_MPI_Message_f2c 1 #define PyMPI_HAVE_MPI_Mprobe 1 #define PyMPI_HAVE_MPI_Improbe 1 #define PyMPI_HAVE_MPI_Mrecv 1 #define PyMPI_HAVE_MPI_Imrecv 1 #define MPI_Message MPIX_Message #define MPI_MESSAGE_NULL MPIX_MESSAGE_NULL #define MPI_MESSAGE_NO_PROC MPIX_MESSAGE_NO_PROC #define MPI_Message_c2f MPIX_Message_c2f #define MPI_Message_f2c MPIX_Message_f2c #define MPI_Mprobe MPIX_Mprobe #define MPI_Improbe MPIX_Improbe #define MPI_Mrecv MPIX_Mrecv #define MPI_Imrecv MPIX_Imrecv #define PyMPI_HAVE_MPI_Ibarrier 1 #define PyMPI_HAVE_MPI_Ibcast 1 #define PyMPI_HAVE_MPI_Igather 1 #define PyMPI_HAVE_MPI_Igatherv 1 #define PyMPI_HAVE_MPI_Iscatter 1 #define PyMPI_HAVE_MPI_Iscatterv 1 #define PyMPI_HAVE_MPI_Iallgather 1 #define PyMPI_HAVE_MPI_Iallgatherv 1 #define PyMPI_HAVE_MPI_Ialltoall 1 #define PyMPI_HAVE_MPI_Ialltoallv 1 #define PyMPI_HAVE_MPI_Ialltoallw 1 #define PyMPI_HAVE_MPI_Ireduce 1 #define PyMPI_HAVE_MPI_Iallreduce 1 #define PyMPI_HAVE_MPI_Ireduce_scatter_block 1 #define PyMPI_HAVE_MPI_Ireduce_scatter 1 #define PyMPI_HAVE_MPI_Iscan 1 #define PyMPI_HAVE_MPI_Iexscan 1 #define MPI_Ibarrier MPIX_Ibarrier #define MPI_Ibcast MPIX_Ibcast #define MPI_Igather MPIX_Igather #define MPI_Igatherv MPIX_Igatherv #define MPI_Iscatter MPIX_Iscatter #define MPI_Iscatterv MPIX_Iscatterv #define MPI_Iallgather MPIX_Iallgather #define MPI_Iallgatherv MPIX_Iallgatherv #define MPI_Ialltoall MPIX_Ialltoall #define MPI_Ialltoallv MPIX_Ialltoallv #define MPI_Ialltoallw MPIX_Ialltoallw #define MPI_Ireduce MPIX_Ireduce #define MPI_Iallreduce MPIX_Iallreduce #define MPI_Ireduce_scatter_block MPIX_Ireduce_scatter_block #define MPI_Ireduce_scatter MPIX_Ireduce_scatter #define MPI_Iscan MPIX_Iscan #define MPI_Iexscan MPIX_Iexscan #define PyMPI_HAVE_MPI_Neighbor_allgather 1 #define PyMPI_HAVE_MPI_Neighbor_allgatherv 1 #define PyMPI_HAVE_MPI_Neighbor_alltoall 1 #define PyMPI_HAVE_MPI_Neighbor_alltoallv 1 #define PyMPI_HAVE_MPI_Neighbor_alltoallw 1 #define MPI_Neighbor_allgather MPIX_Neighbor_allgather #define MPI_Neighbor_allgatherv MPIX_Neighbor_allgatherv #define MPI_Neighbor_alltoall MPIX_Neighbor_alltoall #define MPI_Neighbor_alltoallv MPIX_Neighbor_alltoallv #define MPI_Neighbor_alltoallw MPIX_Neighbor_alltoallw #define PyMPI_HAVE_MPI_Ineighbor_allgather 1 #define PyMPI_HAVE_MPI_Ineighbor_allgatherv 1 #define PyMPI_HAVE_MPI_Ineighbor_alltoall 1 #define PyMPI_HAVE_MPI_Ineighbor_alltoallv 1 #define PyMPI_HAVE_MPI_Ineighbor_alltoallw 1 #define MPI_Ineighbor_allgather MPIX_Ineighbor_allgather #define MPI_Ineighbor_allgatherv MPIX_Ineighbor_allgatherv #define MPI_Ineighbor_alltoall MPIX_Ineighbor_alltoall #define MPI_Ineighbor_alltoallv MPIX_Ineighbor_alltoallv #define MPI_Ineighbor_alltoallw MPIX_Ineighbor_alltoallw #define PyMPI_HAVE_MPI_Comm_idup 1 #define 
PyMPI_HAVE_MPI_Comm_create_group 1 #define PyMPI_HAVE_MPI_COMM_TYPE_SHARED 1 #define PyMPI_HAVE_MPI_Comm_split_type 1 #define MPI_Comm_idup MPIX_Comm_idup #define MPI_Comm_create_group MPIX_Comm_create_group #define MPI_COMM_TYPE_SHARED MPIX_COMM_TYPE_SHARED #define MPI_Comm_split_type MPIX_Comm_split_type /* #define PyMPI_HAVE_MPI_Comm_dup_with_info 1 #define PyMPI_HAVE_MPI_Comm_set_info 1 #define PyMPI_HAVE_MPI_Comm_get_info 1 #define MPI_Comm_dup_with_info MPIX_Comm_dup_with_info #define MPI_Comm_set_info MPIX_Comm_set_info #define MPI_Comm_get_info MPIX_Comm_get_info */ #define PyMPI_HAVE_MPI_WIN_CREATE_FLAVOR 1 #define PyMPI_HAVE_MPI_WIN_FLAVOR_CREATE 1 #define PyMPI_HAVE_MPI_WIN_FLAVOR_ALLOCATE 1 #define PyMPI_HAVE_MPI_WIN_FLAVOR_DYNAMIC 1 #define PyMPI_HAVE_MPI_WIN_FLAVOR_SHARED 1 #define MPI_WIN_CREATE_FLAVOR MPIX_WIN_CREATE_FLAVOR #define MPI_WIN_FLAVOR_CREATE MPIX_WIN_FLAVOR_CREATE #define MPI_WIN_FLAVOR_ALLOCATE MPIX_WIN_FLAVOR_ALLOCATE #define MPI_WIN_FLAVOR_DYNAMIC MPIX_WIN_FLAVOR_DYNAMIC #define MPI_WIN_FLAVOR_SHARED MPIX_WIN_FLAVOR_SHARED #define PyMPI_HAVE_MPI_WIN_MODEL 1 #define PyMPI_HAVE_MPI_WIN_SEPARATE 1 #define PyMPI_HAVE_MPI_WIN_UNIFIED 1 #define MPI_WIN_MODEL MPIX_WIN_MODEL #define MPI_WIN_SEPARATE MPIX_WIN_SEPARATE #define MPI_WIN_UNIFIED MPIX_WIN_UNIFIED #define PyMPI_HAVE_MPI_Win_allocate 1 #define MPI_Win_allocate MPIX_Win_allocate #define PyMPI_HAVE_MPI_Win_allocate_shared 1 #define PyMPI_HAVE_MPI_Win_shared_query 1 #define MPI_Win_allocate_shared MPIX_Win_allocate_shared #define MPI_Win_shared_query MPIX_Win_shared_query #define PyMPI_HAVE_MPI_Win_create_dynamic 1 #define PyMPI_HAVE_MPI_Win_attach 1 #define PyMPI_HAVE_MPI_Win_detach 1 #define MPI_Win_create_dynamic MPIX_Win_create_dynamic #define MPI_Win_attach MPIX_Win_attach #define MPI_Win_detach MPIX_Win_detach /* #define PyMPI_HAVE_MPI_Win_set_info 1 #define PyMPI_HAVE_MPI_Win_get_info 1 #define MPI_Win_set_info MPIX_Win_set_info #define MPI_Win_get_info MPIX_Win_get_info */ #define PyMPI_HAVE_MPI_Get_accumulate 1 #define PyMPI_HAVE_MPI_Fetch_and_op 1 #define PyMPI_HAVE_MPI_Compare_and_swap 1 #define MPI_Get_accumulate MPIX_Get_accumulate #define MPI_Fetch_and_op MPIX_Fetch_and_op #define MPI_Compare_and_swap MPIX_Compare_and_swap #define PyMPI_HAVE_MPI_Rget 1 #define PyMPI_HAVE_MPI_Rput 1 #define PyMPI_HAVE_MPI_Raccumulate 1 #define PyMPI_HAVE_MPI_Rget_accumulate 1 #define MPI_Rget MPIX_Rget #define MPI_Rput MPIX_Rput #define MPI_Raccumulate MPIX_Raccumulate #define MPI_Rget_accumulate MPIX_Rget_accumulate #define PyMPI_HAVE_MPI_Win_lock_all 1 #define PyMPI_HAVE_MPI_Win_unlock_all 1 #define PyMPI_HAVE_MPI_Win_flush 1 #define PyMPI_HAVE_MPI_Win_flush_all 1 #define PyMPI_HAVE_MPI_Win_flush_local 1 #define PyMPI_HAVE_MPI_Win_flush_local_all 1 #define PyMPI_HAVE_MPI_Win_sync #define MPI_Win_lock_all MPIX_Win_lock_all #define MPI_Win_unlock_all MPIX_Win_unlock_all #define MPI_Win_flush MPIX_Win_flush #define MPI_Win_flush_all MPIX_Win_flush_all #define MPI_Win_flush_local MPIX_Win_flush_local #define MPI_Win_flush_local_all MPIX_Win_flush_local_all #define MPI_Win_sync MPIX_Win_sync #define PyMPI_HAVE_MPI_ERR_RMA_RANGE 1 #define PyMPI_HAVE_MPI_ERR_RMA_ATTACH 1 #define PyMPI_HAVE_MPI_ERR_RMA_SHARED 1 #define PyMPI_HAVE_MPI_ERR_RMA_FLAVOR 1 #define MPI_ERR_RMA_RANGE MPIX_ERR_RMA_RANGE #define MPI_ERR_RMA_ATTACH MPIX_ERR_RMA_ATTACH #define MPI_ERR_RMA_SHARED MPIX_ERR_RMA_SHARED #define MPI_ERR_RMA_FLAVOR MPIX_ERR_RMA_WRONG_FLAVOR /* #define PyMPI_HAVE_MPI_MAX_LIBRARY_VERSION_STRING 1 #define 
PyMPI_HAVE_MPI_Get_library_version 1 #define PyMPI_HAVE_MPI_INFO_ENV 1 #define MPI_MAX_LIBRARY_VERSION_STRING MPIX_MAX_LIBRARY_VERSION_STRING #define MPI_Get_library_version MPIX_Get_library_version #define MPI_INFO_ENV MPIX_INFO_ENV */ #endif /* MPICH2 < 1.5*/ #endif /* MPI < 3.0*/ #endif /* !PyMPI_CONFIG_MPICH2_H */ mpi4py-4.0.3/src/lib-mpi/config/mpich3.h000066400000000000000000000005471475341043600176760ustar00rootroot00000000000000#ifndef PyMPI_CONFIG_MPICH3_H #define PyMPI_CONFIG_MPICH3_H #include "mpiapi.h" /* These types may not be available */ #ifndef MPI_REAL2 #undef PyMPI_HAVE_MPI_REAL2 #endif #ifndef MPI_MPI_COMPLEX4 #undef PyMPI_HAVE_MPI_COMPLEX4 #endif /* MPI I/O may not be available */ #ifndef ROMIO_VERSION #include "mpi-io.h" #endif #endif /* !PyMPI_CONFIG_MPICH3_H */ mpi4py-4.0.3/src/lib-mpi/config/msmpi.h000066400000000000000000000076031475341043600176400ustar00rootroot00000000000000#ifndef PyMPI_CONFIG_MSMPI_H #define PyMPI_CONFIG_MSMPI_H #include "mpiapi.h" #if MSMPI_VER >= 0x402 #define PyMPI_HAVE_MPI_AINT 1 #define PyMPI_HAVE_MPI_OFFSET 1 #define PyMPI_HAVE_MPI_C_BOOL 1 #define PyMPI_HAVE_MPI_INT8_T 1 #define PyMPI_HAVE_MPI_INT16_T 1 #define PyMPI_HAVE_MPI_INT32_T 1 #define PyMPI_HAVE_MPI_INT64_T 1 #define PyMPI_HAVE_MPI_UINT8_T 1 #define PyMPI_HAVE_MPI_UINT16_T 1 #define PyMPI_HAVE_MPI_UINT32_T 1 #define PyMPI_HAVE_MPI_UINT64_T 1 #define PyMPI_HAVE_MPI_C_COMPLEX 1 #define PyMPI_HAVE_MPI_C_FLOAT_COMPLEX 1 #define PyMPI_HAVE_MPI_C_DOUBLE_COMPLEX 1 #define PyMPI_HAVE_MPI_C_LONG_DOUBLE_COMPLEX 1 #define PyMPI_HAVE_MPI_REAL2 1 #define PyMPI_HAVE_MPI_COMPLEX4 1 #define PyMPI_HAVE_MPI_Reduce_local 1 #endif #if MSMPI_VER >= 0x500 #define PyMPI_HAVE_MPI_COMM_TYPE_SHARED 1 #define PyMPI_HAVE_MPI_Comm_split_type 1 #define PyMPI_HAVE_MPI_Win_allocate_shared 1 #define PyMPI_HAVE_MPI_Win_shared_query 1 #define PyMPI_HAVE_MPI_MAX_LIBRARY_VERSION_STRING 1 #define PyMPI_HAVE_MPI_Get_library_version 1 #endif #if MSMPI_VER >= 0x600 #define PyMPI_HAVE_MPI_Count 1 #define PyMPI_HAVE_MPI_COUNT 1 #define PyMPI_HAVE_MPI_Type_create_hindexed_block 1 #define PyMPI_HAVE_MPI_COMBINER_HINDEXED_BLOCK 1 #define PyMPI_HAVE_MPI_Type_size_x 1 #define PyMPI_HAVE_MPI_Type_get_extent_x 1 #define PyMPI_HAVE_MPI_Type_get_true_extent_x 1 #define PyMPI_HAVE_MPI_Get_elements_x 1 #define PyMPI_HAVE_MPI_Status_set_elements_x 1 #define PyMPI_HAVE_MPI_Message 1 #define PyMPI_HAVE_MPI_MESSAGE_NULL 1 #define PyMPI_HAVE_MPI_MESSAGE_NO_PROC 1 #define PyMPI_HAVE_MPI_Mprobe 1 #define PyMPI_HAVE_MPI_Improbe 1 #define PyMPI_HAVE_MPI_Mrecv 1 #define PyMPI_HAVE_MPI_Imrecv 1 #define PyMPI_HAVE_MPI_Message_c2f 1 #define PyMPI_HAVE_MPI_Message_f2c 1 #define PyMPI_HAVE_MPI_Op_commutative 1 #define PyMPI_HAVE_MPI_DIST_GRAPH 1 #define PyMPI_HAVE_MPI_UNWEIGHTED 1 #define PyMPI_HAVE_MPI_WEIGHTS_EMPTY 1 #define PyMPI_HAVE_MPI_Dist_graph_create_adjacent 1 #define PyMPI_HAVE_MPI_Dist_graph_create 1 #define PyMPI_HAVE_MPI_Dist_graph_neighbors_count 1 #define PyMPI_HAVE_MPI_Dist_graph_neighbors 1 #define PyMPI_HAVE_MPI_Ibarrier 1 #define PyMPI_HAVE_MPI_Ibcast 1 #define PyMPI_HAVE_MPI_Igather 1 #define PyMPI_HAVE_MPI_Ireduce 1 #endif #if MSMPI_VER >= 0x700 #define PyMPI_HAVE_MPI_Iallgather 1 #define PyMPI_HAVE_MPI_Iallreduce 1 #define PyMPI_HAVE_MPI_Igatherv 1 #define PyMPI_HAVE_MPI_Iscatter 1 #define PyMPI_HAVE_MPI_Iscatterv 1 #endif #if MSMPI_VER >= 0x800 #define PyMPI_HAVE_MPI_Reduce_scatter_block 1 #define PyMPI_HAVE_MPI_Iallgatherv 1 #define PyMPI_HAVE_MPI_Ialltoall 1 #define PyMPI_HAVE_MPI_Ialltoallv 1 #define 
PyMPI_HAVE_MPI_Ialltoallw 1 #define PyMPI_HAVE_MPI_Iallreduce 1 #define PyMPI_HAVE_MPI_Ireduce_scatter 1 #define PyMPI_HAVE_MPI_Ireduce_scatter_block 1 #define PyMPI_HAVE_MPI_Iscan 1 #define PyMPI_HAVE_MPI_Iexscan 1 #endif #if MSMPI_VER >= 0x900 #define PyMPI_HAVE_MPI_NO_OP 1 #define PyMPI_HAVE_MPI_Win_allocate 1 #define PyMPI_HAVE_MPI_Win_create_dynamic 1 #define PyMPI_HAVE_MPI_Win_attach 1 #define PyMPI_HAVE_MPI_Win_detach 1 #define PyMPI_HAVE_MPI_Rget 1 #define PyMPI_HAVE_MPI_Rput 1 #define PyMPI_HAVE_MPI_Raccumulate 1 #define PyMPI_HAVE_MPI_Win_flush 1 #define PyMPI_HAVE_MPI_WIN_CREATE_FLAVOR 1 #define PyMPI_HAVE_MPI_WIN_MODEL 1 #define PyMPI_HAVE_MPI_WIN_FLAVOR_CREATE 1 #define PyMPI_HAVE_MPI_WIN_FLAVOR_ALLOCATE 1 #define PyMPI_HAVE_MPI_WIN_FLAVOR_DYNAMIC 1 #define PyMPI_HAVE_MPI_WIN_FLAVOR_SHARED 1 #define PyMPI_HAVE_MPI_WIN_SEPARATE 1 #define PyMPI_HAVE_MPI_WIN_UNIFIED 1 #endif #if MSMPI_VER >= 0xA00 #define PyMPI_HAVE_MPI_Get_accumulate #define PyMPI_HAVE_MPI_Rget_accumulate #define PyMPI_HAVE_MPI_Fetch_and_op #define PyMPI_HAVE_MPI_Compare_and_swap #define PyMPI_HAVE_MPI_Win_lock_all #define PyMPI_HAVE_MPI_Win_unlock_all #define PyMPI_HAVE_MPI_Win_flush_all #define PyMPI_HAVE_MPI_Win_flush_local #define PyMPI_HAVE_MPI_Win_flush_local_all #define PyMPI_HAVE_MPI_Win_sync #endif #endif /* !PyMPI_CONFIG_MSMPI_H */ mpi4py-4.0.3/src/lib-mpi/config/openmpi.h000066400000000000000000000150201475341043600201520ustar00rootroot00000000000000#ifndef PyMPI_CONFIG_OPENMPI_H #define PyMPI_CONFIG_OPENMPI_H #include "mpiapi.h" #ifndef OMPI_HAVE_FORTRAN_LOGICAL1 #define OMPI_HAVE_FORTRAN_LOGICAL1 0 #endif #ifndef OMPI_HAVE_FORTRAN_LOGICAL2 #define OMPI_HAVE_FORTRAN_LOGICAL2 0 #endif #ifndef OMPI_HAVE_FORTRAN_LOGICAL4 #define OMPI_HAVE_FORTRAN_LOGICAL4 0 #endif #ifndef OMPI_HAVE_FORTRAN_LOGICAL8 #define OMPI_HAVE_FORTRAN_LOGICAL8 0 #endif #if OMPI_HAVE_FORTRAN_LOGICAL1 #define PyMPI_HAVE_MPI_LOGICAL1 1 #endif #if OMPI_HAVE_FORTRAN_LOGICAL2 #define PyMPI_HAVE_MPI_LOGICAL2 1 #endif #if OMPI_HAVE_FORTRAN_LOGICAL4 #define PyMPI_HAVE_MPI_LOGICAL4 1 #endif #if OMPI_HAVE_FORTRAN_LOGICAL8 #define PyMPI_HAVE_MPI_LOGICAL8 1 #endif #if !OMPI_HAVE_FORTRAN_INTEGER1 #undef PyMPI_HAVE_MPI_INTEGER1 #endif #if !OMPI_HAVE_FORTRAN_INTEGER2 #undef PyMPI_HAVE_MPI_INTEGER2 #endif #if !OMPI_HAVE_FORTRAN_INTEGER4 #undef PyMPI_HAVE_MPI_INTEGER4 #endif #if !OMPI_HAVE_FORTRAN_INTEGER8 #undef PyMPI_HAVE_MPI_INTEGER8 #endif #if !OMPI_HAVE_FORTRAN_INTEGER16 #undef PyMPI_HAVE_MPI_INTEGER16 #endif #if !OMPI_HAVE_FORTRAN_REAL2 #undef PyMPI_HAVE_MPI_REAL2 #undef PyMPI_HAVE_MPI_COMPLEX4 #endif #if !OMPI_HAVE_FORTRAN_REAL4 #undef PyMPI_HAVE_MPI_REAL4 #undef PyMPI_HAVE_MPI_COMPLEX8 #endif #if !OMPI_HAVE_FORTRAN_REAL8 #undef PyMPI_HAVE_MPI_REAL8 #undef PyMPI_HAVE_MPI_COMPLEX16 #endif #if !OMPI_HAVE_FORTRAN_REAL16 #undef PyMPI_HAVE_MPI_REAL16 #undef PyMPI_HAVE_MPI_COMPLEX32 #endif #ifdef OMPI_PROVIDE_MPI_FILE_INTERFACE #if OMPI_PROVIDE_MPI_FILE_INTERFACE == 0 #include "mpi-io.h" #endif #endif #if (defined(OMPI_MAJOR_VERSION) && \ defined(OMPI_MINOR_VERSION) && \ defined(OMPI_RELEASE_VERSION)) #define OMPI_NUMVERSION (OMPI_MAJOR_VERSION*10000 + \ OMPI_MINOR_VERSION*100 + \ OMPI_RELEASE_VERSION) #else #define OMPI_NUMVERSION (10100) #endif #if MPI_VERSION < 3 #if OMPI_NUMVERSION >= 10700 #define PyMPI_HAVE_MPI_Message 1 #define PyMPI_HAVE_MPI_MESSAGE_NULL 1 #define PyMPI_HAVE_MPI_MESSAGE_NO_PROC 1 #define PyMPI_HAVE_MPI_Message_c2f 1 #define PyMPI_HAVE_MPI_Message_f2c 1 #define PyMPI_HAVE_MPI_Mprobe 1 #define PyMPI_HAVE_MPI_Improbe 1 
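/* Worked example added for clarity (not in the original): with the scheme
 * above, Open MPI 1.7.4 yields OMPI_NUMVERSION = 1*10000 + 7*100 + 4 = 10704
 * and Open MPI 5.0.0 yields 5*10000 + 0*100 + 0 = 50000, which are exactly
 * the thresholds tested by the version guards in the rest of this file. */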
#define PyMPI_HAVE_MPI_Mrecv 1 #define PyMPI_HAVE_MPI_Imrecv 1 #define PyMPI_HAVE_MPI_Ibarrier 1 #define PyMPI_HAVE_MPI_Ibcast 1 #define PyMPI_HAVE_MPI_Igather 1 #define PyMPI_HAVE_MPI_Igatherv 1 #define PyMPI_HAVE_MPI_Iscatter 1 #define PyMPI_HAVE_MPI_Iscatterv 1 #define PyMPI_HAVE_MPI_Iallgather 1 #define PyMPI_HAVE_MPI_Iallgatherv 1 #define PyMPI_HAVE_MPI_Ialltoall 1 #define PyMPI_HAVE_MPI_Ialltoallv 1 #define PyMPI_HAVE_MPI_Ialltoallw 1 #define PyMPI_HAVE_MPI_Ireduce 1 #define PyMPI_HAVE_MPI_Iallreduce 1 #define PyMPI_HAVE_MPI_Ireduce_scatter_block 1 #define PyMPI_HAVE_MPI_Ireduce_scatter 1 #define PyMPI_HAVE_MPI_Iscan 1 #define PyMPI_HAVE_MPI_Iexscan 1 #define PyMPI_HAVE_MPI_MAX_LIBRARY_VERSION_STRING 1 #define PyMPI_HAVE_MPI_Get_library_version 1 #endif /* OMPI >= 1.7.0 */ #if OMPI_NUMVERSION >= 10704 #define PyMPI_HAVE_MPI_Neighbor_allgather 1 #define PyMPI_HAVE_MPI_Neighbor_allgatherv 1 #define PyMPI_HAVE_MPI_Neighbor_alltoall 1 #define PyMPI_HAVE_MPI_Neighbor_alltoallv 1 #define PyMPI_HAVE_MPI_Neighbor_alltoallw 1 #define PyMPI_HAVE_MPI_Ineighbor_allgather 1 #define PyMPI_HAVE_MPI_Ineighbor_allgatherv 1 #define PyMPI_HAVE_MPI_Ineighbor_alltoall 1 #define PyMPI_HAVE_MPI_Ineighbor_alltoallv 1 #define PyMPI_HAVE_MPI_Ineighbor_alltoallw 1 #endif /* OMPI >= 1.7.4 */ #endif #if MPI_VERSION == 3 #if OMPI_NUMVERSION <= 10705 #undef PyMPI_HAVE_MPI_Comm_set_info #undef PyMPI_HAVE_MPI_Comm_get_info #undef PyMPI_HAVE_MPI_WEIGHTS_EMPTY #undef PyMPI_HAVE_MPI_ERR_RMA_SHARED #endif /* OMPI <= 1.7.5 */ #endif #if MPI_VERSION == 3 #if OMPI_NUMVERSION >= 50000 #define PyMPI_HAVE_MPI_Pready 1 #define PyMPI_HAVE_MPI_Pready_range 1 #define PyMPI_HAVE_MPI_Pready_list 1 #define PyMPI_HAVE_MPI_Parrived 1 #define PyMPI_HAVE_MPI_Info_create_env 1 #define PyMPI_HAVE_MPI_Info_get_string 1 #define PyMPI_HAVE_MPI_ERRORS_ABORT 1 #define PyMPI_HAVE_MPI_Session 1 #define PyMPI_HAVE_MPI_SESSION_NULL 1 #define PyMPI_HAVE_MPI_Session_c2f 1 #define PyMPI_HAVE_MPI_Session_f2c 1 #define PyMPI_HAVE_MPI_MAX_PSET_NAME_LEN 1 #define PyMPI_HAVE_MPI_Session_init 1 #define PyMPI_HAVE_MPI_Session_finalize 1 #define PyMPI_HAVE_MPI_Session_get_num_psets 1 #define PyMPI_HAVE_MPI_Session_get_nth_pset 1 #define PyMPI_HAVE_MPI_Session_get_info 1 #define PyMPI_HAVE_MPI_Session_get_pset_info 1 #define PyMPI_HAVE_MPI_Group_from_session_pset 1 #define PyMPI_HAVE_MPI_Session_errhandler_function 1 #define PyMPI_HAVE_MPI_Session_create_errhandler 1 #define PyMPI_HAVE_MPI_Session_get_errhandler 1 #define PyMPI_HAVE_MPI_Session_set_errhandler 1 #define PyMPI_HAVE_MPI_Session_call_errhandler 1 #define PyMPI_HAVE_MPI_Isendrecv 1 #define PyMPI_HAVE_MPI_Isendrecv_replace 1 #define PyMPI_HAVE_MPI_Psend_init 1 #define PyMPI_HAVE_MPI_Precv_init 1 #define PyMPI_HAVE_MPI_Barrier_init 1 #define PyMPI_HAVE_MPI_Bcast_init 1 #define PyMPI_HAVE_MPI_Gather_init 1 #define PyMPI_HAVE_MPI_Gatherv_init 1 #define PyMPI_HAVE_MPI_Scatter_init 1 #define PyMPI_HAVE_MPI_Scatterv_init 1 #define PyMPI_HAVE_MPI_Allgather_init 1 #define PyMPI_HAVE_MPI_Allgatherv_init 1 #define PyMPI_HAVE_MPI_Alltoall_init 1 #define PyMPI_HAVE_MPI_Alltoallv_init 1 #define PyMPI_HAVE_MPI_Alltoallw_init 1 #define PyMPI_HAVE_MPI_Reduce_init 1 #define PyMPI_HAVE_MPI_Allreduce_init 1 #define PyMPI_HAVE_MPI_Reduce_scatter_block_init 1 #define PyMPI_HAVE_MPI_Reduce_scatter_init 1 #define PyMPI_HAVE_MPI_Scan_init 1 #define PyMPI_HAVE_MPI_Exscan_init 1 #define PyMPI_HAVE_MPI_Neighbor_allgather_init 1 #define PyMPI_HAVE_MPI_Neighbor_allgatherv_init 1 #define PyMPI_HAVE_MPI_Neighbor_alltoall_init 1 
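/* Added illustration (not part of mpi4py): a minimal round trip through the
 * MPI-4 Sessions entry points advertised above, guarded by the feature flag.
 * The function name PyMPI_Demo_session_roundtrip is hypothetical. */
static int PyMPI_Demo_session_roundtrip(void)
{
#ifdef PyMPI_HAVE_MPI_Session_init
  MPI_Session session = MPI_SESSION_NULL;
  int ierr = MPI_Session_init(MPI_INFO_NULL, MPI_ERRORS_RETURN, &session);
  if (ierr != MPI_SUCCESS) return ierr;
  return MPI_Session_finalize(&session);    /* release the session handle */
#else
  return MPI_ERR_INTERN;                    /* sessions not available here */
#endif
}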
#define PyMPI_HAVE_MPI_Neighbor_alltoallv_init 1 #define PyMPI_HAVE_MPI_Neighbor_alltoallw_init 1 #define PyMPI_HAVE_MPI_Comm_idup_with_info 1 #define PyMPI_HAVE_MPI_MAX_STRINGTAG_LEN 1 #define PyMPI_HAVE_MPI_Comm_create_from_group 1 #define PyMPI_HAVE_MPI_COMM_TYPE_HW_GUIDED 1 #define PyMPI_HAVE_MPI_COMM_TYPE_HW_UNGUIDED 1 #define PyMPI_HAVE_MPI_Intercomm_create_from_groups 1 #define PyMPI_HAVE_MPI_ERR_PROC_ABORTED 1 #define PyMPI_HAVE_MPI_ERR_VALUE_TOO_LARGE 1 #define PyMPI_HAVE_MPI_ERR_SESSION 1 #define PyMPI_HAVE_MPI_F_SOURCE 1 #define PyMPI_HAVE_MPI_F_TAG 1 #define PyMPI_HAVE_MPI_F_ERROR 1 #define PyMPI_HAVE_MPI_F_STATUS_SIZE 1 #endif #endif #if MPI_VERSION < 5 #ifdef MPI_ERR_REVOKED #define PyMPI_HAVE_MPI_ERR_REVOKED 1 #endif #ifdef MPI_ERR_PROC_FAILED #define PyMPI_HAVE_MPI_ERR_PROC_FAILED 1 #endif #ifdef MPI_ERR_PROC_FAILED_PENDING #define PyMPI_HAVE_MPI_ERR_PROC_FAILED_PENDING 1 #endif #endif #endif /* !PyMPI_CONFIG_OPENMPI_H */ mpi4py-4.0.3/src/lib-mpi/config/unknown.h000066400000000000000000000005351475341043600202070ustar00rootroot00000000000000#ifndef PyMPI_CONFIG_UNKNOWN_H #define PyMPI_CONFIG_UNKNOWN_H #include "mpiapi.h" /* These types are difficult to implement portably */ #ifndef MPI_REAL2 #undef PyMPI_HAVE_MPI_REAL2 #endif #ifndef MPI_COMPLEX4 #undef PyMPI_HAVE_MPI_COMPLEX4 #endif #ifndef MPI_INTEGER16 #undef PyMPI_HAVE_MPI_INTEGER16 #endif #endif /* !PyMPI_CONFIG_UNKNOWN_H */ mpi4py-4.0.3/src/lib-mpi/dynload.h000066400000000000000000000032021475341043600166670ustar00rootroot00000000000000/* Author: Lisandro Dalcin * Contact: dalcinl@gmail.com */ #ifndef PyMPI_DYNLOAD_H #define PyMPI_DYNLOAD_H #if HAVE_DLFCN_H #include #else #if defined(__linux) || defined(__linux__) #define RTLD_LAZY 0x00001 #define RTLD_NOW 0x00002 #define RTLD_LOCAL 0x00000 #define RTLD_GLOBAL 0x00100 #define RTLD_NOLOAD 0x00004 #define RTLD_NODELETE 0x01000 #define RTLD_DEEPBIND 0x00008 #elif defined(__sun) || defined(__sun__) #define RTLD_LAZY 0x00001 #define RTLD_NOW 0x00002 #define RTLD_LOCAL 0x00000 #define RTLD_GLOBAL 0x00100 #define RTLD_NOLOAD 0x00004 #define RTLD_NODELETE 0x01000 #define RTLD_FIRST 0x02000 #elif defined(__APPLE__) #define RTLD_LAZY 0x1 #define RTLD_NOW 0x2 #define RTLD_LOCAL 0x4 #define RTLD_GLOBAL 0x8 #define RTLD_NOLOAD 0x10 #define RTLD_NODELETE 0x80 #define RTLD_FIRST 0x100 #elif defined(__CYGWIN__) #define RTLD_LAZY 1 #define RTLD_NOW 2 #define RTLD_LOCAL 0 #define RTLD_GLOBAL 4 #endif #if defined(__cplusplus) extern "C" { #endif extern void *dlopen(const char *, int); extern void *dlsym(void *, const char *); extern int dlclose(void *); extern char *dlerror(void); #if defined(__cplusplus) } #endif #endif #ifndef RTLD_LAZY #define RTLD_LAZY 1 #endif #ifndef RTLD_NOW #define RTLD_NOW RTLD_LAZY #endif #ifndef RTLD_LOCAL #define RTLD_LOCAL 0 #endif #ifndef RTLD_GLOBAL #define RTLD_GLOBAL RTLD_LOCAL #endif #endif /* !PyMPI_DYNLOAD_H */ /* Local variables: c-basic-offset: 2 indent-tabs-mode: nil End: */ mpi4py-4.0.3/src/lib-mpi/fallback.h000066400000000000000000001637401475341043600170120ustar00rootroot00000000000000#ifndef PyMPI_FALLBACK_H #define PyMPI_FALLBACK_H /* ---------------------------------------------------------------- */ #include #ifndef PyMPI_MALLOC #define PyMPI_MALLOC malloc #endif #ifndef PyMPI_FREE #define PyMPI_FREE free #endif /* ---------------------------------------------------------------- */ /* Version Number */ #ifndef PyMPI_HAVE_MPI_VERSION #if !defined(MPI_VERSION) #define MPI_VERSION 1 #endif #endif #ifndef PyMPI_HAVE_MPI_SUBVERSION #if 
!defined(MPI_SUBVERSION) #define MPI_SUBVERSION 0 #endif #endif #ifndef PyMPI_HAVE_MPI_Get_version static int PyMPI_Get_version(int *version, int* subversion) { if (!version) return MPI_ERR_ARG; if (!subversion) return MPI_ERR_ARG; *version = MPI_VERSION; *subversion = MPI_SUBVERSION; return MPI_SUCCESS; } #undef MPI_Get_version #define MPI_Get_version PyMPI_Get_version #endif #ifndef PyMPI_HAVE_MPI_Get_library_version #define PyMPI_MAX_LIBRARY_VERSION_STRING 8 static int PyMPI_Get_library_version(char version[], int *rlen) { if (!version) return MPI_ERR_ARG; if (!rlen) return MPI_ERR_ARG; version[0] = 'M'; version[1] = 'P'; version[2] = 'I'; version[3] = ' '; version[4] = '0' + (char) MPI_VERSION; version[5] = '.'; version[6] = '0' + (char) MPI_SUBVERSION; version[7] = 0; *rlen = 7; return MPI_SUCCESS; } #undef MPI_MAX_LIBRARY_VERSION_STRING #define MPI_MAX_LIBRARY_VERSION_STRING \ PyMPI_MAX_LIBRARY_VERSION_STRING #undef MPI_Get_library_version #define MPI_Get_library_version \ PyMPI_Get_library_version #endif /* ---------------------------------------------------------------- */ /* Threading Support */ #ifndef PyMPI_HAVE_MPI_Init_thread static int PyMPI_Init_thread(int *argc, char ***argv, int required, int *provided) { int ierr = MPI_SUCCESS; if (!provided) return MPI_ERR_ARG; ierr = MPI_Init(argc, argv); if (ierr != MPI_SUCCESS) return ierr; (void)required; *provided = MPI_THREAD_SINGLE; return MPI_SUCCESS; } #undef MPI_Init_thread #define MPI_Init_thread PyMPI_Init_thread #endif #ifndef PyMPI_HAVE_MPI_Query_thread static int PyMPI_Query_thread(int *provided) { if (!provided) return MPI_ERR_ARG; *provided = MPI_THREAD_SINGLE; return MPI_SUCCESS; } #undef MPI_Query_thread #define MPI_Query_thread PyMPI_Query_thread #endif #ifndef PyMPI_HAVE_MPI_Is_thread_main static int PyMPI_Is_thread_main(int *flag) { if (!flag) return MPI_ERR_ARG; *flag = 1; /* XXX this is completely broken !! 
*/ return MPI_SUCCESS; } #undef MPI_Is_thread_main #define MPI_Is_thread_main PyMPI_Is_thread_main #endif /* ---------------------------------------------------------------- */ /* Status */ #ifndef PyMPI_HAVE_MPI_STATUS_IGNORE static MPI_Status PyMPI_STATUS_IGNORE; #undef MPI_STATUS_IGNORE #define MPI_STATUS_IGNORE ((MPI_Status*)(&PyMPI_STATUS_IGNORE)) #endif #ifndef PyMPI_HAVE_MPI_STATUSES_IGNORE #ifndef PyMPI_MPI_STATUSES_IGNORE_SIZE #define PyMPI_MPI_STATUSES_IGNORE_SIZE 4096 #endif static MPI_Status PyMPI_STATUSES_IGNORE[PyMPI_MPI_STATUSES_IGNORE_SIZE]; #undef MPI_STATUSES_IGNORE #define MPI_STATUSES_IGNORE ((MPI_Status*)(PyMPI_STATUSES_IGNORE)) #endif #define PyMPI_Status_GET_ATTR(name, NAME) \ static int PyMPI_Status_get_##name(MPI_Status *s, int *i) \ { if (s && i) { *i = s->MPI_##NAME; } return MPI_SUCCESS; } #define PyMPI_Status_SET_ATTR(name, NAME) \ static int PyMPI_Status_set_##name(MPI_Status *s, int i) \ { if (s) { s->MPI_##NAME = i; } return MPI_SUCCESS; } #ifndef PyMPI_HAVE_MPI_Status_get_source #if defined(MPIX_HAVE_MPI_STATUS_GETSET) #define PyMPI_Status_get_source MPIX_Status_get_source #else PyMPI_Status_GET_ATTR(source, SOURCE) #endif #undef MPI_Status_get_source #define MPI_Status_get_source PyMPI_Status_get_source #endif #ifndef PyMPI_HAVE_MPI_Status_set_source #if defined(MPIX_HAVE_MPI_STATUS_GETSET) #define PyMPI_Status_set_source MPIX_Status_set_source #else PyMPI_Status_SET_ATTR(source, SOURCE) #endif #undef MPI_Status_set_source #define MPI_Status_set_source PyMPI_Status_set_source #endif #ifndef PyMPI_HAVE_MPI_Status_get_tag #if defined(MPIX_HAVE_MPI_STATUS_GETSET) #define PyMPI_Status_get_tag MPIX_Status_get_tag #else PyMPI_Status_GET_ATTR(tag, TAG) #endif #undef MPI_Status_get_tag #define MPI_Status_get_tag PyMPI_Status_get_tag #endif #ifndef PyMPI_HAVE_MPI_Status_set_tag #if defined(MPIX_HAVE_MPI_STATUS_GETSET) #define PyMPI_Status_set_tag MPIX_Status_set_tag #else PyMPI_Status_SET_ATTR(tag, TAG) #endif #undef MPI_Status_set_tag #define MPI_Status_set_tag PyMPI_Status_set_tag #endif #ifndef PyMPI_HAVE_MPI_Status_get_error #if defined(MPIX_HAVE_MPI_STATUS_GETSET) #define PyMPI_Status_get_error MPIX_Status_get_error #else PyMPI_Status_GET_ATTR(error, ERROR) #endif #undef MPI_Status_get_error #define MPI_Status_get_error PyMPI_Status_get_error #endif #ifndef PyMPI_HAVE_MPI_Status_set_error #if defined(MPIX_HAVE_MPI_STATUS_GETSET) #define PyMPI_Status_set_error MPIX_Status_set_error #else PyMPI_Status_SET_ATTR(error, ERROR) #endif #undef MPI_Status_set_error #define MPI_Status_set_error PyMPI_Status_set_error #endif #ifdef PyMPI_Status_GET_ATTR #undef PyMPI_Status_GET_ATTR #endif #ifdef PyMPI_Status_SET_ATTR #undef PyMPI_Status_SET_ATTR #endif /* ---------------------------------------------------------------- */ /* Datatypes */ #ifndef PyMPI_HAVE_MPI_LONG_LONG #undef MPI_LONG_LONG #define MPI_LONG_LONG MPI_LONG_LONG_INT #endif #ifndef PyMPI_HAVE_MPI_Type_get_extent static int PyMPI_Type_get_extent(MPI_Datatype datatype, MPI_Aint *lb, MPI_Aint *extent) { int ierr = MPI_SUCCESS; ierr = MPI_Type_lb(datatype, lb); if (ierr != MPI_SUCCESS) return ierr; ierr = MPI_Type_extent(datatype, extent); if (ierr != MPI_SUCCESS) return ierr; return MPI_SUCCESS; } #undef MPI_Type_get_extent #define MPI_Type_get_extent PyMPI_Type_get_extent #endif #ifndef PyMPI_HAVE_MPI_Type_dup static int PyMPI_Type_dup(MPI_Datatype datatype, MPI_Datatype *newtype) { int ierr = MPI_SUCCESS; ierr = MPI_Type_contiguous(1, datatype, newtype); if (ierr != MPI_SUCCESS) return ierr; ierr = 
MPI_Type_commit(newtype); /* the safe way ... */ if (ierr != MPI_SUCCESS) return ierr; return MPI_SUCCESS; } #undef MPI_Type_dup #define MPI_Type_dup PyMPI_Type_dup #endif #ifndef PyMPI_HAVE_MPI_Type_create_indexed_block static int PyMPI_Type_create_indexed_block(int count, int blocklength, int displacements[], MPI_Datatype oldtype, MPI_Datatype *newtype) { int i, *blocklengths = NULL, ierr = MPI_SUCCESS; if (count > 0) { blocklengths = (int *) PyMPI_MALLOC((size_t)count*sizeof(int)); if (!blocklengths) return MPI_ERR_INTERN; } for (i=0; i 0) { blocklengths = (int *) PyMPI_MALLOC((size_t)count*sizeof(int)); if (!blocklengths) return MPI_ERR_INTERN; } for (i=0; i 0); PyMPI_CHKARG(sizes); PyMPI_CHKARG(subsizes); PyMPI_CHKARG(starts); PyMPI_CHKARG(newtype); for (i=0; i 0); PyMPI_CHKARG(subsizes[i] > 0); PyMPI_CHKARG(starts[i] >= 0); PyMPI_CHKARG(sizes[i] >= subsizes[i]); PyMPI_CHKARG(starts[i] <= (sizes[i] - subsizes[i])); } PyMPI_CHKARG((order==MPI_ORDER_C) || (order==MPI_ORDER_FORTRAN)); ierr = MPI_Type_extent(oldtype, &extent); if (ierr != MPI_SUCCESS) return ierr; if (order == MPI_ORDER_FORTRAN) { if (ndims == 1) { ierr = MPI_Type_contiguous(subsizes[0], oldtype, &tmp1); if (ierr != MPI_SUCCESS) return ierr; } else { ierr = MPI_Type_vector(subsizes[1], subsizes[0], sizes[0], oldtype, &tmp1); if (ierr != MPI_SUCCESS) return ierr; size = sizes[0]*extent; for (i=2; i=0; i--) { size *= sizes[i+1]; ierr = MPI_Type_hvector(subsizes[i], 1, size, tmp1, &tmp2); if (ierr != MPI_SUCCESS) return ierr; ierr = MPI_Type_free(&tmp1); if (ierr != MPI_SUCCESS) return ierr; tmp1 = tmp2; } } /* add displacement and upper bound */ disps[1] = starts[ndims-1]; size = 1; for (i=ndims-2; i>=0; i--) { size *= sizes[i+1]; disps[1] += size*starts[i]; } } disps[1] *= extent; disps[2] = extent; for (i=0; i 0); PyMPI_CHKARG(blksize * nprocs >= global_size); } j = global_size - blksize*rank; mysize = (blksize < j) ? blksize : j; if (mysize < 0) mysize = 0; stride = orig_extent; if (order == MPI_ORDER_FORTRAN) { if (dim == 0) { ierr = MPI_Type_contiguous(mysize, type_old, type_new); if (ierr != MPI_SUCCESS) goto fn_exit; } else { for (i=0; idim; i--) stride *= gsizes[i]; ierr = MPI_Type_hvector(mysize, 1, stride, type_old, type_new); if (ierr != MPI_SUCCESS) goto fn_exit; } } *offset = blksize * rank; if (mysize == 0) *offset = 0; ierr = MPI_SUCCESS; fn_exit: return ierr; } static int PyMPI_Type_cyclic(int *gsizes, int dim, int ndims, int nprocs, int rank, int darg, int order, MPI_Aint orig_extent, MPI_Datatype type_old, MPI_Datatype *type_new, MPI_Aint *offset) { int ierr, blksize, i, blklens[3], st_index, end_index, local_size, rem, count; MPI_Aint stride, disps[3]; MPI_Datatype type_tmp, types[3]; type_tmp = MPI_DATATYPE_NULL; types[0] = types[1] = types[2] = MPI_DATATYPE_NULL; if (darg == MPI_DISTRIBUTE_DFLT_DARG) blksize = 1; else blksize = darg; PyMPI_CHKARG(blksize > 0); st_index = rank*blksize; end_index = gsizes[dim] - 1; if (end_index < st_index) local_size = 0; else { local_size = ((end_index - st_index + 1)/(nprocs*blksize))*blksize; rem = (end_index - st_index + 1) % (nprocs*blksize); local_size += (rem < blksize) ? 
rem : blksize; } count = local_size/blksize; rem = local_size % blksize; stride = nprocs*blksize*orig_extent; if (order == MPI_ORDER_FORTRAN) for (i=0; idim; i--) stride *= gsizes[i]; ierr = MPI_Type_hvector(count, blksize, stride, type_old, type_new); if (ierr != MPI_SUCCESS) goto fn_exit; /* if the last block is of size less than blksize, include it separately using MPI_Type_struct */ if (rem) { types[0] = *type_new; types[1] = type_old; disps[0] = 0; disps[1] = count*stride; blklens[0] = 1; blklens[1] = rem; ierr = MPI_Type_struct(2, blklens, disps, types, &type_tmp); if (ierr != MPI_SUCCESS) goto fn_exit; ierr = MPI_Type_free(type_new); if (ierr != MPI_SUCCESS) goto fn_exit; *type_new = type_tmp; } /* In the first iteration, we need to set the displacement in that dimension correctly. */ if ( ((order == MPI_ORDER_FORTRAN) && (dim == 0)) || ((order == MPI_ORDER_C) && (dim == ndims-1)) ) { types[0] = MPI_LB; disps[0] = 0; types[1] = *type_new; disps[1] = rank * blksize * orig_extent; types[2] = MPI_UB; disps[2] = orig_extent * gsizes[dim]; blklens[0] = blklens[1] = blklens[2] = 1; ierr = MPI_Type_struct(3, blklens, disps, types, &type_tmp); if (ierr != MPI_SUCCESS) goto fn_exit; ierr = MPI_Type_free(type_new); if (ierr != MPI_SUCCESS) goto fn_exit; *type_new = type_tmp; *offset = 0; } else { *offset = rank * blksize; } if (local_size == 0) *offset = 0; ierr = MPI_SUCCESS; fn_exit: return ierr; } static int PyMPI_Type_create_darray(int size, int rank, int ndims, int gsizes[], int distribs[], int dargs[], int psizes[], int order, MPI_Datatype oldtype, MPI_Datatype *newtype) { int ierr = MPI_SUCCESS, i; int procs, tmp_rank, tmp_size, blklens[3]; MPI_Aint orig_extent, disps[3]; MPI_Datatype type_old, type_new, types[3]; int *coords = NULL; MPI_Aint *offsets = NULL; orig_extent=0; type_old = type_new = MPI_DATATYPE_NULL; types[0] = types[1] = types[2] = MPI_DATATYPE_NULL; ierr = MPI_Type_extent(oldtype, &orig_extent); if (ierr != MPI_SUCCESS) goto fn_exit; PyMPI_CHKARG(rank >= 0); PyMPI_CHKARG(size > 0); PyMPI_CHKARG(ndims > 0); PyMPI_CHKARG(gsizes); PyMPI_CHKARG(distribs); PyMPI_CHKARG(dargs); PyMPI_CHKARG(psizes); PyMPI_CHKARG((order==MPI_ORDER_C) || (order==MPI_ORDER_FORTRAN) ); for (i=0; i < ndims; i++) { PyMPI_CHKARG(gsizes[1] > 0); PyMPI_CHKARG(psizes[1] > 0); PyMPI_CHKARG((distribs[i] == MPI_DISTRIBUTE_NONE) || (distribs[i] == MPI_DISTRIBUTE_BLOCK) || (distribs[i] == MPI_DISTRIBUTE_CYCLIC)); PyMPI_CHKARG((dargs[i] == MPI_DISTRIBUTE_DFLT_DARG) || (dargs[i] > 0)); PyMPI_CHKARG(!((distribs[i] == MPI_DISTRIBUTE_NONE) && (psizes[i] != 1))); } /* calculate position in Cartesian grid as MPI would (row-major ordering) */ coords = (int *) PyMPI_MALLOC((size_t)ndims*sizeof(int)); if (!coords) { ierr = MPI_ERR_INTERN; goto fn_exit; } offsets = (MPI_Aint *) PyMPI_MALLOC((size_t)ndims*sizeof(MPI_Aint)); if (!offsets) { ierr = MPI_ERR_INTERN; goto fn_exit; } procs = size; tmp_rank = rank; for (i=0; i=0; i--) { if (distribs[i] == MPI_DISTRIBUTE_BLOCK) { ierr = PyMPI_Type_block(gsizes, i, ndims, psizes[i], coords[i], dargs[i], order, orig_extent, type_old, &type_new, offsets+i); if (ierr != MPI_SUCCESS) goto fn_exit; } else if (distribs[i] == MPI_DISTRIBUTE_CYCLIC) { ierr = PyMPI_Type_cyclic(gsizes, i, ndims, psizes[i], coords[i], dargs[i], order, orig_extent, type_old, &type_new, offsets+i); if (ierr != MPI_SUCCESS) goto fn_exit; } else if (distribs[i] == MPI_DISTRIBUTE_NONE) { /* treat it as a block distribution on 1 process */ ierr = PyMPI_Type_block(gsizes, i, ndims, psizes[i], coords[i], 
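/* MPI_DISTRIBUTE_NONE is handled as a block distribution over a single
 * process (psizes[i] is required to be 1 in that case), so the call below
 * reuses PyMPI_Type_block with the default distribution argument.  Each loop
 * iteration composes the new datatype from the previous one (type_old ->
 * type_new) and records the per-dimension starting offset in offsets[i]. */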
MPI_DISTRIBUTE_DFLT_DARG, order, orig_extent, type_old, &type_new, offsets+i); if (ierr != MPI_SUCCESS) goto fn_exit; } if (i != ndims-1) { ierr = MPI_Type_free(&type_old); if (ierr != MPI_SUCCESS) goto fn_exit; } type_old = type_new; } /* add displacement and upper bound */ disps[1] = offsets[ndims-1]; tmp_size = 1; for (i=ndims-2; i>=0; i--) { tmp_size *= gsizes[i+1]; disps[1] += tmp_size*offsets[i]; } /* rest done below for both Fortran and C order */ } disps[0] = 0; disps[1] *= orig_extent; disps[2] = orig_extent; for (i=0; i 0) p[n] = 0; #endif status->MPI_SOURCE = MPI_ANY_SOURCE; status->MPI_TAG = MPI_ANY_TAG; status->MPI_ERROR = MPI_SUCCESS; #ifdef PyMPI_HAVE_MPI_Status_set_elements (void)MPI_Status_set_elements(status, MPI_BYTE, 0); #endif #ifdef PyMPI_HAVE_MPI_Status_set_cancelled (void)MPI_Status_set_cancelled(status, 0); #endif } return MPI_SUCCESS; } #undef MPI_Request_get_status #define MPI_Request_get_status PyMPI_Request_get_status #endif #endif /* ---------------------------------------------------------------- */ #ifndef PyMPI_HAVE_MPI_Reduce_scatter_block static int PyMPI_Reduce_scatter_block(void *sendbuf, void *recvbuf, int recvcount, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm) { int ierr = MPI_SUCCESS; int n = 1, *recvcounts = NULL; ierr = MPI_Comm_size(comm, &n); if (ierr != MPI_SUCCESS) return ierr; recvcounts = (int *) PyMPI_MALLOC((size_t)n*sizeof(int)); if (!recvcounts) return MPI_ERR_INTERN; while (n-- > 0) recvcounts[n] = recvcount; ierr = MPI_Reduce_scatter(sendbuf, recvbuf, recvcounts, datatype, op, comm); PyMPI_FREE(recvcounts); return ierr; } #undef MPI_Reduce_scatter_block #define MPI_Reduce_scatter_block PyMPI_Reduce_scatter_block #endif /* ---------------------------------------------------------------- */ /* Communicator Info */ #ifndef PyMPI_HAVE_MPI_Comm_dup_with_info static int PyMPI_Comm_dup_with_info(MPI_Comm comm, MPI_Info info, MPI_Comm *newcomm) { int dummy, ierr; if (info != MPI_INFO_NULL) { ierr = MPI_Info_get_nkeys(info, &dummy); if (ierr != MPI_SUCCESS) return ierr; } return MPI_Comm_dup(comm, newcomm); } #undef MPI_Comm_dup_with_info #define MPI_Comm_dup_with_info PyMPI_Comm_dup_with_info #endif #ifndef PyMPI_HAVE_MPI_Comm_idup_with_info static int PyMPI_Comm_idup_with_info(MPI_Comm comm, MPI_Info info, MPI_Comm *newcomm, MPI_Request *request) { int dummy, ierr; if (info != MPI_INFO_NULL) { ierr = MPI_Info_get_nkeys(info, &dummy); if (ierr != MPI_SUCCESS) return ierr; } return MPI_Comm_idup(comm, newcomm, request); } #undef MPI_Comm_idup_with_info #define MPI_Comm_idup_with_info PyMPI_Comm_idup_with_info #endif #ifndef PyMPI_HAVE_MPI_Comm_set_info static int PyMPI_Comm_set_info(MPI_Comm comm, MPI_Info info) { int dummy, ierr; ierr = MPI_Comm_size(comm, &dummy); if (ierr != MPI_SUCCESS) return ierr; if (info != MPI_INFO_NULL) { ierr = MPI_Info_get_nkeys(info, &dummy); if (ierr != MPI_SUCCESS) return ierr; } return MPI_SUCCESS; } #undef MPI_Comm_set_info #define MPI_Comm_set_info PyMPI_Comm_set_info #endif #ifndef PyMPI_HAVE_MPI_Comm_get_info static int PyMPI_Comm_get_info(MPI_Comm comm, MPI_Info *info) { int dummy, ierr; ierr = MPI_Comm_size(comm, &dummy); if (ierr != MPI_SUCCESS) return ierr; return MPI_Info_create(info); } #undef MPI_Comm_get_info #define MPI_Comm_get_info PyMPI_Comm_get_info #endif /* ---------------------------------------------------------------- */ #if !defined(PyMPI_HAVE_MPI_WEIGHTS_EMPTY) static const int PyMPI_WEIGHTS_EMPTY_ARRAY[1] = {MPI_UNDEFINED}; static int * const PyMPI_WEIGHTS_EMPTY = 
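/* Degraded-mode shims in this region: PyMPI_Reduce_scatter_block falls back
 * to MPI_Reduce_scatter with a recvcounts array holding the same count for
 * every rank; the *_with_info / set_info / get_info communicator shims only
 * validate their (comm, info) handles, ignoring the info on input and
 * returning a freshly created empty info object on output.  The
 * MPI_WEIGHTS_EMPTY placeholder defined here is a one-element array holding
 * MPI_UNDEFINED, used solely as a distinguishable sentinel pointer. */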
(int*)PyMPI_WEIGHTS_EMPTY_ARRAY; #undef MPI_WEIGHTS_EMPTY #define MPI_WEIGHTS_EMPTY PyMPI_WEIGHTS_EMPTY #endif /* ---------------------------------------------------------------- */ /* Memory Allocation */ #if !defined(PyMPI_HAVE_MPI_Alloc_mem) || \ !defined(PyMPI_HAVE_MPI_Free_mem) static int PyMPI_Alloc_mem(MPI_Aint size, MPI_Info info, void *baseptr) { char *buf = NULL; if (size < 0) return MPI_ERR_ARG; if (!baseptr) return MPI_ERR_ARG; if (size == 0) size = 1; buf = (char *) PyMPI_MALLOC((size_t)size); if (!buf) return MPI_ERR_NO_MEM; (void)info; *(char **)baseptr = buf; return MPI_SUCCESS; } #undef MPI_Alloc_mem #define MPI_Alloc_mem PyMPI_Alloc_mem static int PyMPI_Free_mem(void *baseptr) { if (!baseptr) return MPI_ERR_ARG; PyMPI_FREE(baseptr); return MPI_SUCCESS; } #undef MPI_Free_mem #define MPI_Free_mem PyMPI_Free_mem #endif /* ---------------------------------------------------------------- */ #ifndef PyMPI_HAVE_MPI_Win_allocate #ifdef PyMPI_HAVE_MPI_Win_create static int PyMPI_WIN_KEYVAL_MPIMEM = MPI_KEYVAL_INVALID; static int MPIAPI PyMPI_win_free_mpimem(MPI_Win win, int k, void *v, void *xs) { (void)win; (void)k; (void)xs; /* unused */ return MPI_Free_mem(v); } static int MPIAPI PyMPI_win_free_keyval(MPI_Comm comm, int k, void *v, void *xs) { int ierr = MPI_SUCCESS; (void)comm; (void)xs; /* unused */ ierr = MPI_Win_free_keyval((int *)v); if (ierr != MPI_SUCCESS) return ierr; ierr = MPI_Comm_free_keyval(&k); if (ierr != MPI_SUCCESS) return ierr; return MPI_SUCCESS; } static int PyMPI_Win_allocate(MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, void *baseptr_, MPI_Win *win_) { int ierr = MPI_SUCCESS; void *baseptr = MPI_BOTTOM; MPI_Win win = MPI_WIN_NULL; if (!baseptr_) return MPI_ERR_ARG; if (!win_) return MPI_ERR_ARG; ierr = MPI_Alloc_mem(size?size:1, info, &baseptr); if (ierr != MPI_SUCCESS) goto error; ierr = MPI_Win_create(baseptr, size, disp_unit, info, comm, &win); if (ierr != MPI_SUCCESS) goto error; #if defined(PyMPI_HAVE_MPI_Win_create_keyval) && \ defined(PyMPI_HAVE_MPI_Win_set_attr) if (PyMPI_WIN_KEYVAL_MPIMEM == MPI_KEYVAL_INVALID) { int comm_keyval = MPI_KEYVAL_INVALID; ierr = MPI_Win_create_keyval(MPI_WIN_NULL_COPY_FN, PyMPI_win_free_mpimem, &PyMPI_WIN_KEYVAL_MPIMEM, NULL); if (ierr != MPI_SUCCESS) goto error; ierr = MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, PyMPI_win_free_keyval, &comm_keyval, NULL); if (ierr == MPI_SUCCESS) (void)MPI_Comm_set_attr(MPI_COMM_SELF, comm_keyval, &PyMPI_WIN_KEYVAL_MPIMEM); } ierr = MPI_Win_set_attr(win, PyMPI_WIN_KEYVAL_MPIMEM, baseptr); if (ierr != MPI_SUCCESS) goto error; #endif *((void**)baseptr_) = baseptr; *win_ = win; return MPI_SUCCESS; error: if (baseptr != MPI_BOTTOM) (void)MPI_Free_mem(baseptr); if (win != MPI_WIN_NULL) (void)MPI_Win_free(&win); return ierr; } #undef MPI_Win_allocate #define MPI_Win_allocate PyMPI_Win_allocate #endif #endif #ifndef PyMPI_HAVE_MPI_Win_set_info static int PyMPI_Win_set_info(MPI_Win win, MPI_Info info) { int dummy, ierr; if (win == MPI_WIN_NULL) return MPI_ERR_WIN; if (info != MPI_INFO_NULL) { ierr = MPI_Info_get_nkeys(info, &dummy); if (ierr != MPI_SUCCESS) return ierr; } return MPI_SUCCESS; } #undef MPI_Win_set_info #define MPI_Win_set_info PyMPI_Win_set_info #endif #ifndef PyMPI_HAVE_MPI_Win_get_info static int PyMPI_Win_get_info(MPI_Win win, MPI_Info *info) { if (win == MPI_WIN_NULL) return MPI_ERR_WIN; return MPI_Info_create(info); } #undef MPI_Win_get_info #define MPI_Win_get_info PyMPI_Win_get_info #endif /* 
---------------------------------------------------------------- */ #ifndef PyMPI_HAVE_MPI_Info_get_string static int PyMPI_Info_get_string(MPI_Info info, const char key[], int *buflen, char value[], int *flag) { int ierr, valuelen = buflen ? *buflen : 0; if (valuelen) { ierr = MPI_Info_get(info, key, valuelen-1, value, flag); if (ierr != MPI_SUCCESS) return ierr; if (value && flag && *flag) value[valuelen] = 0; } ierr = MPI_Info_get_valuelen(info, key, &valuelen, flag); if (ierr != MPI_SUCCESS) return ierr; if (buflen && flag && *flag) *buflen = valuelen + 1; return MPI_SUCCESS; } #undef MPI_Info_get_string #define MPI_Info_get_string PyMPI_Info_get_string #endif /* ---------------------------------------------------------------- */ #ifndef PyMPI_HAVE_MPI_F_SOURCE #define PyMPI_F_SOURCE ((int)(offsetof(MPI_Status,MPI_SOURCE)/sizeof(int))) #undef MPI_F_SOURCE #define MPI_F_SOURCE PyMPI_F_SOURCE #endif #ifndef PyMPI_HAVE_MPI_F_TAG #define PyMPI_F_TAG ((int)(offsetof(MPI_Status,MPI_TAG)/sizeof(int))) #undef MPI_F_TAG #define MPI_F_TAG PyMPI_F_TAG #endif #ifndef PyMPI_HAVE_MPI_F_ERROR #define PyMPI_F_ERROR ((int)(offsetof(MPI_Status,MPI_ERROR)/sizeof(int))) #undef MPI_F_ERROR #define MPI_F_ERROR PyMPI_F_ERROR #endif #ifndef PyMPI_HAVE_MPI_F_STATUS_SIZE #define PyMPI_F_STATUS_SIZE ((int)(sizeof(MPI_Status)/sizeof(int))) #undef MPI_F_STATUS_SIZE #define MPI_F_STATUS_SIZE PyMPI_F_STATUS_SIZE #endif /* ---------------------------------------------------------------- */ #include "largecnt.h" /* ---------------------------------------------------------------- */ #ifndef PyMPI_HAVE_MPI_Type_contiguous_c static int PyMPI_Type_contiguous_c(MPI_Count count, MPI_Datatype oldtype, MPI_Datatype *newtype) { int ierr; int c; PyMPICastValue(int, c, MPI_Count, count); ierr = MPI_Type_contiguous(c, oldtype, newtype); fn_exit: return ierr; } #undef MPI_Type_contiguous_c #define MPI_Type_contiguous_c PyMPI_Type_contiguous_c #endif #ifndef PyMPI_HAVE_MPI_Type_vector_c static int PyMPI_Type_vector_c(MPI_Count count, MPI_Count blocklength, MPI_Count stride, MPI_Datatype oldtype, MPI_Datatype *newtype) { int ierr; int c, b, s; PyMPICastValue(int, c, MPI_Count, count); PyMPICastValue(int, b, MPI_Count, blocklength); PyMPICastValue(int, s, MPI_Count, stride); ierr = MPI_Type_vector(c, b, s, oldtype, newtype); fn_exit: return ierr; } #undef MPI_Type_vector_c #define MPI_Type_vector_c PyMPI_Type_vector_c #endif #ifndef PyMPI_HAVE_MPI_Type_create_hvector_c static int PyMPI_Type_create_hvector_c(MPI_Count count, MPI_Count blocklength, MPI_Count stride, MPI_Datatype oldtype, MPI_Datatype *newtype) { int ierr; int c, b, s; PyMPICastValue(int, c, MPI_Count, count); PyMPICastValue(int, b, MPI_Count, blocklength); PyMPICastValue(int, s, MPI_Count, stride); ierr = MPI_Type_create_hvector(c, b, s, oldtype, newtype); fn_exit: return ierr; } #undef MPI_Type_create_hvector_c #define MPI_Type_create_hvector_c PyMPI_Type_create_hvector_c #endif #ifndef PyMPI_HAVE_MPI_Type_indexed_c static int PyMPI_Type_indexed_c(MPI_Count count, const MPI_Count blocklengths[], const MPI_Count displacements[], MPI_Datatype oldtype, MPI_Datatype *newtype) { int ierr; int c, *b = NULL, *d = NULL; PyMPICastValue(int, c, MPI_Count, count); PyMPICastArray(int, b, MPI_Count, blocklengths, count); PyMPICastArray(int, d, MPI_Count, displacements, count); ierr = MPI_Type_indexed(c, b, d, oldtype, newtype); fn_exit: PyMPIFreeArray(b); PyMPIFreeArray(d); return ierr; } #undef MPI_Type_indexed_c #define MPI_Type_indexed_c PyMPI_Type_indexed_c #endif #ifndef 
PyMPI_HAVE_MPI_Type_create_hindexed_c static int PyMPI_Type_create_hindexed_c(MPI_Count count, const MPI_Count blocklengths[], const MPI_Count displacements[], MPI_Datatype oldtype, MPI_Datatype *newtype) { int ierr; int c, *b = NULL; MPI_Aint *d = NULL; PyMPICastValue(int, c, MPI_Count, count); PyMPICastArray(int, b, MPI_Count, blocklengths, count); PyMPICastArray(MPI_Aint, d, MPI_Count, displacements, count); ierr = MPI_Type_create_hindexed(c, b, d, oldtype, newtype); fn_exit: PyMPIFreeArray(b); PyMPIFreeArray(d); return ierr; } #undef MPI_Type_create_hindexed_c #define MPI_Type_create_hindexed_c PyMPI_Type_create_hindexed_c #endif #ifndef PyMPI_HAVE_MPI_Type_create_indexed_block_c static int PyMPI_Type_create_indexed_block_c(MPI_Count count, MPI_Count blocklength, const MPI_Count displacements[], MPI_Datatype oldtype, MPI_Datatype *newtype) { int ierr; int c, b, *d = NULL; PyMPICastValue(int, c, MPI_Count, count); PyMPICastValue(int, b, MPI_Count, blocklength); PyMPICastArray(int, d, MPI_Count, displacements, count); ierr = MPI_Type_create_indexed_block(c, b, d, oldtype, newtype); fn_exit: PyMPIFreeArray(d); return ierr; } #undef MPI_Type_create_indexed_block_c #define MPI_Type_create_indexed_block_c PyMPI_Type_create_indexed_block_c #endif #ifndef PyMPI_HAVE_MPI_Type_create_hindexed_block_c static int PyMPI_Type_create_hindexed_block_c(MPI_Count count, MPI_Count blocklength, const MPI_Count displacements[], MPI_Datatype oldtype, MPI_Datatype *newtype) { int ierr; int c, b; MPI_Aint *d = NULL; PyMPICastValue(int, c, MPI_Count, count); PyMPICastValue(int, b, MPI_Count, blocklength); PyMPICastArray(MPI_Aint, d, MPI_Count, displacements, count); ierr = MPI_Type_create_hindexed_block(c, b, d, oldtype, newtype); fn_exit: PyMPIFreeArray(d); return ierr; } #undef MPI_Type_create_hindexed_block_c #define MPI_Type_create_hindexed_block_c PyMPI_Type_create_hindexed_block_c #endif #ifndef PyMPI_HAVE_MPI_Type_create_struct_c static int PyMPI_Type_create_struct_c(MPI_Count count, const MPI_Count blocklengths[], const MPI_Count displacements[], const MPI_Datatype types[], MPI_Datatype *newtype) { int ierr; int c, *b = NULL; MPI_Aint *d = NULL; MPI_Datatype *t = (MPI_Datatype *) types; PyMPICastValue(int, c, MPI_Count, count); PyMPICastArray(int, b, MPI_Count, blocklengths, count); PyMPICastArray(MPI_Aint, d, MPI_Count, displacements, count); ierr = MPI_Type_create_struct(c, b, d, t, newtype); fn_exit: PyMPIFreeArray(b); PyMPIFreeArray(d); return ierr; } #undef MPI_Type_create_struct_c #define MPI_Type_create_struct_c PyMPI_Type_create_struct_c #endif #ifndef PyMPI_HAVE_MPI_Type_create_subarray_c static int PyMPI_Type_create_subarray_c(int ndims, const MPI_Count sizes[], const MPI_Count subsizes[], const MPI_Count starts[], int order, MPI_Datatype oldtype, MPI_Datatype *newtype) { int ierr; int *N = NULL, *n = NULL, *s = NULL; PyMPICastArray(int, N, MPI_Count, sizes, ndims); PyMPICastArray(int, n, MPI_Count, subsizes, ndims); PyMPICastArray(int, s, MPI_Count, starts, ndims); ierr = MPI_Type_create_subarray(ndims, N, n, s, order, oldtype, newtype); fn_exit: PyMPIFreeArray(N); PyMPIFreeArray(n); PyMPIFreeArray(s); return ierr; } #undef MPI_Type_create_subarray_c #define MPI_Type_create_subarray_c PyMPI_Type_create_subarray_c #endif #ifndef PyMPI_HAVE_MPI_Type_create_darray_c static int PyMPI_Type_create_darray_c(int size, int rank, int ndims, const MPI_Count gsizes[], const int distribs[], const int dargs[], const int psizes[], int order, MPI_Datatype oldtype, MPI_Datatype *newtype) { int ierr; int *g = 
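/* Large-count datatype constructors: each MPI_Type_..._c wrapper in this
 * block narrows its MPI_Count arguments to the classic int (or MPI_Aint for
 * byte displacements) parameters with PyMPICastValue/PyMPICastArray, then
 * forwards to the non-_c routine; a value that does not fit raises
 * MPI_ERR_ARG through the cast helpers.  For MPI_Type_create_darray_c only
 * the global sizes need narrowing, since distribs, dargs, and psizes are
 * already plain int arrays in the large-count signature. */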
NULL; PyMPICastArray(int, g, MPI_Count, gsizes, ndims); ierr = MPI_Type_create_darray(size, rank, ndims, g, (int *) distribs, (int *) dargs, (int *) psizes, order, oldtype, newtype); fn_exit: PyMPIFreeArray(g); return ierr; } #undef MPI_Type_create_darray_c #define MPI_Type_create_darray_c PyMPI_Type_create_darray_c #endif #ifndef PyMPI_HAVE_MPI_Type_create_resized_c static int PyMPI_Type_create_resized_c(MPI_Datatype oldtype, MPI_Count lb, MPI_Count extent, MPI_Datatype *newtype) { int ierr; MPI_Aint ilb, iex; PyMPICastValue(MPI_Aint, ilb, MPI_Count, lb); PyMPICastValue(MPI_Aint, iex, MPI_Count, extent); ierr = MPI_Type_create_resized(oldtype, ilb, iex, newtype); fn_exit: return ierr; } #undef MPI_Type_create_resized_c #define MPI_Type_create_resized_c PyMPI_Type_create_resized_c #endif /* ---------------------------------------------------------------- */ #ifndef PyMPI_HAVE_MPI_Type_get_envelope_c static int PyMPI_Type_get_envelope_c(MPI_Datatype datatype, MPI_Count *num_integers, MPI_Count *num_addresses, MPI_Count *num_large_counts, MPI_Count *num_datatypes, int *combiner) { int ierr; int ni = 0, na = 0, nd = 0; ierr = MPI_Type_get_envelope(datatype, &ni, &na, &nd, combiner); if (ierr != MPI_SUCCESS) return ierr; if (num_integers) *num_integers = ni; if (num_addresses) *num_addresses = na; if (num_large_counts) *num_large_counts = 0; if (num_datatypes) *num_datatypes = nd; return ierr; } #undef MPI_Type_get_envelope_c #define MPI_Type_get_envelope_c PyMPI_Type_get_envelope_c #endif #ifndef PyMPI_HAVE_MPI_Type_get_contents_c static int PyMPI_Type_get_contents_c(MPI_Datatype datatype, MPI_Count max_integers, MPI_Count max_addresses, MPI_Count max_large_counts, MPI_Count max_datatypes, int integers[], MPI_Aint addresses[], MPI_Count large_counts[], MPI_Datatype datatypes[]) { int ierr; int ni, na, nd; PyMPICastValue(int, ni, MPI_Count, max_integers); PyMPICastValue(int, na, MPI_Count, max_addresses); PyMPICastValue(int, nd, MPI_Count, max_datatypes); ierr = MPI_Type_get_contents(datatype, ni, na, nd, integers, addresses, datatypes); (void)max_large_counts; (void)large_counts; fn_exit: return ierr; } #undef MPI_Type_get_contents_c #define MPI_Type_get_contents_c PyMPI_Type_get_contents_c #endif /* ---------------------------------------------------------------- */ #ifndef PyMPI_HAVE_MPI_Pack_c static int PyMPI_Pack_c(const void *inbuf, MPI_Count incount, MPI_Datatype datatype, void *outbuf, MPI_Count outsize, MPI_Count *position, MPI_Comm comm) { int ierr; int ic, os, pp; PyMPICastValue(int, ic, MPI_Count, incount); PyMPICastValue(int, os, MPI_Count, outsize); PyMPICastValue(int, pp, MPI_Count, *position); ierr = MPI_Pack((void*)inbuf, ic, datatype, outbuf, os, &pp, comm); if (ierr == MPI_SUCCESS) *position = pp; fn_exit: return ierr; } #undef MPI_Pack_c #define MPI_Pack_c PyMPI_Pack_c #endif #ifndef PyMPI_HAVE_MPI_Unpack_c static int PyMPI_Unpack_c(const void *inbuf, MPI_Count insize, MPI_Count *position, void *outbuf, MPI_Count outcount, MPI_Datatype datatype, MPI_Comm comm) { int ierr; int is, pp, oc; PyMPICastValue(int, is, MPI_Count, insize); PyMPICastValue(int, pp, MPI_Count, *position); PyMPICastValue(int, oc, MPI_Count, outcount); ierr = MPI_Unpack((void*)inbuf, is, &pp, outbuf, oc, datatype, comm); if (ierr == MPI_SUCCESS) *position = pp; fn_exit: return ierr; } #undef MPI_Unpack_c #define MPI_Unpack_c PyMPI_Unpack_c #endif #ifndef PyMPI_HAVE_MPI_Pack_size_c static int PyMPI_Pack_size_c(MPI_Count count, MPI_Datatype datatype, MPI_Comm comm, MPI_Count *size) { int ierr; int c, s; 
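/* Packing shims: PyMPI_Pack_c / PyMPI_Unpack_c narrow counts, buffer sizes,
 * and the in/out position to int (the *_external variants that follow use
 * MPI_Aint for sizes and positions), call the classic routine, and write the
 * updated position back only when the call succeeds.  The
 * MPI_Type_get_envelope_c / MPI_Type_get_contents_c fallbacks above report
 * zero large counts and ignore the large_counts output array, since
 * datatypes created through the classic constructors carry no MPI_Count
 * sized contents. */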
PyMPICastValue(int, c, MPI_Count, count); ierr = MPI_Pack_size(c, datatype, comm, &s); if (ierr == MPI_SUCCESS) *size = s; fn_exit: return ierr; } #undef MPI_Pack_size_c #define MPI_Pack_size_c PyMPI_Pack_size_c #endif /* ---------------------------------------------------------------- */ #ifndef PyMPI_HAVE_MPI_Pack_external_c static int PyMPI_Pack_external_c(const char datarep[], const void *inbuf, MPI_Count incount, MPI_Datatype datatype, void *outbuf, MPI_Count outsize, MPI_Count *position) { int ierr; int ic; MPI_Aint os, pp; PyMPICastValue(int, ic, MPI_Count, incount); PyMPICastValue(MPI_Aint, os, MPI_Count, outsize); PyMPICastValue(MPI_Aint, pp, MPI_Count, *position); ierr = MPI_Pack_external((char*)datarep, (void*)inbuf, ic, datatype, outbuf, os, &pp); if (ierr == MPI_SUCCESS) *position = pp; fn_exit: return ierr; } #undef MPI_Pack_external_c #define MPI_Pack_external_c PyMPI_Pack_external_c #endif #ifndef PyMPI_HAVE_MPI_Unpack_external_c static int PyMPI_Unpack_external_c(const char datarep[], const void *inbuf, MPI_Count insize, MPI_Count *position, void *outbuf, MPI_Count outcount, MPI_Datatype datatype) { int ierr; MPI_Aint is, pp; int oc; PyMPICastValue(MPI_Aint, is, MPI_Count, insize); PyMPICastValue(MPI_Aint, pp, MPI_Count, *position); PyMPICastValue(int, oc, MPI_Count, outcount); ierr = MPI_Unpack_external((char*)datarep, (void*)inbuf, is, &pp, outbuf, oc, datatype); if (ierr == MPI_SUCCESS) *position = pp; fn_exit: return ierr; } #undef MPI_Unpack_external_c #define MPI_Unpack_external_c PyMPI_Unpack_external_c #endif #ifndef PyMPI_HAVE_MPI_Pack_external_size_c static int PyMPI_Pack_external_size_c(const char datarep[], MPI_Count count, MPI_Datatype datatype, MPI_Count *size) { int ierr; int c; MPI_Aint s; PyMPICastValue(int, c, MPI_Count, count); ierr = MPI_Pack_external_size((char*)datarep, c, datatype, &s); if (ierr == MPI_SUCCESS) *size = s; fn_exit: return ierr; } #undef MPI_Pack_external_size_c #define MPI_Pack_external_size_c PyMPI_Pack_external_size_c #endif /* ---------------------------------------------------------------- */ #ifndef PyMPI_HAVE_MPI_Register_datarep_c typedef struct PyMPI_datarep_s { MPI_Datarep_conversion_function_c *read_fn; MPI_Datarep_conversion_function_c *write_fn; MPI_Datarep_extent_function *extent_fn; void *extra_state; } PyMPI_datarep_t; static int MPIAPI PyMPI_datarep_read_fn(void *userbuf, MPI_Datatype datatype, int count, void *filebuf, MPI_Offset position, void *extra_state) { PyMPI_datarep_t *drep = (PyMPI_datarep_t *) extra_state; return drep->read_fn(userbuf, datatype, count, filebuf, position, drep->extra_state); } static int MPIAPI PyMPI_datarep_write_fn(void *userbuf, MPI_Datatype datatype, int count, void *filebuf, MPI_Offset position, void *extra_state) { PyMPI_datarep_t *drep = (PyMPI_datarep_t *) extra_state; return drep->write_fn(userbuf, datatype, count, filebuf, position, drep->extra_state); } static int PyMPI_Register_datarep_c(const char *datarep, MPI_Datarep_conversion_function_c *read_conversion_fn, MPI_Datarep_conversion_function_c *write_conversion_fn, MPI_Datarep_extent_function *dtype_file_extent_fn, void *extra_state) { static int n = 0; enum {N=32}; static PyMPI_datarep_t registry[N]; PyMPI_datarep_t *drep = (n < N) ? 
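/* Datarep trampoline: the user's large-count conversion callbacks and their
 * extra_state are stashed in a PyMPI_datarep_t record; the first 32 records
 * live in a static registry, later ones are heap-allocated, and none are
 * reclaimed.  PyMPI_datarep_read_fn and PyMPI_datarep_write_fn are the
 * classic-signature wrappers actually handed to MPI_Register_datarep; they
 * forward each conversion request to the stored MPI_Count-based functions
 * together with the original extra_state. */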
®istry[n++] : (PyMPI_datarep_t *) PyMPI_MALLOC(sizeof(PyMPI_datarep_t)); MPI_Datarep_conversion_function *r_fn = MPI_CONVERSION_FN_NULL; MPI_Datarep_conversion_function *w_fn = MPI_CONVERSION_FN_NULL; MPI_Datarep_extent_function *e_fn = dtype_file_extent_fn; drep->read_fn = read_conversion_fn; drep->write_fn = write_conversion_fn; drep->extent_fn = dtype_file_extent_fn; drep->extra_state = extra_state; if (read_conversion_fn != MPI_CONVERSION_FN_NULL_C) r_fn = PyMPI_datarep_read_fn; if (write_conversion_fn != MPI_CONVERSION_FN_NULL_C) w_fn = PyMPI_datarep_write_fn; return MPI_Register_datarep(datarep, r_fn, w_fn, e_fn, drep); } #undef MPI_Register_datarep_c #define MPI_Register_datarep_c PyMPI_Register_datarep_c #endif /* ---------------------------------------------------------------- */ #if ((10 * MPI_VERSION + MPI_SUBVERSION) < 41) #define PyMPI_GET_NAME_NULLOBJ(MPI_HANDLE_NULL) \ do { if (obj == MPI_HANDLE_NULL && name && rlen) { \ (void) strncpy(name, #MPI_HANDLE_NULL, MPI_MAX_OBJECT_NAME); \ name[MPI_MAX_OBJECT_NAME] = 0; *rlen = (int) strlen(name); \ return MPI_SUCCESS; \ } } while(0) static int PyMPI_Type_get_name(MPI_Datatype obj, char *name, int *rlen) { PyMPI_GET_NAME_NULLOBJ(MPI_DATATYPE_NULL); return MPI_Type_get_name(obj, name, rlen); } #undef MPI_Type_get_name #define MPI_Type_get_name PyMPI_Type_get_name static int PyMPI_Comm_get_name(MPI_Comm obj, char *name, int *rlen) { PyMPI_GET_NAME_NULLOBJ(MPI_COMM_NULL); return MPI_Comm_get_name(obj, name, rlen); } #undef MPI_Comm_get_name #define MPI_Comm_get_name PyMPI_Comm_get_name static int PyMPI_Win_get_name(MPI_Win obj, char *name, int *rlen) { PyMPI_GET_NAME_NULLOBJ(MPI_WIN_NULL); return MPI_Win_get_name(obj, name, rlen); } #undef MPI_Win_get_name #define MPI_Win_get_name PyMPI_Win_get_name #undef PyMPI_GET_NAME_NULLOBJ #endif /* ---------------------------------------------------------------- */ #include "mpiulfm.h" #ifndef PyMPI_HAVE_MPI_Comm_revoke #ifndef PyMPI_HAVE_MPIX_Comm_revoke #ifndef PyMPI_HAVE_MPI_Comm_is_revoked #ifndef PyMPI_HAVE_MPIX_Comm_is_revoked static int PyMPI_Comm_is_revoked(MPI_Comm comm, int *flag) { if (!flag) { (void) MPI_Comm_call_errhandler(comm, MPI_ERR_ARG); return MPI_ERR_ARG; } { int dummy, ierr; ierr = MPI_Comm_test_inter(comm, &dummy); if (ierr != MPI_SUCCESS) return ierr; } *flag = 0; return MPI_SUCCESS; } #undef MPI_Comm_is_revoked #define MPI_Comm_is_revoked PyMPI_Comm_is_revoked #endif #endif #endif #endif #ifndef PyMPI_HAVE_MPI_Comm_get_failed #ifndef PyMPI_HAVE_MPIX_Comm_get_failed static int PyMPI_Comm_get_failed(MPI_Comm comm, MPI_Group *group) { { int dummy, ierr; ierr = MPI_Comm_test_inter(comm, &dummy); if (ierr != MPI_SUCCESS) return ierr; } if (!group) { (void) MPI_Comm_call_errhandler(comm, MPI_ERR_ARG); return MPI_ERR_ARG; } return MPI_Group_union(MPI_GROUP_EMPTY, MPI_GROUP_EMPTY, group); } #undef MPI_Comm_get_failed #define MPI_Comm_get_failed PyMPI_Comm_get_failed #endif #endif #ifndef PyMPI_HAVE_MPI_Comm_ack_failed #ifndef PyMPI_HAVE_MPIX_Comm_ack_failed static int PyMPI_Comm_ack_failed(MPI_Comm comm, int num_to_ack, int *num_acked) { { int dummy, ierr; ierr = MPI_Comm_test_inter(comm, &dummy); if (ierr != MPI_SUCCESS) return ierr; } if (!num_acked) { (void) MPI_Comm_call_errhandler(comm, MPI_ERR_ARG); return MPI_ERR_ARG; } (void)num_to_ack; *num_acked = 0; return MPI_SUCCESS; } #undef MPI_Comm_ack_failed #define MPI_Comm_ack_failed PyMPI_Comm_ack_failed #endif #endif #ifndef PyMPI_HAVE_MPI_Comm_agree #ifndef PyMPI_HAVE_MPIX_Comm_agree static int PyMPI_Comm_agree(MPI_Comm 
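/* ULFM fallbacks: when the fault-tolerance extensions (MPI[X]_Comm_revoke
 * and friends) are unavailable, these shims assume a fault-free run:
 * Comm_is_revoked always reports false, Comm_get_failed returns the empty
 * group, Comm_ack_failed acknowledges zero processes, and Comm_agree below
 * simply combines the flag values contributed by all ranks with an MPI_BAND
 * allreduce. */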
comm, int *flag) { int ibuf = flag ? *flag : 0; return MPI_Allreduce_c(&ibuf, flag, 1, MPI_INT, MPI_BAND, comm); } #undef MPI_Comm_agree #define MPI_Comm_agree PyMPI_Comm_agree #endif #endif #ifndef PyMPI_HAVE_MPI_Comm_iagree #ifndef PyMPI_HAVE_MPIX_Comm_iagree static int MPIAPI PyMPI_iagree_free_fn(MPI_Comm c, int k, void *v, void *xs) { return (void) c, (void) xs, PyMPI_FREE(v), MPI_Comm_free_keyval(&k); } static int PyMPI_Comm_iagree(MPI_Comm comm, int *flag, MPI_Request *request) { int ierr, keyval, *ibuf; MPI_Comm_copy_attr_function *copy_fn = MPI_COMM_NULL_COPY_FN; MPI_Comm_delete_attr_function *free_fn = PyMPI_iagree_free_fn; ierr = MPI_Comm_create_keyval(copy_fn, free_fn, &keyval, NULL); if (ierr != MPI_SUCCESS) return ierr; ibuf = (int *) PyMPI_MALLOC(sizeof(int)); ierr = MPI_Comm_set_attr(comm, keyval, ibuf); if (ierr != MPI_SUCCESS) return PyMPI_FREE(ibuf), ierr; ibuf[0] = flag ? *flag : 0; return MPI_Iallreduce_c(ibuf, flag, 1, MPI_INT, MPI_BAND, comm, request); } #undef MPI_Comm_iagree #define MPI_Comm_iagree PyMPI_Comm_iagree #endif #endif #ifndef PyMPI_HAVE_MPI_Comm_shrink #ifndef PyMPI_HAVE_MPIX_Comm_shrink static int PyMPI_Comm_shrink(MPI_Comm comm, MPI_Comm *newcomm) { return MPI_Comm_dup(comm, newcomm); } #undef MPI_Comm_shrink #define MPI_Comm_shrink PyMPI_Comm_shrink #endif #endif #ifndef PyMPI_HAVE_MPI_Comm_ishrink #ifndef PyMPI_HAVE_MPIX_Comm_ishrink static int PyMPI_Comm_ishrink(MPI_Comm comm, MPI_Comm *newcomm, MPI_Request *request) { return MPI_Comm_idup(comm, newcomm, request); } #undef MPI_Comm_ishrink #define MPI_Comm_ishrink PyMPI_Comm_ishrink #endif #endif /* ---------------------------------------------------------------- */ #endif /* !PyMPI_FALLBACK_H */ /* Local variables: c-basic-offset: 2 indent-tabs-mode: nil End: */ mpi4py-4.0.3/src/lib-mpi/largecnt.h000066400000000000000000002770401475341043600170510ustar00rootroot00000000000000#ifndef PyMPI_LARGECNT_H #define PyMPI_LARGECNT_H #include #include #ifndef PyMPI_MALLOC #define PyMPI_MALLOC malloc #endif #ifndef PyMPI_FREE #define PyMPI_FREE free #endif #ifndef PyMPI_MEMCPY #define PyMPI_MEMCPY memcpy #endif #define PyMPIAllocArray(dsttype, dst, len) \ do { \ size_t _m = (size_t) (len) * sizeof(dsttype); \ (dst) = (dsttype *) PyMPI_MALLOC(_m ? 
_m : 1); \ } while (0) /**/ #define PyMPIFreeArray(dst) \ do { \ if ((dst) != NULL) PyMPI_FREE(dst); \ (dst) = NULL; (void) (dst); \ } while (0) /**/ #define PyMPICastError(ERRORCODE) \ do { \ ierr = (ERRORCODE); \ (void) MPI_Comm_call_errhandler(MPI_COMM_SELF, ierr); \ goto fn_exit; \ } while (0) /**/ #define PyMPICastValue(dsttype, dst, srctype, src) \ do { \ (dst) = (dsttype) (src); \ if ((srctype) (dst) != (src)) \ PyMPICastError(MPI_ERR_ARG); \ } while (0) /**/ #define PyMPICastArray(dsttype, dst, srctype, src, len) \ do { \ (dst) = NULL; \ if ((src) != NULL) { \ MPI_Aint _n = (MPI_Aint) (len), _i; \ PyMPIAllocArray(dsttype, dst, len); \ if ((dst) == NULL) \ PyMPICastError(MPI_ERR_OTHER); \ for (_i = 0; _i < _n; _i++) { \ (dst)[_i] = (dsttype) (src)[_i]; \ if ((srctype) (dst)[_i] != (src)[_i]) { \ PyMPIFreeArray(dst); \ PyMPICastError(MPI_ERR_ARG); \ } \ } \ } \ } while (0) /**/ #define PyMPIMoveArray(dsttype, dst, srctype, src, len) \ do { \ if ((src) != NULL && (dst) != NULL) { \ size_t _n = (size_t) (len); \ unsigned char *_buf = (unsigned char *) (src); \ (void) PyMPI_MEMCPY(_buf, (dst), _n * sizeof(dsttype)); \ PyMPI_FREE(dst); (dst) = (dsttype *) _buf; \ } \ } while (0) /**/ #define PyMPICommSize(comm, n) \ do { \ int _inter = 0; \ ierr = MPI_Comm_test_inter(comm, &_inter); \ if (_inter) \ ierr = MPI_Comm_remote_size((comm), &(n)); \ else \ ierr = MPI_Comm_size((comm), &(n)); \ if (ierr != MPI_SUCCESS) goto fn_exit; \ } while (0) /**/ #define PyMPICommLocGroupSize(comm, n) \ do { \ ierr = MPI_Comm_size((comm), &(n)); \ if (ierr != MPI_SUCCESS) goto fn_exit; \ } while (0) /**/ #define PyMPICommNeighborCount(comm, ns, nr) \ do { \ int _topo = MPI_UNDEFINED; \ int _i, _n; (ns) = (nr) = 0; \ ierr = MPI_Topo_test((comm), &_topo); \ if (ierr != MPI_SUCCESS) goto fn_exit; \ if (_topo == MPI_UNDEFINED) { \ ierr = MPI_Comm_size((comm), &_n); \ (ns) = (nr) = _n; \ } else if (_topo == MPI_CART) { \ ierr = MPI_Cartdim_get((comm), &_n); \ (ns) = (nr) = 2 * _n; \ } else if (_topo == MPI_GRAPH) { \ ierr = MPI_Comm_rank((comm), &_i); \ ierr = MPI_Graph_neighbors_count( \ (comm), _i, &_n); \ (ns) = (nr) = _n; \ } else if (_topo == MPI_DIST_GRAPH) { \ ierr = MPI_Dist_graph_neighbors_count( \ (comm), &(nr), &(ns), &_i); \ } \ if (ierr != MPI_SUCCESS) goto fn_exit; \ } while (0) /**/ #ifndef PyMPI_HAVE_MPI_Get_count_c static int PyMPI_Get_count_c(MPI_Status *a1, MPI_Datatype a2, MPI_Count *a3) { int ierr; int b3 = 0; int *p3 = a3 ? &b3 : NULL; ierr = MPI_Get_count(a1, a2, p3); if (ierr != MPI_SUCCESS) goto fn_exit; if (a3) *a3 = b3; fn_exit: return ierr; } #undef MPI_Get_count_c #define MPI_Get_count_c PyMPI_Get_count_c #endif #ifndef PyMPI_HAVE_MPI_Buffer_attach_c static int PyMPI_Buffer_attach_c(void *a1, MPI_Count a2) { int ierr; int b2; PyMPICastValue(int, b2, MPI_Count, a2); ierr = MPI_Buffer_attach(a1, b2); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Buffer_attach_c #define MPI_Buffer_attach_c PyMPI_Buffer_attach_c #endif #ifndef PyMPI_HAVE_MPI_Buffer_detach_c static int PyMPI_Buffer_detach_c(void *a1, MPI_Count *a2) { int ierr; int b2 = 0; int *p2 = a2 ? 
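/* largecnt.h helper macros (defined above): PyMPICastValue performs a
 * narrowing cast and raises MPI_ERR_ARG through the MPI_COMM_SELF error
 * handler if the round trip changes the value; PyMPICastArray allocates a
 * temporary array and narrows element by element with the same check;
 * PyMPIMoveArray copies the narrowed elements back into the caller-supplied
 * (wider) buffer so that nonblocking wrappers need no cleanup after posting;
 * PyMPICommSize / PyMPICommLocGroupSize / PyMPICommNeighborCount compute how
 * many array elements a given collective expects.  A wrapper built from
 * these pieces reduces to the following sketch (names are illustrative, not
 * part of the API):
 *
 *     static int PyMPI_Something_c(void *buf, MPI_Count count, MPI_Comm comm)
 *     {
 *       int ierr; int c;
 *       PyMPICastValue(int, c, MPI_Count, count);  -- overflow-checked narrowing
 *       ierr = MPI_Something(buf, c, comm);        -- delegate to classic binding
 *       if (ierr != MPI_SUCCESS) goto fn_exit;
 *      fn_exit:
 *       return ierr;
 *     }
 */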
&b2 : NULL; ierr = MPI_Buffer_detach(a1, p2); if (ierr != MPI_SUCCESS) goto fn_exit; if (a2) *a2 = b2; fn_exit: return ierr; } #undef MPI_Buffer_detach_c #define MPI_Buffer_detach_c PyMPI_Buffer_detach_c #endif #ifndef PyMPI_HAVE_MPI_Send_c static int PyMPI_Send_c(void *a1, MPI_Count a2, MPI_Datatype a3, int a4, int a5, MPI_Comm a6) { int ierr; int b2; PyMPICastValue(int, b2, MPI_Count, a2); ierr = MPI_Send(a1, b2, a3, a4, a5, a6); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Send_c #define MPI_Send_c PyMPI_Send_c #endif #ifndef PyMPI_HAVE_MPI_Recv_c static int PyMPI_Recv_c(void *a1, MPI_Count a2, MPI_Datatype a3, int a4, int a5, MPI_Comm a6, MPI_Status *a7) { int ierr; int b2; PyMPICastValue(int, b2, MPI_Count, a2); ierr = MPI_Recv(a1, b2, a3, a4, a5, a6, a7); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Recv_c #define MPI_Recv_c PyMPI_Recv_c #endif #ifndef PyMPI_HAVE_MPI_Sendrecv_c static int PyMPI_Sendrecv_c(void *a1, MPI_Count a2, MPI_Datatype a3, int a4, int a5, void *a6, MPI_Count a7, MPI_Datatype a8, int a9, int a10, MPI_Comm a11, MPI_Status *a12) { int ierr; int b2; int b7; PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastValue(int, b7, MPI_Count, a7); ierr = MPI_Sendrecv(a1, b2, a3, a4, a5, a6, b7, a8, a9, a10, a11, a12); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Sendrecv_c #define MPI_Sendrecv_c PyMPI_Sendrecv_c #endif #ifndef PyMPI_HAVE_MPI_Sendrecv_replace_c static int PyMPI_Sendrecv_replace_c(void *a1, MPI_Count a2, MPI_Datatype a3, int a4, int a5, int a6, int a7, MPI_Comm a8, MPI_Status *a9) { int ierr; int b2; PyMPICastValue(int, b2, MPI_Count, a2); ierr = MPI_Sendrecv_replace(a1, b2, a3, a4, a5, a6, a7, a8, a9); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Sendrecv_replace_c #define MPI_Sendrecv_replace_c PyMPI_Sendrecv_replace_c #endif #ifndef PyMPI_HAVE_MPI_Bsend_c static int PyMPI_Bsend_c(void *a1, MPI_Count a2, MPI_Datatype a3, int a4, int a5, MPI_Comm a6) { int ierr; int b2; PyMPICastValue(int, b2, MPI_Count, a2); ierr = MPI_Bsend(a1, b2, a3, a4, a5, a6); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Bsend_c #define MPI_Bsend_c PyMPI_Bsend_c #endif #ifndef PyMPI_HAVE_MPI_Ssend_c static int PyMPI_Ssend_c(void *a1, MPI_Count a2, MPI_Datatype a3, int a4, int a5, MPI_Comm a6) { int ierr; int b2; PyMPICastValue(int, b2, MPI_Count, a2); ierr = MPI_Ssend(a1, b2, a3, a4, a5, a6); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Ssend_c #define MPI_Ssend_c PyMPI_Ssend_c #endif #ifndef PyMPI_HAVE_MPI_Rsend_c static int PyMPI_Rsend_c(void *a1, MPI_Count a2, MPI_Datatype a3, int a4, int a5, MPI_Comm a6) { int ierr; int b2; PyMPICastValue(int, b2, MPI_Count, a2); ierr = MPI_Rsend(a1, b2, a3, a4, a5, a6); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Rsend_c #define MPI_Rsend_c PyMPI_Rsend_c #endif #ifndef PyMPI_HAVE_MPI_Isend_c static int PyMPI_Isend_c(void *a1, MPI_Count a2, MPI_Datatype a3, int a4, int a5, MPI_Comm a6, MPI_Request *a7) { int ierr; int b2; PyMPICastValue(int, b2, MPI_Count, a2); ierr = MPI_Isend(a1, b2, a3, a4, a5, a6, a7); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Isend_c #define MPI_Isend_c PyMPI_Isend_c #endif #ifndef PyMPI_HAVE_MPI_Irecv_c static int PyMPI_Irecv_c(void *a1, MPI_Count a2, MPI_Datatype a3, int a4, int a5, MPI_Comm a6, MPI_Request *a7) { int ierr; int b2; PyMPICastValue(int, b2, MPI_Count, a2); ierr = MPI_Irecv(a1, b2, 
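/* All point-to-point wrappers in this block (blocking, nonblocking, and the
 * persistent *_init variants) share the same one-argument narrowing shape:
 * only the element count is MPI_Count in the _c signature, so a single
 * PyMPICastValue suffices before delegating to the classic call. */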
a3, a4, a5, a6, a7); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Irecv_c #define MPI_Irecv_c PyMPI_Irecv_c #endif #ifndef PyMPI_HAVE_MPI_Isendrecv_c static int PyMPI_Isendrecv_c(void *a1, MPI_Count a2, MPI_Datatype a3, int a4, int a5, void *a6, MPI_Count a7, MPI_Datatype a8, int a9, int a10, MPI_Comm a11, MPI_Request *a12) { int ierr; int b2; int b7; PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastValue(int, b7, MPI_Count, a7); ierr = MPI_Isendrecv(a1, b2, a3, a4, a5, a6, b7, a8, a9, a10, a11, a12); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Isendrecv_c #define MPI_Isendrecv_c PyMPI_Isendrecv_c #endif #ifndef PyMPI_HAVE_MPI_Isendrecv_replace_c static int PyMPI_Isendrecv_replace_c(void *a1, MPI_Count a2, MPI_Datatype a3, int a4, int a5, int a6, int a7, MPI_Comm a8, MPI_Request *a9) { int ierr; int b2; PyMPICastValue(int, b2, MPI_Count, a2); ierr = MPI_Isendrecv_replace(a1, b2, a3, a4, a5, a6, a7, a8, a9); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Isendrecv_replace_c #define MPI_Isendrecv_replace_c PyMPI_Isendrecv_replace_c #endif #ifndef PyMPI_HAVE_MPI_Ibsend_c static int PyMPI_Ibsend_c(void *a1, MPI_Count a2, MPI_Datatype a3, int a4, int a5, MPI_Comm a6, MPI_Request *a7) { int ierr; int b2; PyMPICastValue(int, b2, MPI_Count, a2); ierr = MPI_Ibsend(a1, b2, a3, a4, a5, a6, a7); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Ibsend_c #define MPI_Ibsend_c PyMPI_Ibsend_c #endif #ifndef PyMPI_HAVE_MPI_Issend_c static int PyMPI_Issend_c(void *a1, MPI_Count a2, MPI_Datatype a3, int a4, int a5, MPI_Comm a6, MPI_Request *a7) { int ierr; int b2; PyMPICastValue(int, b2, MPI_Count, a2); ierr = MPI_Issend(a1, b2, a3, a4, a5, a6, a7); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Issend_c #define MPI_Issend_c PyMPI_Issend_c #endif #ifndef PyMPI_HAVE_MPI_Irsend_c static int PyMPI_Irsend_c(void *a1, MPI_Count a2, MPI_Datatype a3, int a4, int a5, MPI_Comm a6, MPI_Request *a7) { int ierr; int b2; PyMPICastValue(int, b2, MPI_Count, a2); ierr = MPI_Irsend(a1, b2, a3, a4, a5, a6, a7); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Irsend_c #define MPI_Irsend_c PyMPI_Irsend_c #endif #ifndef PyMPI_HAVE_MPI_Send_init_c static int PyMPI_Send_init_c(void *a1, MPI_Count a2, MPI_Datatype a3, int a4, int a5, MPI_Comm a6, MPI_Request *a7) { int ierr; int b2; PyMPICastValue(int, b2, MPI_Count, a2); ierr = MPI_Send_init(a1, b2, a3, a4, a5, a6, a7); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Send_init_c #define MPI_Send_init_c PyMPI_Send_init_c #endif #ifndef PyMPI_HAVE_MPI_Recv_init_c static int PyMPI_Recv_init_c(void *a1, MPI_Count a2, MPI_Datatype a3, int a4, int a5, MPI_Comm a6, MPI_Request *a7) { int ierr; int b2; PyMPICastValue(int, b2, MPI_Count, a2); ierr = MPI_Recv_init(a1, b2, a3, a4, a5, a6, a7); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Recv_init_c #define MPI_Recv_init_c PyMPI_Recv_init_c #endif #ifndef PyMPI_HAVE_MPI_Bsend_init_c static int PyMPI_Bsend_init_c(void *a1, MPI_Count a2, MPI_Datatype a3, int a4, int a5, MPI_Comm a6, MPI_Request *a7) { int ierr; int b2; PyMPICastValue(int, b2, MPI_Count, a2); ierr = MPI_Bsend_init(a1, b2, a3, a4, a5, a6, a7); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Bsend_init_c #define MPI_Bsend_init_c PyMPI_Bsend_init_c #endif #ifndef PyMPI_HAVE_MPI_Ssend_init_c static int PyMPI_Ssend_init_c(void *a1, MPI_Count 
a2, MPI_Datatype a3, int a4, int a5, MPI_Comm a6, MPI_Request *a7) { int ierr; int b2; PyMPICastValue(int, b2, MPI_Count, a2); ierr = MPI_Ssend_init(a1, b2, a3, a4, a5, a6, a7); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Ssend_init_c #define MPI_Ssend_init_c PyMPI_Ssend_init_c #endif #ifndef PyMPI_HAVE_MPI_Rsend_init_c static int PyMPI_Rsend_init_c(void *a1, MPI_Count a2, MPI_Datatype a3, int a4, int a5, MPI_Comm a6, MPI_Request *a7) { int ierr; int b2; PyMPICastValue(int, b2, MPI_Count, a2); ierr = MPI_Rsend_init(a1, b2, a3, a4, a5, a6, a7); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Rsend_init_c #define MPI_Rsend_init_c PyMPI_Rsend_init_c #endif #ifndef PyMPI_HAVE_MPI_Mrecv_c static int PyMPI_Mrecv_c(void *a1, MPI_Count a2, MPI_Datatype a3, MPI_Message *a4, MPI_Status *a5) { int ierr; int b2; PyMPICastValue(int, b2, MPI_Count, a2); ierr = MPI_Mrecv(a1, b2, a3, a4, a5); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Mrecv_c #define MPI_Mrecv_c PyMPI_Mrecv_c #endif #ifndef PyMPI_HAVE_MPI_Imrecv_c static int PyMPI_Imrecv_c(void *a1, MPI_Count a2, MPI_Datatype a3, MPI_Message *a4, MPI_Request *a5) { int ierr; int b2; PyMPICastValue(int, b2, MPI_Count, a2); ierr = MPI_Imrecv(a1, b2, a3, a4, a5); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Imrecv_c #define MPI_Imrecv_c PyMPI_Imrecv_c #endif #ifndef PyMPI_HAVE_MPI_Bcast_c static int PyMPI_Bcast_c(void *a1, MPI_Count a2, MPI_Datatype a3, int a4, MPI_Comm a5) { int ierr; int b2; PyMPICastValue(int, b2, MPI_Count, a2); ierr = MPI_Bcast(a1, b2, a3, a4, a5); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Bcast_c #define MPI_Bcast_c PyMPI_Bcast_c #endif #ifndef PyMPI_HAVE_MPI_Gather_c static int PyMPI_Gather_c(void *a1, MPI_Count a2, MPI_Datatype a3, void *a4, MPI_Count a5, MPI_Datatype a6, int a7, MPI_Comm a8) { int ierr; int b2; int b5; PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastValue(int, b5, MPI_Count, a5); ierr = MPI_Gather(a1, b2, a3, a4, b5, a6, a7, a8); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Gather_c #define MPI_Gather_c PyMPI_Gather_c #endif #ifndef PyMPI_HAVE_MPI_Gatherv_c static int PyMPI_Gatherv_c(void *a1, MPI_Count a2, MPI_Datatype a3, void *a4, MPI_Count *a5, MPI_Aint *a6, MPI_Datatype a7, int a8, MPI_Comm a9) { int ierr; int n; int b2; int *b5 = NULL; int *b6 = NULL; PyMPICommSize(a9, n); PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastArray(int, b5, MPI_Count, a5, n); PyMPICastArray(int, b6, MPI_Aint, a6, n); ierr = MPI_Gatherv(a1, b2, a3, a4, b5, b6, a7, a8, a9); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: PyMPIFreeArray(b5); PyMPIFreeArray(b6); return ierr; } #undef MPI_Gatherv_c #define MPI_Gatherv_c PyMPI_Gatherv_c #endif #ifndef PyMPI_HAVE_MPI_Scatter_c static int PyMPI_Scatter_c(void *a1, MPI_Count a2, MPI_Datatype a3, void *a4, MPI_Count a5, MPI_Datatype a6, int a7, MPI_Comm a8) { int ierr; int b2; int b5; PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastValue(int, b5, MPI_Count, a5); ierr = MPI_Scatter(a1, b2, a3, a4, b5, a6, a7, a8); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Scatter_c #define MPI_Scatter_c PyMPI_Scatter_c #endif #ifndef PyMPI_HAVE_MPI_Scatterv_c static int PyMPI_Scatterv_c(void *a1, MPI_Count *a2, MPI_Aint *a3, MPI_Datatype a4, void *a5, MPI_Count a6, MPI_Datatype a7, int a8, MPI_Comm a9) { int ierr; int n; int *b2 = NULL; int *b3 = NULL; int b6; PyMPICommSize(a9, n); PyMPICastArray(int, 
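/* Vector collectives: Gatherv/Scatterv (and the allgatherv/alltoallv family
 * below) carry per-rank counts as MPI_Count[] and byte displacements as
 * MPI_Aint[]; PyMPICommSize determines how many entries to narrow (remote
 * size for intercommunicators, local size otherwise), and the temporary int
 * arrays are released at fn_exit with PyMPIFreeArray. */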
b2, MPI_Count, a2, n); PyMPICastArray(int, b3, MPI_Aint, a3, n); PyMPICastValue(int, b6, MPI_Count, a6); ierr = MPI_Scatterv(a1, b2, b3, a4, a5, b6, a7, a8, a9); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: PyMPIFreeArray(b2); PyMPIFreeArray(b3); return ierr; } #undef MPI_Scatterv_c #define MPI_Scatterv_c PyMPI_Scatterv_c #endif #ifndef PyMPI_HAVE_MPI_Allgather_c static int PyMPI_Allgather_c(void *a1, MPI_Count a2, MPI_Datatype a3, void *a4, MPI_Count a5, MPI_Datatype a6, MPI_Comm a7) { int ierr; int b2; int b5; PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastValue(int, b5, MPI_Count, a5); ierr = MPI_Allgather(a1, b2, a3, a4, b5, a6, a7); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Allgather_c #define MPI_Allgather_c PyMPI_Allgather_c #endif #ifndef PyMPI_HAVE_MPI_Allgatherv_c static int PyMPI_Allgatherv_c(void *a1, MPI_Count a2, MPI_Datatype a3, void *a4, MPI_Count *a5, MPI_Aint *a6, MPI_Datatype a7, MPI_Comm a8) { int ierr; int n; int b2; int *b5 = NULL; int *b6 = NULL; PyMPICommSize(a8, n); PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastArray(int, b5, MPI_Count, a5, n); PyMPICastArray(int, b6, MPI_Aint, a6, n); ierr = MPI_Allgatherv(a1, b2, a3, a4, b5, b6, a7, a8); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: PyMPIFreeArray(b5); PyMPIFreeArray(b6); return ierr; } #undef MPI_Allgatherv_c #define MPI_Allgatherv_c PyMPI_Allgatherv_c #endif #ifndef PyMPI_HAVE_MPI_Alltoall_c static int PyMPI_Alltoall_c(void *a1, MPI_Count a2, MPI_Datatype a3, void *a4, MPI_Count a5, MPI_Datatype a6, MPI_Comm a7) { int ierr; int b2; int b5; PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastValue(int, b5, MPI_Count, a5); ierr = MPI_Alltoall(a1, b2, a3, a4, b5, a6, a7); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Alltoall_c #define MPI_Alltoall_c PyMPI_Alltoall_c #endif #ifndef PyMPI_HAVE_MPI_Alltoallv_c static int PyMPI_Alltoallv_c(void *a1, MPI_Count *a2, MPI_Aint *a3, MPI_Datatype a4, void *a5, MPI_Count *a6, MPI_Aint *a7, MPI_Datatype a8, MPI_Comm a9) { int ierr; int n; int *b2 = NULL; int *b3 = NULL; int *b6 = NULL; int *b7 = NULL; PyMPICommSize(a9, n); PyMPICastArray(int, b2, MPI_Count, a2, n); PyMPICastArray(int, b3, MPI_Aint, a3, n); PyMPICastArray(int, b6, MPI_Count, a6, n); PyMPICastArray(int, b7, MPI_Aint, a7, n); ierr = MPI_Alltoallv(a1, b2, b3, a4, a5, b6, b7, a8, a9); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: PyMPIFreeArray(b2); PyMPIFreeArray(b3); PyMPIFreeArray(b6); PyMPIFreeArray(b7); return ierr; } #undef MPI_Alltoallv_c #define MPI_Alltoallv_c PyMPI_Alltoallv_c #endif #ifndef PyMPI_HAVE_MPI_Alltoallw_c static int PyMPI_Alltoallw_c(void *a1, MPI_Count *a2, MPI_Aint *a3, MPI_Datatype *a4, void *a5, MPI_Count *a6, MPI_Aint *a7, MPI_Datatype *a8, MPI_Comm a9) { int ierr; int n; int *b2 = NULL; int *b3 = NULL; int *b6 = NULL; int *b7 = NULL; PyMPICommSize(a9, n); PyMPICastArray(int, b2, MPI_Count, a2, n); PyMPICastArray(int, b3, MPI_Aint, a3, n); PyMPICastArray(int, b6, MPI_Count, a6, n); PyMPICastArray(int, b7, MPI_Aint, a7, n); ierr = MPI_Alltoallw(a1, b2, b3, a4, a5, b6, b7, a8, a9); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: PyMPIFreeArray(b2); PyMPIFreeArray(b3); PyMPIFreeArray(b6); PyMPIFreeArray(b7); return ierr; } #undef MPI_Alltoallw_c #define MPI_Alltoallw_c PyMPI_Alltoallw_c #endif #ifndef PyMPI_HAVE_MPI_Reduce_local_c static int PyMPI_Reduce_local_c(void *a1, void *a2, MPI_Count a3, MPI_Datatype a4, MPI_Op a5) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_Reduce_local(a1, a2, b3, a4, 
a5); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Reduce_local_c #define MPI_Reduce_local_c PyMPI_Reduce_local_c #endif #ifndef PyMPI_HAVE_MPI_Reduce_c static int PyMPI_Reduce_c(void *a1, void *a2, MPI_Count a3, MPI_Datatype a4, MPI_Op a5, int a6, MPI_Comm a7) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_Reduce(a1, a2, b3, a4, a5, a6, a7); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Reduce_c #define MPI_Reduce_c PyMPI_Reduce_c #endif #ifndef PyMPI_HAVE_MPI_Allreduce_c static int PyMPI_Allreduce_c(void *a1, void *a2, MPI_Count a3, MPI_Datatype a4, MPI_Op a5, MPI_Comm a6) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_Allreduce(a1, a2, b3, a4, a5, a6); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Allreduce_c #define MPI_Allreduce_c PyMPI_Allreduce_c #endif #ifndef PyMPI_HAVE_MPI_Reduce_scatter_block_c static int PyMPI_Reduce_scatter_block_c(void *a1, void *a2, MPI_Count a3, MPI_Datatype a4, MPI_Op a5, MPI_Comm a6) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_Reduce_scatter_block(a1, a2, b3, a4, a5, a6); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Reduce_scatter_block_c #define MPI_Reduce_scatter_block_c PyMPI_Reduce_scatter_block_c #endif #ifndef PyMPI_HAVE_MPI_Reduce_scatter_c static int PyMPI_Reduce_scatter_c(void *a1, void *a2, MPI_Count *a3, MPI_Datatype a4, MPI_Op a5, MPI_Comm a6) { int ierr; int n; int *b3 = NULL; PyMPICommLocGroupSize(a6, n); PyMPICastArray(int, b3, MPI_Count, a3, n); ierr = MPI_Reduce_scatter(a1, a2, b3, a4, a5, a6); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: PyMPIFreeArray(b3); return ierr; } #undef MPI_Reduce_scatter_c #define MPI_Reduce_scatter_c PyMPI_Reduce_scatter_c #endif #ifndef PyMPI_HAVE_MPI_Scan_c static int PyMPI_Scan_c(void *a1, void *a2, MPI_Count a3, MPI_Datatype a4, MPI_Op a5, MPI_Comm a6) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_Scan(a1, a2, b3, a4, a5, a6); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Scan_c #define MPI_Scan_c PyMPI_Scan_c #endif #ifndef PyMPI_HAVE_MPI_Exscan_c static int PyMPI_Exscan_c(void *a1, void *a2, MPI_Count a3, MPI_Datatype a4, MPI_Op a5, MPI_Comm a6) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_Exscan(a1, a2, b3, a4, a5, a6); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Exscan_c #define MPI_Exscan_c PyMPI_Exscan_c #endif #ifndef PyMPI_HAVE_MPI_Neighbor_allgather_c static int PyMPI_Neighbor_allgather_c(void *a1, MPI_Count a2, MPI_Datatype a3, void *a4, MPI_Count a5, MPI_Datatype a6, MPI_Comm a7) { int ierr; int b2; int b5; PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastValue(int, b5, MPI_Count, a5); ierr = MPI_Neighbor_allgather(a1, b2, a3, a4, b5, a6, a7); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Neighbor_allgather_c #define MPI_Neighbor_allgather_c PyMPI_Neighbor_allgather_c #endif #ifndef PyMPI_HAVE_MPI_Neighbor_allgatherv_c static int PyMPI_Neighbor_allgatherv_c(void *a1, MPI_Count a2, MPI_Datatype a3, void *a4, MPI_Count *a5, MPI_Aint *a6, MPI_Datatype a7, MPI_Comm a8) { int ierr; int ns, nr; int b2; int *b5 = NULL; int *b6 = NULL; PyMPICommNeighborCount(a8, ns, nr); PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastArray(int, b5, MPI_Count, a5, nr); PyMPICastArray(int, b6, MPI_Aint, a6, nr); ierr = MPI_Neighbor_allgatherv(a1, b2, a3, a4, b5, b6, a7, a8); if (ierr != 
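/* Neighborhood collectives size their count/displacement arrays with
 * PyMPICommNeighborCount, which inspects the communicator topology: a
 * Cartesian communicator has 2*ndims neighbors, a graph communicator uses
 * MPI_Graph_neighbors_count for the calling rank, a distributed graph uses
 * its indegree/outdegree, and a communicator without topology falls back to
 * the communicator size. */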
MPI_SUCCESS) goto fn_exit; fn_exit: PyMPIFreeArray(b5); PyMPIFreeArray(b6); return ierr; } #undef MPI_Neighbor_allgatherv_c #define MPI_Neighbor_allgatherv_c PyMPI_Neighbor_allgatherv_c #endif #ifndef PyMPI_HAVE_MPI_Neighbor_alltoall_c static int PyMPI_Neighbor_alltoall_c(void *a1, MPI_Count a2, MPI_Datatype a3, void *a4, MPI_Count a5, MPI_Datatype a6, MPI_Comm a7) { int ierr; int b2; int b5; PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastValue(int, b5, MPI_Count, a5); ierr = MPI_Neighbor_alltoall(a1, b2, a3, a4, b5, a6, a7); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Neighbor_alltoall_c #define MPI_Neighbor_alltoall_c PyMPI_Neighbor_alltoall_c #endif #ifndef PyMPI_HAVE_MPI_Neighbor_alltoallv_c static int PyMPI_Neighbor_alltoallv_c(void *a1, MPI_Count *a2, MPI_Aint *a3, MPI_Datatype a4, void *a5, MPI_Count *a6, MPI_Aint *a7, MPI_Datatype a8, MPI_Comm a9) { int ierr; int ns, nr; int *b2 = NULL; int *b3 = NULL; int *b6 = NULL; int *b7 = NULL; PyMPICommNeighborCount(a9, ns, nr); PyMPICastArray(int, b2, MPI_Count, a2, ns); PyMPICastArray(int, b3, MPI_Aint, a3, ns); PyMPICastArray(int, b6, MPI_Count, a6, nr); PyMPICastArray(int, b7, MPI_Aint, a7, nr); ierr = MPI_Neighbor_alltoallv(a1, b2, b3, a4, a5, b6, b7, a8, a9); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: PyMPIFreeArray(b2); PyMPIFreeArray(b3); PyMPIFreeArray(b6); PyMPIFreeArray(b7); return ierr; } #undef MPI_Neighbor_alltoallv_c #define MPI_Neighbor_alltoallv_c PyMPI_Neighbor_alltoallv_c #endif #ifndef PyMPI_HAVE_MPI_Neighbor_alltoallw_c static int PyMPI_Neighbor_alltoallw_c(void *a1, MPI_Count *a2, MPI_Aint *a3, MPI_Datatype *a4, void *a5, MPI_Count *a6, MPI_Aint *a7, MPI_Datatype *a8, MPI_Comm a9) { int ierr; int ns, nr; int *b2 = NULL; int *b6 = NULL; PyMPICommNeighborCount(a9, ns, nr); PyMPICastArray(int, b2, MPI_Count, a2, ns); PyMPICastArray(int, b6, MPI_Count, a6, nr); ierr = MPI_Neighbor_alltoallw(a1, b2, a3, a4, a5, b6, a7, a8, a9); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: PyMPIFreeArray(b2); PyMPIFreeArray(b6); return ierr; } #undef MPI_Neighbor_alltoallw_c #define MPI_Neighbor_alltoallw_c PyMPI_Neighbor_alltoallw_c #endif #ifndef PyMPI_HAVE_MPI_Ibcast_c static int PyMPI_Ibcast_c(void *a1, MPI_Count a2, MPI_Datatype a3, int a4, MPI_Comm a5, MPI_Request *a6) { int ierr; int b2; PyMPICastValue(int, b2, MPI_Count, a2); ierr = MPI_Ibcast(a1, b2, a3, a4, a5, a6); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Ibcast_c #define MPI_Ibcast_c PyMPI_Ibcast_c #endif #ifndef PyMPI_HAVE_MPI_Igather_c static int PyMPI_Igather_c(void *a1, MPI_Count a2, MPI_Datatype a3, void *a4, MPI_Count a5, MPI_Datatype a6, int a7, MPI_Comm a8, MPI_Request *a9) { int ierr; int b2; int b5; PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastValue(int, b5, MPI_Count, a5); ierr = MPI_Igather(a1, b2, a3, a4, b5, a6, a7, a8, a9); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Igather_c #define MPI_Igather_c PyMPI_Igather_c #endif #ifndef PyMPI_HAVE_MPI_Igatherv_c static int PyMPI_Igatherv_c(void *a1, MPI_Count a2, MPI_Datatype a3, void *a4, MPI_Count *a5, MPI_Aint *a6, MPI_Datatype a7, int a8, MPI_Comm a9, MPI_Request *a10) { int ierr; int n; int b2; int *b5 = NULL; int *b6 = NULL; PyMPICommSize(a9, n); PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastArray(int, b5, MPI_Count, a5, n); PyMPIMoveArray(int, b5, MPI_Count, a5, n); PyMPICastArray(int, b6, MPI_Aint, a6, n); PyMPIMoveArray(int, b6, MPI_Aint, a6, n); ierr = MPI_Igatherv(a1, b2, a3, a4, b5, b6, a7, a8, a9, a10); 
if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Igatherv_c #define MPI_Igatherv_c PyMPI_Igatherv_c #endif #ifndef PyMPI_HAVE_MPI_Iscatter_c static int PyMPI_Iscatter_c(void *a1, MPI_Count a2, MPI_Datatype a3, void *a4, MPI_Count a5, MPI_Datatype a6, int a7, MPI_Comm a8, MPI_Request *a9) { int ierr; int b2; int b5; PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastValue(int, b5, MPI_Count, a5); ierr = MPI_Iscatter(a1, b2, a3, a4, b5, a6, a7, a8, a9); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Iscatter_c #define MPI_Iscatter_c PyMPI_Iscatter_c #endif #ifndef PyMPI_HAVE_MPI_Iscatterv_c static int PyMPI_Iscatterv_c(void *a1, MPI_Count *a2, MPI_Aint *a3, MPI_Datatype a4, void *a5, MPI_Count a6, MPI_Datatype a7, int a8, MPI_Comm a9, MPI_Request *a10) { int ierr; int n; int *b2 = NULL; int *b3 = NULL; int b6; PyMPICommSize(a9, n); PyMPICastArray(int, b2, MPI_Count, a2, n); PyMPIMoveArray(int, b2, MPI_Count, a2, n); PyMPICastArray(int, b3, MPI_Aint, a3, n); PyMPIMoveArray(int, b3, MPI_Aint, a3, n); PyMPICastValue(int, b6, MPI_Count, a6); ierr = MPI_Iscatterv(a1, b2, b3, a4, a5, b6, a7, a8, a9, a10); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Iscatterv_c #define MPI_Iscatterv_c PyMPI_Iscatterv_c #endif #ifndef PyMPI_HAVE_MPI_Iallgather_c static int PyMPI_Iallgather_c(void *a1, MPI_Count a2, MPI_Datatype a3, void *a4, MPI_Count a5, MPI_Datatype a6, MPI_Comm a7, MPI_Request *a8) { int ierr; int b2; int b5; PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastValue(int, b5, MPI_Count, a5); ierr = MPI_Iallgather(a1, b2, a3, a4, b5, a6, a7, a8); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Iallgather_c #define MPI_Iallgather_c PyMPI_Iallgather_c #endif #ifndef PyMPI_HAVE_MPI_Iallgatherv_c static int PyMPI_Iallgatherv_c(void *a1, MPI_Count a2, MPI_Datatype a3, void *a4, MPI_Count *a5, MPI_Aint *a6, MPI_Datatype a7, MPI_Comm a8, MPI_Request *a9) { int ierr; int n; int b2; int *b5 = NULL; int *b6 = NULL; PyMPICommSize(a8, n); PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastArray(int, b5, MPI_Count, a5, n); PyMPIMoveArray(int, b5, MPI_Count, a5, n); PyMPICastArray(int, b6, MPI_Aint, a6, n); PyMPIMoveArray(int, b6, MPI_Aint, a6, n); ierr = MPI_Iallgatherv(a1, b2, a3, a4, b5, b6, a7, a8, a9); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Iallgatherv_c #define MPI_Iallgatherv_c PyMPI_Iallgatherv_c #endif #ifndef PyMPI_HAVE_MPI_Ialltoall_c static int PyMPI_Ialltoall_c(void *a1, MPI_Count a2, MPI_Datatype a3, void *a4, MPI_Count a5, MPI_Datatype a6, MPI_Comm a7, MPI_Request *a8) { int ierr; int b2; int b5; PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastValue(int, b5, MPI_Count, a5); ierr = MPI_Ialltoall(a1, b2, a3, a4, b5, a6, a7, a8); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Ialltoall_c #define MPI_Ialltoall_c PyMPI_Ialltoall_c #endif #ifndef PyMPI_HAVE_MPI_Ialltoallv_c static int PyMPI_Ialltoallv_c(void *a1, MPI_Count *a2, MPI_Aint *a3, MPI_Datatype a4, void *a5, MPI_Count *a6, MPI_Aint *a7, MPI_Datatype a8, MPI_Comm a9, MPI_Request *a10) { int ierr; int n; int *b2 = NULL; int *b3 = NULL; int *b6 = NULL; int *b7 = NULL; PyMPICommSize(a9, n); PyMPICastArray(int, b2, MPI_Count, a2, n); PyMPIMoveArray(int, b2, MPI_Count, a2, n); PyMPICastArray(int, b3, MPI_Aint, a3, n); PyMPIMoveArray(int, b3, MPI_Aint, a3, n); PyMPICastArray(int, b6, MPI_Count, a6, n); PyMPIMoveArray(int, b6, MPI_Count, a6, n); PyMPICastArray(int, b7, MPI_Aint, a7, n); 
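/* Nonblocking vector collectives pair every PyMPICastArray with a
 * PyMPIMoveArray: the freshly narrowed int values are copied back into the
 * caller-provided MPI_Count/MPI_Aint buffers, reusing storage that outlives
 * the call instead of a temporary that would have to survive until the
 * request completes, which is why these wrappers have no PyMPIFreeArray
 * cleanup at fn_exit. */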
PyMPIMoveArray(int, b7, MPI_Aint, a7, n); ierr = MPI_Ialltoallv(a1, b2, b3, a4, a5, b6, b7, a8, a9, a10); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Ialltoallv_c #define MPI_Ialltoallv_c PyMPI_Ialltoallv_c #endif #ifndef PyMPI_HAVE_MPI_Ialltoallw_c static int PyMPI_Ialltoallw_c(void *a1, MPI_Count *a2, MPI_Aint *a3, MPI_Datatype *a4, void *a5, MPI_Count *a6, MPI_Aint *a7, MPI_Datatype *a8, MPI_Comm a9, MPI_Request *a10) { int ierr; int n; int *b2 = NULL; int *b3 = NULL; int *b6 = NULL; int *b7 = NULL; PyMPICommSize(a9, n); PyMPICastArray(int, b2, MPI_Count, a2, n); PyMPIMoveArray(int, b2, MPI_Count, a2, n); PyMPICastArray(int, b3, MPI_Aint, a3, n); PyMPIMoveArray(int, b3, MPI_Aint, a3, n); PyMPICastArray(int, b6, MPI_Count, a6, n); PyMPIMoveArray(int, b6, MPI_Count, a6, n); PyMPICastArray(int, b7, MPI_Aint, a7, n); PyMPIMoveArray(int, b7, MPI_Aint, a7, n); ierr = MPI_Ialltoallw(a1, b2, b3, a4, a5, b6, b7, a8, a9, a10); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Ialltoallw_c #define MPI_Ialltoallw_c PyMPI_Ialltoallw_c #endif #ifndef PyMPI_HAVE_MPI_Ireduce_c static int PyMPI_Ireduce_c(void *a1, void *a2, MPI_Count a3, MPI_Datatype a4, MPI_Op a5, int a6, MPI_Comm a7, MPI_Request *a8) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_Ireduce(a1, a2, b3, a4, a5, a6, a7, a8); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Ireduce_c #define MPI_Ireduce_c PyMPI_Ireduce_c #endif #ifndef PyMPI_HAVE_MPI_Iallreduce_c static int PyMPI_Iallreduce_c(void *a1, void *a2, MPI_Count a3, MPI_Datatype a4, MPI_Op a5, MPI_Comm a6, MPI_Request *a7) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_Iallreduce(a1, a2, b3, a4, a5, a6, a7); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Iallreduce_c #define MPI_Iallreduce_c PyMPI_Iallreduce_c #endif #ifndef PyMPI_HAVE_MPI_Ireduce_scatter_block_c static int PyMPI_Ireduce_scatter_block_c(void *a1, void *a2, MPI_Count a3, MPI_Datatype a4, MPI_Op a5, MPI_Comm a6, MPI_Request *a7) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_Ireduce_scatter_block(a1, a2, b3, a4, a5, a6, a7); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Ireduce_scatter_block_c #define MPI_Ireduce_scatter_block_c PyMPI_Ireduce_scatter_block_c #endif #ifndef PyMPI_HAVE_MPI_Ireduce_scatter_c static int PyMPI_Ireduce_scatter_c(void *a1, void *a2, MPI_Count *a3, MPI_Datatype a4, MPI_Op a5, MPI_Comm a6, MPI_Request *a7) { int ierr; int n; int *b3 = NULL; PyMPICommLocGroupSize(a6, n); PyMPICastArray(int, b3, MPI_Count, a3, n); PyMPIMoveArray(int, b3, MPI_Count, a3, n); ierr = MPI_Ireduce_scatter(a1, a2, b3, a4, a5, a6, a7); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Ireduce_scatter_c #define MPI_Ireduce_scatter_c PyMPI_Ireduce_scatter_c #endif #ifndef PyMPI_HAVE_MPI_Iscan_c static int PyMPI_Iscan_c(void *a1, void *a2, MPI_Count a3, MPI_Datatype a4, MPI_Op a5, MPI_Comm a6, MPI_Request *a7) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_Iscan(a1, a2, b3, a4, a5, a6, a7); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Iscan_c #define MPI_Iscan_c PyMPI_Iscan_c #endif #ifndef PyMPI_HAVE_MPI_Iexscan_c static int PyMPI_Iexscan_c(void *a1, void *a2, MPI_Count a3, MPI_Datatype a4, MPI_Op a5, MPI_Comm a6, MPI_Request *a7) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_Iexscan(a1, a2, b3, a4, a5, a6, a7); if 
(ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Iexscan_c #define MPI_Iexscan_c PyMPI_Iexscan_c #endif #ifndef PyMPI_HAVE_MPI_Ineighbor_allgather_c static int PyMPI_Ineighbor_allgather_c(void *a1, MPI_Count a2, MPI_Datatype a3, void *a4, MPI_Count a5, MPI_Datatype a6, MPI_Comm a7, MPI_Request *a8) { int ierr; int b2; int b5; PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastValue(int, b5, MPI_Count, a5); ierr = MPI_Ineighbor_allgather(a1, b2, a3, a4, b5, a6, a7, a8); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Ineighbor_allgather_c #define MPI_Ineighbor_allgather_c PyMPI_Ineighbor_allgather_c #endif #ifndef PyMPI_HAVE_MPI_Ineighbor_allgatherv_c static int PyMPI_Ineighbor_allgatherv_c(void *a1, MPI_Count a2, MPI_Datatype a3, void *a4, MPI_Count *a5, MPI_Aint *a6, MPI_Datatype a7, MPI_Comm a8, MPI_Request *a9) { int ierr; int ns, nr; int b2; int *b5 = NULL; int *b6 = NULL; PyMPICommNeighborCount(a8, ns, nr); PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastArray(int, b5, MPI_Count, a5, nr); PyMPIMoveArray(int, b5, MPI_Count, a5, nr); PyMPICastArray(int, b6, MPI_Aint, a6, nr); PyMPIMoveArray(int, b6, MPI_Aint, a6, nr); ierr = MPI_Ineighbor_allgatherv(a1, b2, a3, a4, b5, b6, a7, a8, a9); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Ineighbor_allgatherv_c #define MPI_Ineighbor_allgatherv_c PyMPI_Ineighbor_allgatherv_c #endif #ifndef PyMPI_HAVE_MPI_Ineighbor_alltoall_c static int PyMPI_Ineighbor_alltoall_c(void *a1, MPI_Count a2, MPI_Datatype a3, void *a4, MPI_Count a5, MPI_Datatype a6, MPI_Comm a7, MPI_Request *a8) { int ierr; int b2; int b5; PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastValue(int, b5, MPI_Count, a5); ierr = MPI_Ineighbor_alltoall(a1, b2, a3, a4, b5, a6, a7, a8); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Ineighbor_alltoall_c #define MPI_Ineighbor_alltoall_c PyMPI_Ineighbor_alltoall_c #endif #ifndef PyMPI_HAVE_MPI_Ineighbor_alltoallv_c static int PyMPI_Ineighbor_alltoallv_c(void *a1, MPI_Count *a2, MPI_Aint *a3, MPI_Datatype a4, void *a5, MPI_Count *a6, MPI_Aint *a7, MPI_Datatype a8, MPI_Comm a9, MPI_Request *a10) { int ierr; int ns, nr; int *b2 = NULL; int *b3 = NULL; int *b6 = NULL; int *b7 = NULL; PyMPICommNeighborCount(a9, ns, nr); PyMPICastArray(int, b2, MPI_Count, a2, ns); PyMPIMoveArray(int, b2, MPI_Count, a2, ns); PyMPICastArray(int, b3, MPI_Aint, a3, ns); PyMPIMoveArray(int, b3, MPI_Aint, a3, ns); PyMPICastArray(int, b6, MPI_Count, a6, nr); PyMPIMoveArray(int, b6, MPI_Count, a6, nr); PyMPICastArray(int, b7, MPI_Aint, a7, nr); PyMPIMoveArray(int, b7, MPI_Aint, a7, nr); ierr = MPI_Ineighbor_alltoallv(a1, b2, b3, a4, a5, b6, b7, a8, a9, a10); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Ineighbor_alltoallv_c #define MPI_Ineighbor_alltoallv_c PyMPI_Ineighbor_alltoallv_c #endif #ifndef PyMPI_HAVE_MPI_Ineighbor_alltoallw_c static int PyMPI_Ineighbor_alltoallw_c(void *a1, MPI_Count *a2, MPI_Aint *a3, MPI_Datatype *a4, void *a5, MPI_Count *a6, MPI_Aint *a7, MPI_Datatype *a8, MPI_Comm a9, MPI_Request *a10) { int ierr; int ns, nr; int *b2 = NULL; int *b6 = NULL; PyMPICommNeighborCount(a9, ns, nr); PyMPICastArray(int, b2, MPI_Count, a2, ns); PyMPIMoveArray(int, b2, MPI_Count, a2, ns); PyMPICastArray(int, b6, MPI_Count, a6, nr); PyMPIMoveArray(int, b6, MPI_Count, a6, nr); ierr = MPI_Ineighbor_alltoallw(a1, b2, a3, a4, a5, b6, a7, a8, a9, a10); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Ineighbor_alltoallw_c 
#define MPI_Ineighbor_alltoallw_c PyMPI_Ineighbor_alltoallw_c #endif #ifndef PyMPI_HAVE_MPI_Bcast_init_c static int PyMPI_Bcast_init_c(void *a1, MPI_Count a2, MPI_Datatype a3, int a4, MPI_Comm a5, MPI_Info a6, MPI_Request *a7) { int ierr; int b2; PyMPICastValue(int, b2, MPI_Count, a2); ierr = MPI_Bcast_init(a1, b2, a3, a4, a5, a6, a7); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Bcast_init_c #define MPI_Bcast_init_c PyMPI_Bcast_init_c #endif #ifndef PyMPI_HAVE_MPI_Gather_init_c static int PyMPI_Gather_init_c(void *a1, MPI_Count a2, MPI_Datatype a3, void *a4, MPI_Count a5, MPI_Datatype a6, int a7, MPI_Comm a8, MPI_Info a9, MPI_Request *a10) { int ierr; int b2; int b5; PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastValue(int, b5, MPI_Count, a5); ierr = MPI_Gather_init(a1, b2, a3, a4, b5, a6, a7, a8, a9, a10); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Gather_init_c #define MPI_Gather_init_c PyMPI_Gather_init_c #endif #ifndef PyMPI_HAVE_MPI_Gatherv_init_c static int PyMPI_Gatherv_init_c(void *a1, MPI_Count a2, MPI_Datatype a3, void *a4, MPI_Count *a5, MPI_Aint *a6, MPI_Datatype a7, int a8, MPI_Comm a9, MPI_Info a10, MPI_Request *a11) { int ierr; int n; int b2; int *b5 = NULL; int *b6 = NULL; PyMPICommSize(a9, n); PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastArray(int, b5, MPI_Count, a5, n); PyMPICastArray(int, b6, MPI_Aint, a6, n); ierr = MPI_Gatherv_init(a1, b2, a3, a4, b5, b6, a7, a8, a9, a10, a11); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: PyMPIFreeArray(b5); PyMPIFreeArray(b6); return ierr; } #undef MPI_Gatherv_init_c #define MPI_Gatherv_init_c PyMPI_Gatherv_init_c #endif #ifndef PyMPI_HAVE_MPI_Scatter_init_c static int PyMPI_Scatter_init_c(void *a1, MPI_Count a2, MPI_Datatype a3, void *a4, MPI_Count a5, MPI_Datatype a6, int a7, MPI_Comm a8, MPI_Info a9, MPI_Request *a10) { int ierr; int b2; int b5; PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastValue(int, b5, MPI_Count, a5); ierr = MPI_Scatter_init(a1, b2, a3, a4, b5, a6, a7, a8, a9, a10); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Scatter_init_c #define MPI_Scatter_init_c PyMPI_Scatter_init_c #endif #ifndef PyMPI_HAVE_MPI_Scatterv_init_c static int PyMPI_Scatterv_init_c(void *a1, MPI_Count *a2, MPI_Aint *a3, MPI_Datatype a4, void *a5, MPI_Count a6, MPI_Datatype a7, int a8, MPI_Comm a9, MPI_Info a10, MPI_Request *a11) { int ierr; int n; int *b2 = NULL; int *b3 = NULL; int b6; PyMPICommSize(a9, n); PyMPICastArray(int, b2, MPI_Count, a2, n); PyMPICastArray(int, b3, MPI_Aint, a3, n); PyMPICastValue(int, b6, MPI_Count, a6); ierr = MPI_Scatterv_init(a1, b2, b3, a4, a5, b6, a7, a8, a9, a10, a11); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: PyMPIFreeArray(b2); PyMPIFreeArray(b3); return ierr; } #undef MPI_Scatterv_init_c #define MPI_Scatterv_init_c PyMPI_Scatterv_init_c #endif #ifndef PyMPI_HAVE_MPI_Allgather_init_c static int PyMPI_Allgather_init_c(void *a1, MPI_Count a2, MPI_Datatype a3, void *a4, MPI_Count a5, MPI_Datatype a6, MPI_Comm a7, MPI_Info a8, MPI_Request *a9) { int ierr; int b2; int b5; PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastValue(int, b5, MPI_Count, a5); ierr = MPI_Allgather_init(a1, b2, a3, a4, b5, a6, a7, a8, a9); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Allgather_init_c #define MPI_Allgather_init_c PyMPI_Allgather_init_c #endif #ifndef PyMPI_HAVE_MPI_Allgatherv_init_c static int PyMPI_Allgatherv_init_c(void *a1, MPI_Count a2, MPI_Datatype a3, void *a4, MPI_Count *a5, MPI_Aint 
*a6, MPI_Datatype a7, MPI_Comm a8, MPI_Info a9, MPI_Request *a10) { int ierr; int n; int b2; int *b5 = NULL; int *b6 = NULL; PyMPICommSize(a8, n); PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastArray(int, b5, MPI_Count, a5, n); PyMPICastArray(int, b6, MPI_Aint, a6, n); ierr = MPI_Allgatherv_init(a1, b2, a3, a4, b5, b6, a7, a8, a9, a10); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: PyMPIFreeArray(b5); PyMPIFreeArray(b6); return ierr; } #undef MPI_Allgatherv_init_c #define MPI_Allgatherv_init_c PyMPI_Allgatherv_init_c #endif #ifndef PyMPI_HAVE_MPI_Alltoall_init_c static int PyMPI_Alltoall_init_c(void *a1, MPI_Count a2, MPI_Datatype a3, void *a4, MPI_Count a5, MPI_Datatype a6, MPI_Comm a7, MPI_Info a8, MPI_Request *a9) { int ierr; int b2; int b5; PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastValue(int, b5, MPI_Count, a5); ierr = MPI_Alltoall_init(a1, b2, a3, a4, b5, a6, a7, a8, a9); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Alltoall_init_c #define MPI_Alltoall_init_c PyMPI_Alltoall_init_c #endif #ifndef PyMPI_HAVE_MPI_Alltoallv_init_c static int PyMPI_Alltoallv_init_c(void *a1, MPI_Count *a2, MPI_Aint *a3, MPI_Datatype a4, void *a5, MPI_Count *a6, MPI_Aint *a7, MPI_Datatype a8, MPI_Comm a9, MPI_Info a10, MPI_Request *a11) { int ierr; int n; int *b2 = NULL; int *b3 = NULL; int *b6 = NULL; int *b7 = NULL; PyMPICommSize(a9, n); PyMPICastArray(int, b2, MPI_Count, a2, n); PyMPICastArray(int, b3, MPI_Aint, a3, n); PyMPICastArray(int, b6, MPI_Count, a6, n); PyMPICastArray(int, b7, MPI_Aint, a7, n); ierr = MPI_Alltoallv_init(a1, b2, b3, a4, a5, b6, b7, a8, a9, a10, a11); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: PyMPIFreeArray(b2); PyMPIFreeArray(b3); PyMPIFreeArray(b6); PyMPIFreeArray(b7); return ierr; } #undef MPI_Alltoallv_init_c #define MPI_Alltoallv_init_c PyMPI_Alltoallv_init_c #endif #ifndef PyMPI_HAVE_MPI_Alltoallw_init_c static int PyMPI_Alltoallw_init_c(void *a1, MPI_Count *a2, MPI_Aint *a3, MPI_Datatype *a4, void *a5, MPI_Count *a6, MPI_Aint *a7, MPI_Datatype *a8, MPI_Comm a9, MPI_Info a10, MPI_Request *a11) { int ierr; int n; int *b2 = NULL; int *b3 = NULL; int *b6 = NULL; int *b7 = NULL; PyMPICommSize(a9, n); PyMPICastArray(int, b2, MPI_Count, a2, n); PyMPICastArray(int, b3, MPI_Aint, a3, n); PyMPICastArray(int, b6, MPI_Count, a6, n); PyMPICastArray(int, b7, MPI_Aint, a7, n); ierr = MPI_Alltoallw_init(a1, b2, b3, a4, a5, b6, b7, a8, a9, a10, a11); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: PyMPIFreeArray(b2); PyMPIFreeArray(b3); PyMPIFreeArray(b6); PyMPIFreeArray(b7); return ierr; } #undef MPI_Alltoallw_init_c #define MPI_Alltoallw_init_c PyMPI_Alltoallw_init_c #endif #ifndef PyMPI_HAVE_MPI_Reduce_init_c static int PyMPI_Reduce_init_c(void *a1, void *a2, MPI_Count a3, MPI_Datatype a4, MPI_Op a5, int a6, MPI_Comm a7, MPI_Info a8, MPI_Request *a9) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_Reduce_init(a1, a2, b3, a4, a5, a6, a7, a8, a9); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Reduce_init_c #define MPI_Reduce_init_c PyMPI_Reduce_init_c #endif #ifndef PyMPI_HAVE_MPI_Allreduce_init_c static int PyMPI_Allreduce_init_c(void *a1, void *a2, MPI_Count a3, MPI_Datatype a4, MPI_Op a5, MPI_Comm a6, MPI_Info a7, MPI_Request *a8) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_Allreduce_init(a1, a2, b3, a4, a5, a6, a7, a8); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Allreduce_init_c #define MPI_Allreduce_init_c PyMPI_Allreduce_init_c 
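/* The persistent (MPI_*_init) wrappers downcast their count/displacement
   arrays just as the blocking ones do and release the temporaries at
   fn_exit, right after the MPI_*_init call returns; this evidently assumes
   the implementation takes what it needs from those arrays while creating
   the persistent request. */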
#endif #ifndef PyMPI_HAVE_MPI_Reduce_scatter_block_init_c static int PyMPI_Reduce_scatter_block_init_c(void *a1, void *a2, MPI_Count a3, MPI_Datatype a4, MPI_Op a5, MPI_Comm a6, MPI_Info a7, MPI_Request *a8) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_Reduce_scatter_block_init(a1, a2, b3, a4, a5, a6, a7, a8); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Reduce_scatter_block_init_c #define MPI_Reduce_scatter_block_init_c PyMPI_Reduce_scatter_block_init_c #endif #ifndef PyMPI_HAVE_MPI_Reduce_scatter_init_c static int PyMPI_Reduce_scatter_init_c(void *a1, void *a2, MPI_Count *a3, MPI_Datatype a4, MPI_Op a5, MPI_Comm a6, MPI_Info a7, MPI_Request *a8) { int ierr; int n; int *b3 = NULL; PyMPICommLocGroupSize(a6, n); PyMPICastArray(int, b3, MPI_Count, a3, n); ierr = MPI_Reduce_scatter_init(a1, a2, b3, a4, a5, a6, a7, a8); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: PyMPIFreeArray(b3); return ierr; } #undef MPI_Reduce_scatter_init_c #define MPI_Reduce_scatter_init_c PyMPI_Reduce_scatter_init_c #endif #ifndef PyMPI_HAVE_MPI_Scan_init_c static int PyMPI_Scan_init_c(void *a1, void *a2, MPI_Count a3, MPI_Datatype a4, MPI_Op a5, MPI_Comm a6, MPI_Info a7, MPI_Request *a8) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_Scan_init(a1, a2, b3, a4, a5, a6, a7, a8); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Scan_init_c #define MPI_Scan_init_c PyMPI_Scan_init_c #endif #ifndef PyMPI_HAVE_MPI_Exscan_init_c static int PyMPI_Exscan_init_c(void *a1, void *a2, MPI_Count a3, MPI_Datatype a4, MPI_Op a5, MPI_Comm a6, MPI_Info a7, MPI_Request *a8) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_Exscan_init(a1, a2, b3, a4, a5, a6, a7, a8); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Exscan_init_c #define MPI_Exscan_init_c PyMPI_Exscan_init_c #endif #ifndef PyMPI_HAVE_MPI_Neighbor_allgather_init_c static int PyMPI_Neighbor_allgather_init_c(void *a1, MPI_Count a2, MPI_Datatype a3, void *a4, MPI_Count a5, MPI_Datatype a6, MPI_Comm a7, MPI_Info a8, MPI_Request *a9) { int ierr; int b2; int b5; PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastValue(int, b5, MPI_Count, a5); ierr = MPI_Neighbor_allgather_init(a1, b2, a3, a4, b5, a6, a7, a8, a9); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Neighbor_allgather_init_c #define MPI_Neighbor_allgather_init_c PyMPI_Neighbor_allgather_init_c #endif #ifndef PyMPI_HAVE_MPI_Neighbor_allgatherv_init_c static int PyMPI_Neighbor_allgatherv_init_c(void *a1, MPI_Count a2, MPI_Datatype a3, void *a4, MPI_Count *a5, MPI_Aint *a6, MPI_Datatype a7, MPI_Comm a8, MPI_Info a9, MPI_Request *a10) { int ierr; int ns, nr; int b2; int *b5 = NULL; int *b6 = NULL; PyMPICommNeighborCount(a8, ns, nr); PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastArray(int, b5, MPI_Count, a5, nr); PyMPICastArray(int, b6, MPI_Aint, a6, nr); ierr = MPI_Neighbor_allgatherv_init(a1, b2, a3, a4, b5, b6, a7, a8, a9, a10); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: PyMPIFreeArray(b5); PyMPIFreeArray(b6); return ierr; } #undef MPI_Neighbor_allgatherv_init_c #define MPI_Neighbor_allgatherv_init_c PyMPI_Neighbor_allgatherv_init_c #endif #ifndef PyMPI_HAVE_MPI_Neighbor_alltoall_init_c static int PyMPI_Neighbor_alltoall_init_c(void *a1, MPI_Count a2, MPI_Datatype a3, void *a4, MPI_Count a5, MPI_Datatype a6, MPI_Comm a7, MPI_Info a8, MPI_Request *a9) { int ierr; int b2; int b5; PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastValue(int, 
b5, MPI_Count, a5); ierr = MPI_Neighbor_alltoall_init(a1, b2, a3, a4, b5, a6, a7, a8, a9); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Neighbor_alltoall_init_c #define MPI_Neighbor_alltoall_init_c PyMPI_Neighbor_alltoall_init_c #endif #ifndef PyMPI_HAVE_MPI_Neighbor_alltoallv_init_c static int PyMPI_Neighbor_alltoallv_init_c(void *a1, MPI_Count *a2, MPI_Aint *a3, MPI_Datatype a4, void *a5, MPI_Count *a6, MPI_Aint *a7, MPI_Datatype a8, MPI_Comm a9, MPI_Info a10, MPI_Request *a11) { int ierr; int ns, nr; int *b2 = NULL; int *b3 = NULL; int *b6 = NULL; int *b7 = NULL; PyMPICommNeighborCount(a9, ns, nr); PyMPICastArray(int, b2, MPI_Count, a2, ns); PyMPICastArray(int, b3, MPI_Aint, a3, ns); PyMPICastArray(int, b6, MPI_Count, a6, nr); PyMPICastArray(int, b7, MPI_Aint, a7, nr); ierr = MPI_Neighbor_alltoallv_init(a1, b2, b3, a4, a5, b6, b7, a8, a9, a10, a11); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: PyMPIFreeArray(b2); PyMPIFreeArray(b3); PyMPIFreeArray(b6); PyMPIFreeArray(b7); return ierr; } #undef MPI_Neighbor_alltoallv_init_c #define MPI_Neighbor_alltoallv_init_c PyMPI_Neighbor_alltoallv_init_c #endif #ifndef PyMPI_HAVE_MPI_Neighbor_alltoallw_init_c static int PyMPI_Neighbor_alltoallw_init_c(void *a1, MPI_Count *a2, MPI_Aint *a3, MPI_Datatype *a4, void *a5, MPI_Count *a6, MPI_Aint *a7, MPI_Datatype *a8, MPI_Comm a9, MPI_Info a10, MPI_Request *a11) { int ierr; int ns, nr; int *b2 = NULL; int *b6 = NULL; PyMPICommNeighborCount(a9, ns, nr); PyMPICastArray(int, b2, MPI_Count, a2, ns); PyMPICastArray(int, b6, MPI_Count, a6, nr); ierr = MPI_Neighbor_alltoallw_init(a1, b2, a3, a4, a5, b6, a7, a8, a9, a10, a11); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: PyMPIFreeArray(b2); PyMPIFreeArray(b6); return ierr; } #undef MPI_Neighbor_alltoallw_init_c #define MPI_Neighbor_alltoallw_init_c PyMPI_Neighbor_alltoallw_init_c #endif #ifndef PyMPI_HAVE_MPI_Win_create_c static int PyMPI_Win_create_c(void *a1, MPI_Aint a2, MPI_Aint a3, MPI_Info a4, MPI_Comm a5, MPI_Win *a6) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Aint, a3); ierr = MPI_Win_create(a1, a2, b3, a4, a5, a6); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Win_create_c #define MPI_Win_create_c PyMPI_Win_create_c #endif #ifndef PyMPI_HAVE_MPI_Win_allocate_c static int PyMPI_Win_allocate_c(MPI_Aint a1, MPI_Aint a2, MPI_Info a3, MPI_Comm a4, void *a5, MPI_Win *a6) { int ierr; int b2; PyMPICastValue(int, b2, MPI_Aint, a2); ierr = MPI_Win_allocate(a1, b2, a3, a4, a5, a6); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Win_allocate_c #define MPI_Win_allocate_c PyMPI_Win_allocate_c #endif #ifndef PyMPI_HAVE_MPI_Win_allocate_shared_c static int PyMPI_Win_allocate_shared_c(MPI_Aint a1, MPI_Aint a2, MPI_Info a3, MPI_Comm a4, void *a5, MPI_Win *a6) { int ierr; int b2; PyMPICastValue(int, b2, MPI_Aint, a2); ierr = MPI_Win_allocate_shared(a1, b2, a3, a4, a5, a6); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Win_allocate_shared_c #define MPI_Win_allocate_shared_c PyMPI_Win_allocate_shared_c #endif #ifndef PyMPI_HAVE_MPI_Win_shared_query_c static int PyMPI_Win_shared_query_c(MPI_Win a1, int a2, MPI_Aint *a3, MPI_Aint *a4, void *a5) { int ierr; int b4 = 0; int *p4 = a4 ? 
&b4 : NULL; ierr = MPI_Win_shared_query(a1, a2, a3, p4, a5); if (ierr != MPI_SUCCESS) goto fn_exit; if (a4) *a4 = b4; fn_exit: return ierr; } #undef MPI_Win_shared_query_c #define MPI_Win_shared_query_c PyMPI_Win_shared_query_c #endif #ifndef PyMPI_HAVE_MPI_Get_c static int PyMPI_Get_c(void *a1, MPI_Count a2, MPI_Datatype a3, int a4, MPI_Aint a5, MPI_Count a6, MPI_Datatype a7, MPI_Win a8) { int ierr; int b2; int b6; PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastValue(int, b6, MPI_Count, a6); ierr = MPI_Get(a1, b2, a3, a4, a5, b6, a7, a8); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Get_c #define MPI_Get_c PyMPI_Get_c #endif #ifndef PyMPI_HAVE_MPI_Put_c static int PyMPI_Put_c(void *a1, MPI_Count a2, MPI_Datatype a3, int a4, MPI_Aint a5, MPI_Count a6, MPI_Datatype a7, MPI_Win a8) { int ierr; int b2; int b6; PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastValue(int, b6, MPI_Count, a6); ierr = MPI_Put(a1, b2, a3, a4, a5, b6, a7, a8); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Put_c #define MPI_Put_c PyMPI_Put_c #endif #ifndef PyMPI_HAVE_MPI_Accumulate_c static int PyMPI_Accumulate_c(void *a1, MPI_Count a2, MPI_Datatype a3, int a4, MPI_Aint a5, MPI_Count a6, MPI_Datatype a7, MPI_Op a8, MPI_Win a9) { int ierr; int b2; int b6; PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastValue(int, b6, MPI_Count, a6); ierr = MPI_Accumulate(a1, b2, a3, a4, a5, b6, a7, a8, a9); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Accumulate_c #define MPI_Accumulate_c PyMPI_Accumulate_c #endif #ifndef PyMPI_HAVE_MPI_Get_accumulate_c static int PyMPI_Get_accumulate_c(void *a1, MPI_Count a2, MPI_Datatype a3, void *a4, MPI_Count a5, MPI_Datatype a6, int a7, MPI_Aint a8, MPI_Count a9, MPI_Datatype a10, MPI_Op a11, MPI_Win a12) { int ierr; int b2; int b5; int b9; PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastValue(int, b5, MPI_Count, a5); PyMPICastValue(int, b9, MPI_Count, a9); ierr = MPI_Get_accumulate(a1, b2, a3, a4, b5, a6, a7, a8, b9, a10, a11, a12); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Get_accumulate_c #define MPI_Get_accumulate_c PyMPI_Get_accumulate_c #endif #ifndef PyMPI_HAVE_MPI_Rget_c static int PyMPI_Rget_c(void *a1, MPI_Count a2, MPI_Datatype a3, int a4, MPI_Aint a5, MPI_Count a6, MPI_Datatype a7, MPI_Win a8, MPI_Request *a9) { int ierr; int b2; int b6; PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastValue(int, b6, MPI_Count, a6); ierr = MPI_Rget(a1, b2, a3, a4, a5, b6, a7, a8, a9); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Rget_c #define MPI_Rget_c PyMPI_Rget_c #endif #ifndef PyMPI_HAVE_MPI_Rput_c static int PyMPI_Rput_c(void *a1, MPI_Count a2, MPI_Datatype a3, int a4, MPI_Aint a5, MPI_Count a6, MPI_Datatype a7, MPI_Win a8, MPI_Request *a9) { int ierr; int b2; int b6; PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastValue(int, b6, MPI_Count, a6); ierr = MPI_Rput(a1, b2, a3, a4, a5, b6, a7, a8, a9); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Rput_c #define MPI_Rput_c PyMPI_Rput_c #endif #ifndef PyMPI_HAVE_MPI_Raccumulate_c static int PyMPI_Raccumulate_c(void *a1, MPI_Count a2, MPI_Datatype a3, int a4, MPI_Aint a5, MPI_Count a6, MPI_Datatype a7, MPI_Op a8, MPI_Win a9, MPI_Request *a10) { int ierr; int b2; int b6; PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastValue(int, b6, MPI_Count, a6); ierr = MPI_Raccumulate(a1, b2, a3, a4, a5, b6, a7, a8, a9, a10); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } 
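/* The one-sided (RMA) and MPI-IO wrappers handle only scalar counts, so
   each fallback reduces to PyMPICastValue() downcasts (presumably
   overflow-checked, jumping to fn_exit on failure) followed by a call to
   the classic int-count routine; there are no temporary arrays to free.
   A rough sketch of what such a downcast has to do, assuming a
   hypothetical overflow check and error code:

     if (a2 > (MPI_Count) INT_MAX) { ierr = MPI_ERR_ARG; goto fn_exit; }
     b2 = (int) a2;
*/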
#undef MPI_Raccumulate_c #define MPI_Raccumulate_c PyMPI_Raccumulate_c #endif #ifndef PyMPI_HAVE_MPI_Rget_accumulate_c static int PyMPI_Rget_accumulate_c(void *a1, MPI_Count a2, MPI_Datatype a3, void *a4, MPI_Count a5, MPI_Datatype a6, int a7, MPI_Aint a8, MPI_Count a9, MPI_Datatype a10, MPI_Op a11, MPI_Win a12, MPI_Request *a13) { int ierr; int b2; int b5; int b9; PyMPICastValue(int, b2, MPI_Count, a2); PyMPICastValue(int, b5, MPI_Count, a5); PyMPICastValue(int, b9, MPI_Count, a9); ierr = MPI_Rget_accumulate(a1, b2, a3, a4, b5, a6, a7, a8, b9, a10, a11, a12, a13); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_Rget_accumulate_c #define MPI_Rget_accumulate_c PyMPI_Rget_accumulate_c #endif #ifndef PyMPI_HAVE_MPI_File_read_at_c static int PyMPI_File_read_at_c(MPI_File a1, MPI_Offset a2, void *a3, MPI_Count a4, MPI_Datatype a5, MPI_Status *a6) { int ierr; int b4; PyMPICastValue(int, b4, MPI_Count, a4); ierr = MPI_File_read_at(a1, a2, a3, b4, a5, a6); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_File_read_at_c #define MPI_File_read_at_c PyMPI_File_read_at_c #endif #ifndef PyMPI_HAVE_MPI_File_read_at_all_c static int PyMPI_File_read_at_all_c(MPI_File a1, MPI_Offset a2, void *a3, MPI_Count a4, MPI_Datatype a5, MPI_Status *a6) { int ierr; int b4; PyMPICastValue(int, b4, MPI_Count, a4); ierr = MPI_File_read_at_all(a1, a2, a3, b4, a5, a6); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_File_read_at_all_c #define MPI_File_read_at_all_c PyMPI_File_read_at_all_c #endif #ifndef PyMPI_HAVE_MPI_File_write_at_c static int PyMPI_File_write_at_c(MPI_File a1, MPI_Offset a2, void *a3, MPI_Count a4, MPI_Datatype a5, MPI_Status *a6) { int ierr; int b4; PyMPICastValue(int, b4, MPI_Count, a4); ierr = MPI_File_write_at(a1, a2, a3, b4, a5, a6); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_File_write_at_c #define MPI_File_write_at_c PyMPI_File_write_at_c #endif #ifndef PyMPI_HAVE_MPI_File_write_at_all_c static int PyMPI_File_write_at_all_c(MPI_File a1, MPI_Offset a2, void *a3, MPI_Count a4, MPI_Datatype a5, MPI_Status *a6) { int ierr; int b4; PyMPICastValue(int, b4, MPI_Count, a4); ierr = MPI_File_write_at_all(a1, a2, a3, b4, a5, a6); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_File_write_at_all_c #define MPI_File_write_at_all_c PyMPI_File_write_at_all_c #endif #ifndef PyMPI_HAVE_MPI_File_iread_at_c static int PyMPI_File_iread_at_c(MPI_File a1, MPI_Offset a2, void *a3, MPI_Count a4, MPI_Datatype a5, MPI_Request *a6) { int ierr; int b4; PyMPICastValue(int, b4, MPI_Count, a4); ierr = MPI_File_iread_at(a1, a2, a3, b4, a5, a6); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_File_iread_at_c #define MPI_File_iread_at_c PyMPI_File_iread_at_c #endif #ifndef PyMPI_HAVE_MPI_File_iread_at_all_c static int PyMPI_File_iread_at_all_c(MPI_File a1, MPI_Offset a2, void *a3, MPI_Count a4, MPI_Datatype a5, MPI_Request *a6) { int ierr; int b4; PyMPICastValue(int, b4, MPI_Count, a4); ierr = MPI_File_iread_at_all(a1, a2, a3, b4, a5, a6); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_File_iread_at_all_c #define MPI_File_iread_at_all_c PyMPI_File_iread_at_all_c #endif #ifndef PyMPI_HAVE_MPI_File_iwrite_at_c static int PyMPI_File_iwrite_at_c(MPI_File a1, MPI_Offset a2, void *a3, MPI_Count a4, MPI_Datatype a5, MPI_Request *a6) { int ierr; int b4; PyMPICastValue(int, b4, MPI_Count, a4); ierr = MPI_File_iwrite_at(a1, a2, a3, b4, a5, a6); if (ierr != 
MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_File_iwrite_at_c #define MPI_File_iwrite_at_c PyMPI_File_iwrite_at_c #endif #ifndef PyMPI_HAVE_MPI_File_iwrite_at_all_c static int PyMPI_File_iwrite_at_all_c(MPI_File a1, MPI_Offset a2, void *a3, MPI_Count a4, MPI_Datatype a5, MPI_Request *a6) { int ierr; int b4; PyMPICastValue(int, b4, MPI_Count, a4); ierr = MPI_File_iwrite_at_all(a1, a2, a3, b4, a5, a6); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_File_iwrite_at_all_c #define MPI_File_iwrite_at_all_c PyMPI_File_iwrite_at_all_c #endif #ifndef PyMPI_HAVE_MPI_File_read_c static int PyMPI_File_read_c(MPI_File a1, void *a2, MPI_Count a3, MPI_Datatype a4, MPI_Status *a5) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_File_read(a1, a2, b3, a4, a5); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_File_read_c #define MPI_File_read_c PyMPI_File_read_c #endif #ifndef PyMPI_HAVE_MPI_File_read_all_c static int PyMPI_File_read_all_c(MPI_File a1, void *a2, MPI_Count a3, MPI_Datatype a4, MPI_Status *a5) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_File_read_all(a1, a2, b3, a4, a5); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_File_read_all_c #define MPI_File_read_all_c PyMPI_File_read_all_c #endif #ifndef PyMPI_HAVE_MPI_File_write_c static int PyMPI_File_write_c(MPI_File a1, void *a2, MPI_Count a3, MPI_Datatype a4, MPI_Status *a5) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_File_write(a1, a2, b3, a4, a5); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_File_write_c #define MPI_File_write_c PyMPI_File_write_c #endif #ifndef PyMPI_HAVE_MPI_File_write_all_c static int PyMPI_File_write_all_c(MPI_File a1, void *a2, MPI_Count a3, MPI_Datatype a4, MPI_Status *a5) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_File_write_all(a1, a2, b3, a4, a5); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_File_write_all_c #define MPI_File_write_all_c PyMPI_File_write_all_c #endif #ifndef PyMPI_HAVE_MPI_File_iread_c static int PyMPI_File_iread_c(MPI_File a1, void *a2, MPI_Count a3, MPI_Datatype a4, MPI_Request *a5) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_File_iread(a1, a2, b3, a4, a5); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_File_iread_c #define MPI_File_iread_c PyMPI_File_iread_c #endif #ifndef PyMPI_HAVE_MPI_File_iread_all_c static int PyMPI_File_iread_all_c(MPI_File a1, void *a2, MPI_Count a3, MPI_Datatype a4, MPI_Request *a5) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_File_iread_all(a1, a2, b3, a4, a5); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_File_iread_all_c #define MPI_File_iread_all_c PyMPI_File_iread_all_c #endif #ifndef PyMPI_HAVE_MPI_File_iwrite_c static int PyMPI_File_iwrite_c(MPI_File a1, void *a2, MPI_Count a3, MPI_Datatype a4, MPI_Request *a5) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_File_iwrite(a1, a2, b3, a4, a5); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_File_iwrite_c #define MPI_File_iwrite_c PyMPI_File_iwrite_c #endif #ifndef PyMPI_HAVE_MPI_File_iwrite_all_c static int PyMPI_File_iwrite_all_c(MPI_File a1, void *a2, MPI_Count a3, MPI_Datatype a4, MPI_Request *a5) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_File_iwrite_all(a1, a2, b3, a4, a5); if (ierr 
!= MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_File_iwrite_all_c #define MPI_File_iwrite_all_c PyMPI_File_iwrite_all_c #endif #ifndef PyMPI_HAVE_MPI_File_read_shared_c static int PyMPI_File_read_shared_c(MPI_File a1, void *a2, MPI_Count a3, MPI_Datatype a4, MPI_Status *a5) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_File_read_shared(a1, a2, b3, a4, a5); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_File_read_shared_c #define MPI_File_read_shared_c PyMPI_File_read_shared_c #endif #ifndef PyMPI_HAVE_MPI_File_write_shared_c static int PyMPI_File_write_shared_c(MPI_File a1, void *a2, MPI_Count a3, MPI_Datatype a4, MPI_Status *a5) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_File_write_shared(a1, a2, b3, a4, a5); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_File_write_shared_c #define MPI_File_write_shared_c PyMPI_File_write_shared_c #endif #ifndef PyMPI_HAVE_MPI_File_iread_shared_c static int PyMPI_File_iread_shared_c(MPI_File a1, void *a2, MPI_Count a3, MPI_Datatype a4, MPI_Request *a5) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_File_iread_shared(a1, a2, b3, a4, a5); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_File_iread_shared_c #define MPI_File_iread_shared_c PyMPI_File_iread_shared_c #endif #ifndef PyMPI_HAVE_MPI_File_iwrite_shared_c static int PyMPI_File_iwrite_shared_c(MPI_File a1, void *a2, MPI_Count a3, MPI_Datatype a4, MPI_Request *a5) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_File_iwrite_shared(a1, a2, b3, a4, a5); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_File_iwrite_shared_c #define MPI_File_iwrite_shared_c PyMPI_File_iwrite_shared_c #endif #ifndef PyMPI_HAVE_MPI_File_read_ordered_c static int PyMPI_File_read_ordered_c(MPI_File a1, void *a2, MPI_Count a3, MPI_Datatype a4, MPI_Status *a5) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_File_read_ordered(a1, a2, b3, a4, a5); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_File_read_ordered_c #define MPI_File_read_ordered_c PyMPI_File_read_ordered_c #endif #ifndef PyMPI_HAVE_MPI_File_write_ordered_c static int PyMPI_File_write_ordered_c(MPI_File a1, void *a2, MPI_Count a3, MPI_Datatype a4, MPI_Status *a5) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_File_write_ordered(a1, a2, b3, a4, a5); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_File_write_ordered_c #define MPI_File_write_ordered_c PyMPI_File_write_ordered_c #endif #ifndef PyMPI_HAVE_MPI_File_read_at_all_begin_c static int PyMPI_File_read_at_all_begin_c(MPI_File a1, MPI_Offset a2, void *a3, MPI_Count a4, MPI_Datatype a5) { int ierr; int b4; PyMPICastValue(int, b4, MPI_Count, a4); ierr = MPI_File_read_at_all_begin(a1, a2, a3, b4, a5); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_File_read_at_all_begin_c #define MPI_File_read_at_all_begin_c PyMPI_File_read_at_all_begin_c #endif #ifndef PyMPI_HAVE_MPI_File_write_at_all_begin_c static int PyMPI_File_write_at_all_begin_c(MPI_File a1, MPI_Offset a2, void *a3, MPI_Count a4, MPI_Datatype a5) { int ierr; int b4; PyMPICastValue(int, b4, MPI_Count, a4); ierr = MPI_File_write_at_all_begin(a1, a2, a3, b4, a5); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_File_write_at_all_begin_c #define MPI_File_write_at_all_begin_c 
PyMPI_File_write_at_all_begin_c #endif #ifndef PyMPI_HAVE_MPI_File_read_all_begin_c static int PyMPI_File_read_all_begin_c(MPI_File a1, void *a2, MPI_Count a3, MPI_Datatype a4) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_File_read_all_begin(a1, a2, b3, a4); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_File_read_all_begin_c #define MPI_File_read_all_begin_c PyMPI_File_read_all_begin_c #endif #ifndef PyMPI_HAVE_MPI_File_write_all_begin_c static int PyMPI_File_write_all_begin_c(MPI_File a1, void *a2, MPI_Count a3, MPI_Datatype a4) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_File_write_all_begin(a1, a2, b3, a4); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_File_write_all_begin_c #define MPI_File_write_all_begin_c PyMPI_File_write_all_begin_c #endif #ifndef PyMPI_HAVE_MPI_File_read_ordered_begin_c static int PyMPI_File_read_ordered_begin_c(MPI_File a1, void *a2, MPI_Count a3, MPI_Datatype a4) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_File_read_ordered_begin(a1, a2, b3, a4); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_File_read_ordered_begin_c #define MPI_File_read_ordered_begin_c PyMPI_File_read_ordered_begin_c #endif #ifndef PyMPI_HAVE_MPI_File_write_ordered_begin_c static int PyMPI_File_write_ordered_begin_c(MPI_File a1, void *a2, MPI_Count a3, MPI_Datatype a4) { int ierr; int b3; PyMPICastValue(int, b3, MPI_Count, a3); ierr = MPI_File_write_ordered_begin(a1, a2, b3, a4); if (ierr != MPI_SUCCESS) goto fn_exit; fn_exit: return ierr; } #undef MPI_File_write_ordered_begin_c #define MPI_File_write_ordered_begin_c PyMPI_File_write_ordered_begin_c #endif #ifndef PyMPI_HAVE_MPI_File_get_type_extent_c static int PyMPI_File_get_type_extent_c(MPI_File a1, MPI_Datatype a2, MPI_Count *a3) { int ierr; MPI_Aint b3 = 0; MPI_Aint *p3 = a3 ? &b3 : NULL; ierr = MPI_File_get_type_extent(a1, a2, p3); if (ierr != MPI_SUCCESS) goto fn_exit; if (a3) *a3 = b3; fn_exit: return ierr; } #undef MPI_File_get_type_extent_c #define MPI_File_get_type_extent_c PyMPI_File_get_type_extent_c #endif #endif /* !PyMPI_LARGECNT_H */ mpi4py-4.0.3/src/lib-mpi/missing.h000066400000000000000000004344561475341043600167310ustar00rootroot00000000000000#ifndef PyMPI_MISSING_H #define PyMPI_MISSING_H #ifndef PyMPI_UNUSED # if defined(__GNUC__) # define PyMPI_UNUSED __attribute__ ((__unused__)) # else # define PyMPI_UNUSED # endif #endif #define PyMPI_ERR_UNAVAILABLE (-1431655766) /*0xAAAAAAAA*/ static PyMPI_UNUSED int PyMPI_UNAVAILABLE(const char *name,...) 
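/* PyMPI_UNAVAILABLE() is the catch-all stub behind every MPI routine this
   header has to fake: it ignores its arguments and returns the
   PyMPI_ERR_UNAVAILABLE sentinel defined above.  The rest of the header
   fills in compile-time fallbacks for symbols an older MPI implementation
   may lack: missing handle types become void* typedefs (with MPI_Aint and
   MPI_Offset falling back to long), missing constants get placeholder
   values, and missing functions expand to this stub.

   A minimal sketch (not part of the build) of what a caller sees when a
   routine is absent, using the MPI_Type_dup fallback defined further down:

     MPI_Datatype newtype;
     int rc = MPI_Type_dup(MPI_INT, &newtype);
     if (rc == PyMPI_ERR_UNAVAILABLE) { ... feature not provided ... }
*/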
{ (void)name; return PyMPI_ERR_UNAVAILABLE; } #ifndef PyMPI_HAVE_MPI_Aint #undef MPI_Aint typedef long PyMPI_MPI_Aint; #define MPI_Aint PyMPI_MPI_Aint #endif #ifndef PyMPI_HAVE_MPI_Offset #undef MPI_Offset typedef long PyMPI_MPI_Offset; #define MPI_Offset PyMPI_MPI_Offset #endif #ifndef PyMPI_HAVE_MPI_Count #undef MPI_Count typedef MPI_Offset PyMPI_MPI_Count; #define MPI_Count PyMPI_MPI_Count #endif #ifndef PyMPI_HAVE_MPI_Status #undef MPI_Status typedef struct PyMPI_MPI_Status { int MPI_SOURCE; int MPI_TAG; int MPI_ERROR; } PyMPI_MPI_Status; #define MPI_Status PyMPI_MPI_Status #endif #ifndef PyMPI_HAVE_MPI_Datatype #undef MPI_Datatype typedef void *PyMPI_MPI_Datatype; #define MPI_Datatype PyMPI_MPI_Datatype #endif #ifndef PyMPI_HAVE_MPI_Request #undef MPI_Request typedef void *PyMPI_MPI_Request; #define MPI_Request PyMPI_MPI_Request #endif #ifndef PyMPI_HAVE_MPI_Message #undef MPI_Message typedef void *PyMPI_MPI_Message; #define MPI_Message PyMPI_MPI_Message #endif #ifndef PyMPI_HAVE_MPI_Op #undef MPI_Op typedef void *PyMPI_MPI_Op; #define MPI_Op PyMPI_MPI_Op #endif #ifndef PyMPI_HAVE_MPI_Group #undef MPI_Group typedef void *PyMPI_MPI_Group; #define MPI_Group PyMPI_MPI_Group #endif #ifndef PyMPI_HAVE_MPI_Info #undef MPI_Info typedef void *PyMPI_MPI_Info; #define MPI_Info PyMPI_MPI_Info #endif #ifndef PyMPI_HAVE_MPI_Errhandler #undef MPI_Errhandler typedef void *PyMPI_MPI_Errhandler; #define MPI_Errhandler PyMPI_MPI_Errhandler #endif #ifndef PyMPI_HAVE_MPI_Session #undef MPI_Session typedef void *PyMPI_MPI_Session; #define MPI_Session PyMPI_MPI_Session #endif #ifndef PyMPI_HAVE_MPI_Comm #undef MPI_Comm typedef void *PyMPI_MPI_Comm; #define MPI_Comm PyMPI_MPI_Comm #endif #ifndef PyMPI_HAVE_MPI_Win #undef MPI_Win typedef void *PyMPI_MPI_Win; #define MPI_Win PyMPI_MPI_Win #endif #ifndef PyMPI_HAVE_MPI_File #undef MPI_File typedef void *PyMPI_MPI_File; #define MPI_File PyMPI_MPI_File #endif #ifndef PyMPI_HAVE_MPI_UNDEFINED #undef MPI_UNDEFINED #define MPI_UNDEFINED (-32766) #endif #ifndef PyMPI_HAVE_MPI_ANY_SOURCE #undef MPI_ANY_SOURCE #define MPI_ANY_SOURCE (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_ANY_TAG #undef MPI_ANY_TAG #define MPI_ANY_TAG (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_PROC_NULL #undef MPI_PROC_NULL #define MPI_PROC_NULL (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_ROOT #undef MPI_ROOT #define MPI_ROOT (MPI_PROC_NULL) #endif #ifndef PyMPI_HAVE_MPI_IDENT #undef MPI_IDENT #define MPI_IDENT (1) #endif #ifndef PyMPI_HAVE_MPI_CONGRUENT #undef MPI_CONGRUENT #define MPI_CONGRUENT (2) #endif #ifndef PyMPI_HAVE_MPI_SIMILAR #undef MPI_SIMILAR #define MPI_SIMILAR (3) #endif #ifndef PyMPI_HAVE_MPI_UNEQUAL #undef MPI_UNEQUAL #define MPI_UNEQUAL (4) #endif #ifndef PyMPI_HAVE_MPI_BOTTOM #undef MPI_BOTTOM #define MPI_BOTTOM ((void*)0) #endif #ifndef PyMPI_HAVE_MPI_IN_PLACE #undef MPI_IN_PLACE #define MPI_IN_PLACE ((void*)0) #endif #ifndef PyMPI_HAVE_MPI_KEYVAL_INVALID #undef MPI_KEYVAL_INVALID #define MPI_KEYVAL_INVALID (0) #endif #ifndef PyMPI_HAVE_MPI_MAX_OBJECT_NAME #undef MPI_MAX_OBJECT_NAME #define MPI_MAX_OBJECT_NAME (64) #endif #ifndef PyMPI_HAVE_MPI_DATATYPE_NULL #undef MPI_DATATYPE_NULL #define MPI_DATATYPE_NULL ((MPI_Datatype)0) #endif #ifndef PyMPI_HAVE_MPI_PACKED #undef MPI_PACKED #define MPI_PACKED ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_BYTE #undef MPI_BYTE #define MPI_BYTE ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_AINT #undef MPI_AINT #define MPI_AINT ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_OFFSET 
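/* Like every optional predefined datatype in this header, a missing
   MPI_OFFSET is simply aliased to MPI_DATATYPE_NULL so that the handle
   exists at compile time; presumably the null handle is what signals
   "not available" at run time. */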
#undef MPI_OFFSET #define MPI_OFFSET ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_COUNT #undef MPI_COUNT #define MPI_COUNT ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_CHAR #undef MPI_CHAR #define MPI_CHAR ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_WCHAR #undef MPI_WCHAR #define MPI_WCHAR ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_SIGNED_CHAR #undef MPI_SIGNED_CHAR #define MPI_SIGNED_CHAR ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_SHORT #undef MPI_SHORT #define MPI_SHORT ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_INT #undef MPI_INT #define MPI_INT ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_LONG #undef MPI_LONG #define MPI_LONG ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_LONG_LONG #undef MPI_LONG_LONG #define MPI_LONG_LONG ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_LONG_LONG_INT #undef MPI_LONG_LONG_INT #define MPI_LONG_LONG_INT ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_UNSIGNED_CHAR #undef MPI_UNSIGNED_CHAR #define MPI_UNSIGNED_CHAR ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_UNSIGNED_SHORT #undef MPI_UNSIGNED_SHORT #define MPI_UNSIGNED_SHORT ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_UNSIGNED #undef MPI_UNSIGNED #define MPI_UNSIGNED ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_UNSIGNED_LONG #undef MPI_UNSIGNED_LONG #define MPI_UNSIGNED_LONG ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_UNSIGNED_LONG_LONG #undef MPI_UNSIGNED_LONG_LONG #define MPI_UNSIGNED_LONG_LONG ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_FLOAT #undef MPI_FLOAT #define MPI_FLOAT ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_DOUBLE #undef MPI_DOUBLE #define MPI_DOUBLE ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_LONG_DOUBLE #undef MPI_LONG_DOUBLE #define MPI_LONG_DOUBLE ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_C_BOOL #undef MPI_C_BOOL #define MPI_C_BOOL ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_INT8_T #undef MPI_INT8_T #define MPI_INT8_T ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_INT16_T #undef MPI_INT16_T #define MPI_INT16_T ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_INT32_T #undef MPI_INT32_T #define MPI_INT32_T ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_INT64_T #undef MPI_INT64_T #define MPI_INT64_T ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_UINT8_T #undef MPI_UINT8_T #define MPI_UINT8_T ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_UINT16_T #undef MPI_UINT16_T #define MPI_UINT16_T ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_UINT32_T #undef MPI_UINT32_T #define MPI_UINT32_T ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_UINT64_T #undef MPI_UINT64_T #define MPI_UINT64_T ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_C_COMPLEX #undef MPI_C_COMPLEX #define MPI_C_COMPLEX ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_C_FLOAT_COMPLEX #undef MPI_C_FLOAT_COMPLEX #define MPI_C_FLOAT_COMPLEX ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_C_DOUBLE_COMPLEX #undef MPI_C_DOUBLE_COMPLEX #define MPI_C_DOUBLE_COMPLEX ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_C_LONG_DOUBLE_COMPLEX #undef MPI_C_LONG_DOUBLE_COMPLEX #define MPI_C_LONG_DOUBLE_COMPLEX 
((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_CXX_BOOL #undef MPI_CXX_BOOL #define MPI_CXX_BOOL ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_CXX_FLOAT_COMPLEX #undef MPI_CXX_FLOAT_COMPLEX #define MPI_CXX_FLOAT_COMPLEX ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_CXX_DOUBLE_COMPLEX #undef MPI_CXX_DOUBLE_COMPLEX #define MPI_CXX_DOUBLE_COMPLEX ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_CXX_LONG_DOUBLE_COMPLEX #undef MPI_CXX_LONG_DOUBLE_COMPLEX #define MPI_CXX_LONG_DOUBLE_COMPLEX ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_SHORT_INT #undef MPI_SHORT_INT #define MPI_SHORT_INT ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_2INT #undef MPI_2INT #define MPI_2INT ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_LONG_INT #undef MPI_LONG_INT #define MPI_LONG_INT ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_FLOAT_INT #undef MPI_FLOAT_INT #define MPI_FLOAT_INT ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_DOUBLE_INT #undef MPI_DOUBLE_INT #define MPI_DOUBLE_INT ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_LONG_DOUBLE_INT #undef MPI_LONG_DOUBLE_INT #define MPI_LONG_DOUBLE_INT ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_CHARACTER #undef MPI_CHARACTER #define MPI_CHARACTER ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_LOGICAL #undef MPI_LOGICAL #define MPI_LOGICAL ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_INTEGER #undef MPI_INTEGER #define MPI_INTEGER ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_REAL #undef MPI_REAL #define MPI_REAL ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_DOUBLE_PRECISION #undef MPI_DOUBLE_PRECISION #define MPI_DOUBLE_PRECISION ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_COMPLEX #undef MPI_COMPLEX #define MPI_COMPLEX ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_DOUBLE_COMPLEX #undef MPI_DOUBLE_COMPLEX #define MPI_DOUBLE_COMPLEX ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_LOGICAL1 #undef MPI_LOGICAL1 #define MPI_LOGICAL1 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_LOGICAL2 #undef MPI_LOGICAL2 #define MPI_LOGICAL2 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_LOGICAL4 #undef MPI_LOGICAL4 #define MPI_LOGICAL4 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_LOGICAL8 #undef MPI_LOGICAL8 #define MPI_LOGICAL8 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_INTEGER1 #undef MPI_INTEGER1 #define MPI_INTEGER1 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_INTEGER2 #undef MPI_INTEGER2 #define MPI_INTEGER2 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_INTEGER4 #undef MPI_INTEGER4 #define MPI_INTEGER4 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_INTEGER8 #undef MPI_INTEGER8 #define MPI_INTEGER8 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_INTEGER16 #undef MPI_INTEGER16 #define MPI_INTEGER16 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_REAL2 #undef MPI_REAL2 #define MPI_REAL2 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_REAL4 #undef MPI_REAL4 #define MPI_REAL4 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_REAL8 #undef MPI_REAL8 #define MPI_REAL8 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_REAL16 #undef MPI_REAL16 #define MPI_REAL16 
((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_COMPLEX4 #undef MPI_COMPLEX4 #define MPI_COMPLEX4 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_COMPLEX8 #undef MPI_COMPLEX8 #define MPI_COMPLEX8 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_COMPLEX16 #undef MPI_COMPLEX16 #define MPI_COMPLEX16 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_COMPLEX32 #undef MPI_COMPLEX32 #define MPI_COMPLEX32 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_Get_address #undef MPI_Get_address #define MPI_Get_address MPI_Address #endif #ifndef PyMPI_HAVE_MPI_Aint_add #undef MPI_Aint_add #define MPI_Aint_add(a1,a2) PyMPI_UNAVAILABLE("MPI_Aint_add",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Aint_diff #undef MPI_Aint_diff #define MPI_Aint_diff(a1,a2) PyMPI_UNAVAILABLE("MPI_Aint_diff",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Type_dup #undef MPI_Type_dup #define MPI_Type_dup(a1,a2) PyMPI_UNAVAILABLE("MPI_Type_dup",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Type_contiguous #undef MPI_Type_contiguous #define MPI_Type_contiguous(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Type_contiguous",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Type_vector #undef MPI_Type_vector #define MPI_Type_vector(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Type_vector",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Type_indexed #undef MPI_Type_indexed #define MPI_Type_indexed(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Type_indexed",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Type_create_indexed_block #undef MPI_Type_create_indexed_block #define MPI_Type_create_indexed_block(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Type_create_indexed_block",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_ORDER_C #undef MPI_ORDER_C #define MPI_ORDER_C (0) #endif #ifndef PyMPI_HAVE_MPI_ORDER_FORTRAN #undef MPI_ORDER_FORTRAN #define MPI_ORDER_FORTRAN (1) #endif #ifndef PyMPI_HAVE_MPI_Type_create_subarray #undef MPI_Type_create_subarray #define MPI_Type_create_subarray(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Type_create_subarray",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_DISTRIBUTE_NONE #undef MPI_DISTRIBUTE_NONE #define MPI_DISTRIBUTE_NONE (0) #endif #ifndef PyMPI_HAVE_MPI_DISTRIBUTE_BLOCK #undef MPI_DISTRIBUTE_BLOCK #define MPI_DISTRIBUTE_BLOCK (1) #endif #ifndef PyMPI_HAVE_MPI_DISTRIBUTE_CYCLIC #undef MPI_DISTRIBUTE_CYCLIC #define MPI_DISTRIBUTE_CYCLIC (2) #endif #ifndef PyMPI_HAVE_MPI_DISTRIBUTE_DFLT_DARG #undef MPI_DISTRIBUTE_DFLT_DARG #define MPI_DISTRIBUTE_DFLT_DARG (4) #endif #ifndef PyMPI_HAVE_MPI_Type_create_darray #undef MPI_Type_create_darray #define MPI_Type_create_darray(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Type_create_darray",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Type_create_hvector #undef MPI_Type_create_hvector #define MPI_Type_create_hvector MPI_Type_hvector #endif #ifndef PyMPI_HAVE_MPI_Type_create_hindexed #undef MPI_Type_create_hindexed #define MPI_Type_create_hindexed MPI_Type_hindexed #endif #ifndef PyMPI_HAVE_MPI_Type_create_hindexed_block #undef MPI_Type_create_hindexed_block #define MPI_Type_create_hindexed_block(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Type_create_hindexed_block",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Type_create_struct #undef MPI_Type_create_struct #define MPI_Type_create_struct MPI_Type_struct #endif #ifndef PyMPI_HAVE_MPI_Type_create_resized #undef MPI_Type_create_resized #define MPI_Type_create_resized(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Type_create_resized",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Type_size 
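/* Function fallbacks in this header take one of two forms: when an older
   spelling of the routine exists it is substituted directly (as with
   MPI_Get_address -> MPI_Address and MPI_Type_create_hvector ->
   MPI_Type_hvector above), otherwise the call is routed to
   PyMPI_UNAVAILABLE() and fails with PyMPI_ERR_UNAVAILABLE at run time. */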
#undef MPI_Type_size #define MPI_Type_size(a1,a2) PyMPI_UNAVAILABLE("MPI_Type_size",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Type_get_extent #undef MPI_Type_get_extent #define MPI_Type_get_extent(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Type_get_extent",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Type_get_true_extent #undef MPI_Type_get_true_extent #define MPI_Type_get_true_extent(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Type_get_true_extent",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Type_size_x #undef MPI_Type_size_x #define MPI_Type_size_x(a1,a2) PyMPI_UNAVAILABLE("MPI_Type_size_x",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Type_get_extent_x #undef MPI_Type_get_extent_x #define MPI_Type_get_extent_x(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Type_get_extent_x",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Type_get_true_extent_x #undef MPI_Type_get_true_extent_x #define MPI_Type_get_true_extent_x(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Type_get_true_extent_x",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Type_create_f90_integer #undef MPI_Type_create_f90_integer #define MPI_Type_create_f90_integer(a1,a2) PyMPI_UNAVAILABLE("MPI_Type_create_f90_integer",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Type_create_f90_real #undef MPI_Type_create_f90_real #define MPI_Type_create_f90_real(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Type_create_f90_real",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Type_create_f90_complex #undef MPI_Type_create_f90_complex #define MPI_Type_create_f90_complex(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Type_create_f90_complex",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_TYPECLASS_INTEGER #undef MPI_TYPECLASS_INTEGER #define MPI_TYPECLASS_INTEGER (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_TYPECLASS_REAL #undef MPI_TYPECLASS_REAL #define MPI_TYPECLASS_REAL (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_TYPECLASS_COMPLEX #undef MPI_TYPECLASS_COMPLEX #define MPI_TYPECLASS_COMPLEX (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_Type_match_size #undef MPI_Type_match_size #define MPI_Type_match_size(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Type_match_size",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Type_get_value_index #undef MPI_Type_get_value_index #define MPI_Type_get_value_index(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Type_get_value_index",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Type_commit #undef MPI_Type_commit #define MPI_Type_commit(a1) PyMPI_UNAVAILABLE("MPI_Type_commit",a1) #endif #ifndef PyMPI_HAVE_MPI_Type_free #undef MPI_Type_free #define MPI_Type_free(a1) PyMPI_UNAVAILABLE("MPI_Type_free",a1) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_NAMED #undef MPI_COMBINER_NAMED #define MPI_COMBINER_NAMED (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_DUP #undef MPI_COMBINER_DUP #define MPI_COMBINER_DUP (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_CONTIGUOUS #undef MPI_COMBINER_CONTIGUOUS #define MPI_COMBINER_CONTIGUOUS (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_VECTOR #undef MPI_COMBINER_VECTOR #define MPI_COMBINER_VECTOR (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_HVECTOR #undef MPI_COMBINER_HVECTOR #define MPI_COMBINER_HVECTOR (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_INDEXED #undef MPI_COMBINER_INDEXED #define MPI_COMBINER_INDEXED (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_HINDEXED #undef MPI_COMBINER_HINDEXED #define MPI_COMBINER_HINDEXED (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_INDEXED_BLOCK #undef MPI_COMBINER_INDEXED_BLOCK #define MPI_COMBINER_INDEXED_BLOCK (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_HINDEXED_BLOCK #undef MPI_COMBINER_HINDEXED_BLOCK #define MPI_COMBINER_HINDEXED_BLOCK 
(MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_STRUCT #undef MPI_COMBINER_STRUCT #define MPI_COMBINER_STRUCT (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_SUBARRAY #undef MPI_COMBINER_SUBARRAY #define MPI_COMBINER_SUBARRAY (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_DARRAY #undef MPI_COMBINER_DARRAY #define MPI_COMBINER_DARRAY (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_F90_REAL #undef MPI_COMBINER_F90_REAL #define MPI_COMBINER_F90_REAL (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_F90_COMPLEX #undef MPI_COMBINER_F90_COMPLEX #define MPI_COMBINER_F90_COMPLEX (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_F90_INTEGER #undef MPI_COMBINER_F90_INTEGER #define MPI_COMBINER_F90_INTEGER (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_RESIZED #undef MPI_COMBINER_RESIZED #define MPI_COMBINER_RESIZED (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_VALUE_INDEX #undef MPI_COMBINER_VALUE_INDEX #define MPI_COMBINER_VALUE_INDEX (MPI_COMBINER_NAMED) #endif #ifndef PyMPI_HAVE_MPI_Type_get_envelope #undef MPI_Type_get_envelope #define MPI_Type_get_envelope(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Type_get_envelope",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Type_get_contents #undef MPI_Type_get_contents #define MPI_Type_get_contents(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Type_get_contents",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Pack #undef MPI_Pack #define MPI_Pack(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Pack",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Unpack #undef MPI_Unpack #define MPI_Unpack(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Unpack",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Pack_size #undef MPI_Pack_size #define MPI_Pack_size(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Pack_size",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Pack_external #undef MPI_Pack_external #define MPI_Pack_external(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Pack_external",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Unpack_external #undef MPI_Unpack_external #define MPI_Unpack_external(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Unpack_external",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Pack_external_size #undef MPI_Pack_external_size #define MPI_Pack_external_size(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Pack_external_size",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Type_get_name #undef MPI_Type_get_name #define MPI_Type_get_name(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Type_get_name",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Type_set_name #undef MPI_Type_set_name #define MPI_Type_set_name(a1,a2) PyMPI_UNAVAILABLE("MPI_Type_set_name",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Type_get_attr #undef MPI_Type_get_attr #define MPI_Type_get_attr(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Type_get_attr",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Type_set_attr #undef MPI_Type_set_attr #define MPI_Type_set_attr(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Type_set_attr",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Type_delete_attr #undef MPI_Type_delete_attr #define MPI_Type_delete_attr(a1,a2) PyMPI_UNAVAILABLE("MPI_Type_delete_attr",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Type_copy_attr_function #undef MPI_Type_copy_attr_function typedef int (MPIAPI PyMPI_MPI_Type_copy_attr_function)(MPI_Datatype,int,void*,void*,void*,int*); #define MPI_Type_copy_attr_function PyMPI_MPI_Type_copy_attr_function #endif #ifndef PyMPI_HAVE_MPI_Type_delete_attr_function #undef MPI_Type_delete_attr_function typedef int (MPIAPI 
PyMPI_MPI_Type_delete_attr_function)(MPI_Datatype,int,void*,void*); #define MPI_Type_delete_attr_function PyMPI_MPI_Type_delete_attr_function #endif #ifndef PyMPI_HAVE_MPI_TYPE_NULL_COPY_FN #undef MPI_TYPE_NULL_COPY_FN #define MPI_TYPE_NULL_COPY_FN ((MPI_Type_copy_attr_function*)0) #endif #ifndef PyMPI_HAVE_MPI_TYPE_DUP_FN #undef MPI_TYPE_DUP_FN #define MPI_TYPE_DUP_FN ((MPI_Type_copy_attr_function*)0) #endif #ifndef PyMPI_HAVE_MPI_TYPE_NULL_DELETE_FN #undef MPI_TYPE_NULL_DELETE_FN #define MPI_TYPE_NULL_DELETE_FN ((MPI_Type_delete_attr_function*)0) #endif #ifndef PyMPI_HAVE_MPI_Type_create_keyval #undef MPI_Type_create_keyval #define MPI_Type_create_keyval(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Type_create_keyval",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Type_free_keyval #undef MPI_Type_free_keyval #define MPI_Type_free_keyval(a1) PyMPI_UNAVAILABLE("MPI_Type_free_keyval",a1) #endif #ifndef PyMPI_HAVE_MPI_Type_contiguous_c #undef MPI_Type_contiguous_c #define MPI_Type_contiguous_c(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Type_contiguous_c",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Type_vector_c #undef MPI_Type_vector_c #define MPI_Type_vector_c(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Type_vector_c",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Type_indexed_c #undef MPI_Type_indexed_c #define MPI_Type_indexed_c(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Type_indexed_c",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Type_create_indexed_block_c #undef MPI_Type_create_indexed_block_c #define MPI_Type_create_indexed_block_c(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Type_create_indexed_block_c",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Type_create_subarray_c #undef MPI_Type_create_subarray_c #define MPI_Type_create_subarray_c(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Type_create_subarray_c",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Type_create_darray_c #undef MPI_Type_create_darray_c #define MPI_Type_create_darray_c(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Type_create_darray_c",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Type_create_hvector_c #undef MPI_Type_create_hvector_c #define MPI_Type_create_hvector_c(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Type_create_hvector_c",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Type_create_hindexed_c #undef MPI_Type_create_hindexed_c #define MPI_Type_create_hindexed_c(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Type_create_hindexed_c",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Type_create_hindexed_block_c #undef MPI_Type_create_hindexed_block_c #define MPI_Type_create_hindexed_block_c(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Type_create_hindexed_block_c",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Type_create_struct_c #undef MPI_Type_create_struct_c #define MPI_Type_create_struct_c(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Type_create_struct_c",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Type_create_resized_c #undef MPI_Type_create_resized_c #define MPI_Type_create_resized_c(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Type_create_resized_c",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Type_size_c #undef MPI_Type_size_c #define MPI_Type_size_c MPI_Type_size_x #endif #ifndef PyMPI_HAVE_MPI_Type_get_extent_c #undef MPI_Type_get_extent_c #define MPI_Type_get_extent_c MPI_Type_get_extent_x #endif #ifndef PyMPI_HAVE_MPI_Type_get_true_extent_c #undef MPI_Type_get_true_extent_c #define MPI_Type_get_true_extent_c MPI_Type_get_true_extent_x #endif #ifndef PyMPI_HAVE_MPI_Type_get_envelope_c #undef MPI_Type_get_envelope_c #define 
MPI_Type_get_envelope_c(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Type_get_envelope_c",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Type_get_contents_c #undef MPI_Type_get_contents_c #define MPI_Type_get_contents_c(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Type_get_contents_c",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Pack_c #undef MPI_Pack_c #define MPI_Pack_c(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Pack_c",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Unpack_c #undef MPI_Unpack_c #define MPI_Unpack_c(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Unpack_c",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Pack_size_c #undef MPI_Pack_size_c #define MPI_Pack_size_c(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Pack_size_c",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Pack_external_c #undef MPI_Pack_external_c #define MPI_Pack_external_c(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Pack_external_c",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Unpack_external_c #undef MPI_Unpack_external_c #define MPI_Unpack_external_c(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Unpack_external_c",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Pack_external_size_c #undef MPI_Pack_external_size_c #define MPI_Pack_external_size_c(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Pack_external_size_c",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_STATUS_IGNORE #undef MPI_STATUS_IGNORE #define MPI_STATUS_IGNORE ((MPI_Status*)0) #endif #ifndef PyMPI_HAVE_MPI_STATUSES_IGNORE #undef MPI_STATUSES_IGNORE #define MPI_STATUSES_IGNORE ((MPI_Status*)0) #endif #ifndef PyMPI_HAVE_MPI_Get_count #undef MPI_Get_count #define MPI_Get_count(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Get_count",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Get_elements #undef MPI_Get_elements #define MPI_Get_elements(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Get_elements",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Status_set_elements #undef MPI_Status_set_elements #define MPI_Status_set_elements(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Status_set_elements",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Get_elements_x #undef MPI_Get_elements_x #define MPI_Get_elements_x(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Get_elements_x",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Status_set_elements_x #undef MPI_Status_set_elements_x #define MPI_Status_set_elements_x(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Status_set_elements_x",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Test_cancelled #undef MPI_Test_cancelled #define MPI_Test_cancelled(a1,a2) PyMPI_UNAVAILABLE("MPI_Test_cancelled",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Status_set_cancelled #undef MPI_Status_set_cancelled #define MPI_Status_set_cancelled(a1,a2) PyMPI_UNAVAILABLE("MPI_Status_set_cancelled",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Get_count_c #undef MPI_Get_count_c #define MPI_Get_count_c(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Get_count_c",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Get_elements_c #undef MPI_Get_elements_c #define MPI_Get_elements_c MPI_Get_elements_x #endif #ifndef PyMPI_HAVE_MPI_Status_set_elements_c #undef MPI_Status_set_elements_c #define MPI_Status_set_elements_c MPI_Status_set_elements_x #endif #ifndef PyMPI_HAVE_MPI_Status_get_source #undef MPI_Status_get_source #define MPI_Status_get_source(a1,a2) PyMPI_UNAVAILABLE("MPI_Status_get_source",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Status_set_source #undef MPI_Status_set_source #define MPI_Status_set_source(a1,a2) PyMPI_UNAVAILABLE("MPI_Status_set_source",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Status_get_tag #undef MPI_Status_get_tag #define MPI_Status_get_tag(a1,a2) 
PyMPI_UNAVAILABLE("MPI_Status_get_tag",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Status_set_tag #undef MPI_Status_set_tag #define MPI_Status_set_tag(a1,a2) PyMPI_UNAVAILABLE("MPI_Status_set_tag",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Status_get_error #undef MPI_Status_get_error #define MPI_Status_get_error(a1,a2) PyMPI_UNAVAILABLE("MPI_Status_get_error",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Status_set_error #undef MPI_Status_set_error #define MPI_Status_set_error(a1,a2) PyMPI_UNAVAILABLE("MPI_Status_set_error",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_REQUEST_NULL #undef MPI_REQUEST_NULL #define MPI_REQUEST_NULL ((MPI_Request)0) #endif #ifndef PyMPI_HAVE_MPI_Wait #undef MPI_Wait #define MPI_Wait(a1,a2) PyMPI_UNAVAILABLE("MPI_Wait",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Test #undef MPI_Test #define MPI_Test(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Test",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Request_get_status #undef MPI_Request_get_status #define MPI_Request_get_status(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Request_get_status",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Waitany #undef MPI_Waitany #define MPI_Waitany(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Waitany",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Testany #undef MPI_Testany #define MPI_Testany(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Testany",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Request_get_status_any #undef MPI_Request_get_status_any #define MPI_Request_get_status_any(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Request_get_status_any",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Waitall #undef MPI_Waitall #define MPI_Waitall(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Waitall",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Testall #undef MPI_Testall #define MPI_Testall(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Testall",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Request_get_status_all #undef MPI_Request_get_status_all #define MPI_Request_get_status_all(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Request_get_status_all",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Waitsome #undef MPI_Waitsome #define MPI_Waitsome(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Waitsome",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Testsome #undef MPI_Testsome #define MPI_Testsome(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Testsome",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Request_get_status_some #undef MPI_Request_get_status_some #define MPI_Request_get_status_some(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Request_get_status_some",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Cancel #undef MPI_Cancel #define MPI_Cancel(a1) PyMPI_UNAVAILABLE("MPI_Cancel",a1) #endif #ifndef PyMPI_HAVE_MPI_Request_free #undef MPI_Request_free #define MPI_Request_free(a1) PyMPI_UNAVAILABLE("MPI_Request_free",a1) #endif #ifndef PyMPI_HAVE_MPI_Start #undef MPI_Start #define MPI_Start(a1) PyMPI_UNAVAILABLE("MPI_Start",a1) #endif #ifndef PyMPI_HAVE_MPI_Startall #undef MPI_Startall #define MPI_Startall(a1,a2) PyMPI_UNAVAILABLE("MPI_Startall",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Pready #undef MPI_Pready #define MPI_Pready(a1,a2) PyMPI_UNAVAILABLE("MPI_Pready",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Pready_range #undef MPI_Pready_range #define MPI_Pready_range(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Pready_range",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Pready_list #undef MPI_Pready_list #define MPI_Pready_list(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Pready_list",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Parrived #undef MPI_Parrived #define MPI_Parrived(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Parrived",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Grequest_cancel_function #undef 
MPI_Grequest_cancel_function typedef int (MPIAPI PyMPI_MPI_Grequest_cancel_function)(void*,int); #define MPI_Grequest_cancel_function PyMPI_MPI_Grequest_cancel_function #endif #ifndef PyMPI_HAVE_MPI_Grequest_free_function #undef MPI_Grequest_free_function typedef int (MPIAPI PyMPI_MPI_Grequest_free_function)(void*); #define MPI_Grequest_free_function PyMPI_MPI_Grequest_free_function #endif #ifndef PyMPI_HAVE_MPI_Grequest_query_function #undef MPI_Grequest_query_function typedef int (MPIAPI PyMPI_MPI_Grequest_query_function)(void*,MPI_Status*); #define MPI_Grequest_query_function PyMPI_MPI_Grequest_query_function #endif #ifndef PyMPI_HAVE_MPI_Grequest_start #undef MPI_Grequest_start #define MPI_Grequest_start(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Grequest_start",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Grequest_complete #undef MPI_Grequest_complete #define MPI_Grequest_complete(a1) PyMPI_UNAVAILABLE("MPI_Grequest_complete",a1) #endif #ifndef PyMPI_HAVE_MPI_OP_NULL #undef MPI_OP_NULL #define MPI_OP_NULL ((MPI_Op)0) #endif #ifndef PyMPI_HAVE_MPI_MAX #undef MPI_MAX #define MPI_MAX ((MPI_Op)MPI_OP_NULL) #endif #ifndef PyMPI_HAVE_MPI_MIN #undef MPI_MIN #define MPI_MIN ((MPI_Op)MPI_OP_NULL) #endif #ifndef PyMPI_HAVE_MPI_SUM #undef MPI_SUM #define MPI_SUM ((MPI_Op)MPI_OP_NULL) #endif #ifndef PyMPI_HAVE_MPI_PROD #undef MPI_PROD #define MPI_PROD ((MPI_Op)MPI_OP_NULL) #endif #ifndef PyMPI_HAVE_MPI_LAND #undef MPI_LAND #define MPI_LAND ((MPI_Op)MPI_OP_NULL) #endif #ifndef PyMPI_HAVE_MPI_BAND #undef MPI_BAND #define MPI_BAND ((MPI_Op)MPI_OP_NULL) #endif #ifndef PyMPI_HAVE_MPI_LOR #undef MPI_LOR #define MPI_LOR ((MPI_Op)MPI_OP_NULL) #endif #ifndef PyMPI_HAVE_MPI_BOR #undef MPI_BOR #define MPI_BOR ((MPI_Op)MPI_OP_NULL) #endif #ifndef PyMPI_HAVE_MPI_LXOR #undef MPI_LXOR #define MPI_LXOR ((MPI_Op)MPI_OP_NULL) #endif #ifndef PyMPI_HAVE_MPI_BXOR #undef MPI_BXOR #define MPI_BXOR ((MPI_Op)MPI_OP_NULL) #endif #ifndef PyMPI_HAVE_MPI_MAXLOC #undef MPI_MAXLOC #define MPI_MAXLOC ((MPI_Op)MPI_OP_NULL) #endif #ifndef PyMPI_HAVE_MPI_MINLOC #undef MPI_MINLOC #define MPI_MINLOC ((MPI_Op)MPI_OP_NULL) #endif #ifndef PyMPI_HAVE_MPI_REPLACE #undef MPI_REPLACE #define MPI_REPLACE ((MPI_Op)MPI_OP_NULL) #endif #ifndef PyMPI_HAVE_MPI_NO_OP #undef MPI_NO_OP #define MPI_NO_OP ((MPI_Op)MPI_OP_NULL) #endif #ifndef PyMPI_HAVE_MPI_Op_free #undef MPI_Op_free #define MPI_Op_free(a1) PyMPI_UNAVAILABLE("MPI_Op_free",a1) #endif #ifndef PyMPI_HAVE_MPI_User_function #undef MPI_User_function typedef void (MPIAPI PyMPI_MPI_User_function)(void*,void*,int*,MPI_Datatype*); #define MPI_User_function PyMPI_MPI_User_function #endif #ifndef PyMPI_HAVE_MPI_Op_create #undef MPI_Op_create #define MPI_Op_create(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Op_create",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Op_commutative #undef MPI_Op_commutative #define MPI_Op_commutative(a1,a2) PyMPI_UNAVAILABLE("MPI_Op_commutative",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_User_function_c #undef MPI_User_function_c typedef void (MPIAPI PyMPI_MPI_User_function_c)(void*,void*,MPI_Count*,MPI_Datatype*); #define MPI_User_function_c PyMPI_MPI_User_function_c #endif #ifndef PyMPI_HAVE_MPI_Op_create_c #undef MPI_Op_create_c #define MPI_Op_create_c(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Op_create_c",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_GROUP_NULL #undef MPI_GROUP_NULL #define MPI_GROUP_NULL ((MPI_Group)0) #endif #ifndef PyMPI_HAVE_MPI_GROUP_EMPTY #undef MPI_GROUP_EMPTY #define MPI_GROUP_EMPTY ((MPI_Group)1) #endif #ifndef PyMPI_HAVE_MPI_Group_free #undef MPI_Group_free #define 
MPI_Group_free(a1) PyMPI_UNAVAILABLE("MPI_Group_free",a1) #endif #ifndef PyMPI_HAVE_MPI_Group_size #undef MPI_Group_size #define MPI_Group_size(a1,a2) PyMPI_UNAVAILABLE("MPI_Group_size",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Group_rank #undef MPI_Group_rank #define MPI_Group_rank(a1,a2) PyMPI_UNAVAILABLE("MPI_Group_rank",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Group_translate_ranks #undef MPI_Group_translate_ranks #define MPI_Group_translate_ranks(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Group_translate_ranks",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Group_compare #undef MPI_Group_compare #define MPI_Group_compare(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Group_compare",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Group_union #undef MPI_Group_union #define MPI_Group_union(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Group_union",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Group_intersection #undef MPI_Group_intersection #define MPI_Group_intersection(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Group_intersection",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Group_difference #undef MPI_Group_difference #define MPI_Group_difference(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Group_difference",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Group_incl #undef MPI_Group_incl #define MPI_Group_incl(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Group_incl",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Group_excl #undef MPI_Group_excl #define MPI_Group_excl(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Group_excl",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Group_range_incl #undef MPI_Group_range_incl #define MPI_Group_range_incl(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Group_range_incl",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Group_range_excl #undef MPI_Group_range_excl #define MPI_Group_range_excl(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Group_range_excl",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_INFO_NULL #undef MPI_INFO_NULL #define MPI_INFO_NULL ((MPI_Info)0) #endif #ifndef PyMPI_HAVE_MPI_INFO_ENV #undef MPI_INFO_ENV #define MPI_INFO_ENV ((MPI_Info)MPI_INFO_NULL) #endif #ifndef PyMPI_HAVE_MPI_Info_free #undef MPI_Info_free #define MPI_Info_free(a1) PyMPI_UNAVAILABLE("MPI_Info_free",a1) #endif #ifndef PyMPI_HAVE_MPI_Info_create #undef MPI_Info_create #define MPI_Info_create(a1) PyMPI_UNAVAILABLE("MPI_Info_create",a1) #endif #ifndef PyMPI_HAVE_MPI_Info_dup #undef MPI_Info_dup #define MPI_Info_dup(a1,a2) PyMPI_UNAVAILABLE("MPI_Info_dup",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Info_create_env #undef MPI_Info_create_env #define MPI_Info_create_env(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Info_create_env",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_MAX_INFO_KEY #undef MPI_MAX_INFO_KEY #define MPI_MAX_INFO_KEY (1) #endif #ifndef PyMPI_HAVE_MPI_MAX_INFO_VAL #undef MPI_MAX_INFO_VAL #define MPI_MAX_INFO_VAL (1) #endif #ifndef PyMPI_HAVE_MPI_Info_get_string #undef MPI_Info_get_string #define MPI_Info_get_string(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Info_get_string",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Info_set #undef MPI_Info_set #define MPI_Info_set(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Info_set",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Info_delete #undef MPI_Info_delete #define MPI_Info_delete(a1,a2) PyMPI_UNAVAILABLE("MPI_Info_delete",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Info_get_nkeys #undef MPI_Info_get_nkeys #define MPI_Info_get_nkeys(a1,a2) PyMPI_UNAVAILABLE("MPI_Info_get_nkeys",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Info_get_nthkey #undef MPI_Info_get_nthkey #define MPI_Info_get_nthkey(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Info_get_nthkey",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_ERRHANDLER_NULL #undef 
MPI_ERRHANDLER_NULL #define MPI_ERRHANDLER_NULL ((MPI_Errhandler)0) #endif #ifndef PyMPI_HAVE_MPI_ERRORS_RETURN #undef MPI_ERRORS_RETURN #define MPI_ERRORS_RETURN ((MPI_Errhandler)MPI_ERRHANDLER_NULL) #endif #ifndef PyMPI_HAVE_MPI_ERRORS_ABORT #undef MPI_ERRORS_ABORT #define MPI_ERRORS_ABORT ((MPI_Errhandler)MPI_ERRHANDLER_NULL) #endif #ifndef PyMPI_HAVE_MPI_ERRORS_ARE_FATAL #undef MPI_ERRORS_ARE_FATAL #define MPI_ERRORS_ARE_FATAL ((MPI_Errhandler)MPI_ERRHANDLER_NULL) #endif #ifndef PyMPI_HAVE_MPI_Errhandler_free #undef MPI_Errhandler_free #define MPI_Errhandler_free(a1) PyMPI_UNAVAILABLE("MPI_Errhandler_free",a1) #endif #ifndef PyMPI_HAVE_MPI_SESSION_NULL #undef MPI_SESSION_NULL #define MPI_SESSION_NULL ((MPI_Session)0) #endif #ifndef PyMPI_HAVE_MPI_MAX_PSET_NAME_LEN #undef MPI_MAX_PSET_NAME_LEN #define MPI_MAX_PSET_NAME_LEN (1) #endif #ifndef PyMPI_HAVE_MPI_Session_init #undef MPI_Session_init #define MPI_Session_init(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Session_init",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Session_finalize #undef MPI_Session_finalize #define MPI_Session_finalize(a1) PyMPI_UNAVAILABLE("MPI_Session_finalize",a1) #endif #ifndef PyMPI_HAVE_MPI_Session_get_num_psets #undef MPI_Session_get_num_psets #define MPI_Session_get_num_psets(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Session_get_num_psets",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Session_get_nth_pset #undef MPI_Session_get_nth_pset #define MPI_Session_get_nth_pset(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Session_get_nth_pset",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Session_get_info #undef MPI_Session_get_info #define MPI_Session_get_info(a1,a2) PyMPI_UNAVAILABLE("MPI_Session_get_info",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Session_get_pset_info #undef MPI_Session_get_pset_info #define MPI_Session_get_pset_info(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Session_get_pset_info",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Group_from_session_pset #undef MPI_Group_from_session_pset #define MPI_Group_from_session_pset(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Group_from_session_pset",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Session_errhandler_function #undef MPI_Session_errhandler_function typedef void (MPIAPI PyMPI_MPI_Session_errhandler_function)(MPI_Session*,int*,...); #define MPI_Session_errhandler_function PyMPI_MPI_Session_errhandler_function #endif #ifndef PyMPI_HAVE_MPI_Session_create_errhandler #undef MPI_Session_create_errhandler #define MPI_Session_create_errhandler(a1,a2) PyMPI_UNAVAILABLE("MPI_Session_create_errhandler",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Session_get_errhandler #undef MPI_Session_get_errhandler #define MPI_Session_get_errhandler(a1,a2) PyMPI_UNAVAILABLE("MPI_Session_get_errhandler",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Session_set_errhandler #undef MPI_Session_set_errhandler #define MPI_Session_set_errhandler(a1,a2) PyMPI_UNAVAILABLE("MPI_Session_set_errhandler",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Session_call_errhandler #undef MPI_Session_call_errhandler #define MPI_Session_call_errhandler(a1,a2) PyMPI_UNAVAILABLE("MPI_Session_call_errhandler",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_COMM_NULL #undef MPI_COMM_NULL #define MPI_COMM_NULL ((MPI_Comm)0) #endif #ifndef PyMPI_HAVE_MPI_COMM_SELF #undef MPI_COMM_SELF #define MPI_COMM_SELF ((MPI_Comm)MPI_COMM_NULL) #endif #ifndef PyMPI_HAVE_MPI_COMM_WORLD #undef MPI_COMM_WORLD #define MPI_COMM_WORLD ((MPI_Comm)MPI_COMM_NULL) #endif #ifndef PyMPI_HAVE_MPI_Comm_free #undef MPI_Comm_free #define MPI_Comm_free(a1) PyMPI_UNAVAILABLE("MPI_Comm_free",a1) #endif #ifndef PyMPI_HAVE_MPI_Comm_group 
#undef MPI_Comm_group #define MPI_Comm_group(a1,a2) PyMPI_UNAVAILABLE("MPI_Comm_group",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Comm_size #undef MPI_Comm_size #define MPI_Comm_size(a1,a2) PyMPI_UNAVAILABLE("MPI_Comm_size",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Comm_rank #undef MPI_Comm_rank #define MPI_Comm_rank(a1,a2) PyMPI_UNAVAILABLE("MPI_Comm_rank",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Comm_compare #undef MPI_Comm_compare #define MPI_Comm_compare(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Comm_compare",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Topo_test #undef MPI_Topo_test #define MPI_Topo_test(a1,a2) PyMPI_UNAVAILABLE("MPI_Topo_test",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Comm_test_inter #undef MPI_Comm_test_inter #define MPI_Comm_test_inter(a1,a2) PyMPI_UNAVAILABLE("MPI_Comm_test_inter",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Abort #undef MPI_Abort #define MPI_Abort(a1,a2) PyMPI_UNAVAILABLE("MPI_Abort",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_BSEND_OVERHEAD #undef MPI_BSEND_OVERHEAD #define MPI_BSEND_OVERHEAD (0) #endif #ifndef PyMPI_HAVE_MPI_BUFFER_AUTOMATIC #undef MPI_BUFFER_AUTOMATIC #define MPI_BUFFER_AUTOMATIC ((void*)0) #endif #ifndef PyMPI_HAVE_MPI_Buffer_attach #undef MPI_Buffer_attach #define MPI_Buffer_attach(a1,a2) PyMPI_UNAVAILABLE("MPI_Buffer_attach",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Buffer_detach #undef MPI_Buffer_detach #define MPI_Buffer_detach(a1,a2) PyMPI_UNAVAILABLE("MPI_Buffer_detach",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Buffer_flush #undef MPI_Buffer_flush #define MPI_Buffer_flush() PyMPI_UNAVAILABLE("MPI_Buffer_flush") #endif #ifndef PyMPI_HAVE_MPI_Buffer_iflush #undef MPI_Buffer_iflush #define MPI_Buffer_iflush(a1) PyMPI_UNAVAILABLE("MPI_Buffer_iflush",a1) #endif #ifndef PyMPI_HAVE_MPI_Comm_attach_buffer #undef MPI_Comm_attach_buffer #define MPI_Comm_attach_buffer(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Comm_attach_buffer",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Comm_detach_buffer #undef MPI_Comm_detach_buffer #define MPI_Comm_detach_buffer(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Comm_detach_buffer",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Comm_flush_buffer #undef MPI_Comm_flush_buffer #define MPI_Comm_flush_buffer(a1) PyMPI_UNAVAILABLE("MPI_Comm_flush_buffer",a1) #endif #ifndef PyMPI_HAVE_MPI_Comm_iflush_buffer #undef MPI_Comm_iflush_buffer #define MPI_Comm_iflush_buffer(a1,a2) PyMPI_UNAVAILABLE("MPI_Comm_iflush_buffer",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Session_attach_buffer #undef MPI_Session_attach_buffer #define MPI_Session_attach_buffer(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Session_attach_buffer",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Session_detach_buffer #undef MPI_Session_detach_buffer #define MPI_Session_detach_buffer(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Session_detach_buffer",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Session_flush_buffer #undef MPI_Session_flush_buffer #define MPI_Session_flush_buffer(a1) PyMPI_UNAVAILABLE("MPI_Session_flush_buffer",a1) #endif #ifndef PyMPI_HAVE_MPI_Session_iflush_buffer #undef MPI_Session_iflush_buffer #define MPI_Session_iflush_buffer(a1,a2) PyMPI_UNAVAILABLE("MPI_Session_iflush_buffer",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Send #undef MPI_Send #define MPI_Send(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Send",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Recv #undef MPI_Recv #define MPI_Recv(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Recv",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Sendrecv #undef MPI_Sendrecv #define MPI_Sendrecv(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) PyMPI_UNAVAILABLE("MPI_Sendrecv",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) #endif 
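/*
 * The blocks above and below all follow the same fallback pattern used
 * throughout this header: each MPI symbol is guarded by a PyMPI_HAVE_<name>
 * feature macro, and when the underlying MPI library does not provide the
 * symbol it is redefined as a call to PyMPI_UNAVAILABLE(), which is assumed
 * here to be provided elsewhere in the mpi4py sources and to report an
 * "unsupported operation" MPI error at run time instead of breaking the
 * build.  A minimal sketch of the pattern, using a hypothetical MPI_Foo:
 *
 *   #ifndef PyMPI_HAVE_MPI_Foo
 *   #undef  MPI_Foo
 *   #define MPI_Foo(a1,a2) PyMPI_UNAVAILABLE("MPI_Foo",a1,a2)
 *   #endif
 *
 * With this in place, code written against the newest MPI standard still
 * compiles and links against older libraries, and only calls that actually
 * reach a stubbed symbol fail, with a run-time error naming the missing
 * function.
 */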
#ifndef PyMPI_HAVE_MPI_Sendrecv_replace #undef MPI_Sendrecv_replace #define MPI_Sendrecv_replace(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Sendrecv_replace",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Bsend #undef MPI_Bsend #define MPI_Bsend(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Bsend",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Ssend #undef MPI_Ssend #define MPI_Ssend(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Ssend",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Rsend #undef MPI_Rsend #define MPI_Rsend(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Rsend",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Isend #undef MPI_Isend #define MPI_Isend(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Isend",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Irecv #undef MPI_Irecv #define MPI_Irecv(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Irecv",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Isendrecv #undef MPI_Isendrecv #define MPI_Isendrecv(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) PyMPI_UNAVAILABLE("MPI_Isendrecv",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) #endif #ifndef PyMPI_HAVE_MPI_Isendrecv_replace #undef MPI_Isendrecv_replace #define MPI_Isendrecv_replace(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Isendrecv_replace",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Ibsend #undef MPI_Ibsend #define MPI_Ibsend(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Ibsend",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Issend #undef MPI_Issend #define MPI_Issend(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Issend",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Irsend #undef MPI_Irsend #define MPI_Irsend(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Irsend",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Send_init #undef MPI_Send_init #define MPI_Send_init(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Send_init",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Bsend_init #undef MPI_Bsend_init #define MPI_Bsend_init(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Bsend_init",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Ssend_init #undef MPI_Ssend_init #define MPI_Ssend_init(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Ssend_init",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Rsend_init #undef MPI_Rsend_init #define MPI_Rsend_init(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Rsend_init",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Recv_init #undef MPI_Recv_init #define MPI_Recv_init(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Recv_init",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Psend_init #undef MPI_Psend_init #define MPI_Psend_init(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Psend_init",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Precv_init #undef MPI_Precv_init #define MPI_Precv_init(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Precv_init",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Probe #undef MPI_Probe #define MPI_Probe(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Probe",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Iprobe #undef MPI_Iprobe #define MPI_Iprobe(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Iprobe",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_MESSAGE_NULL #undef MPI_MESSAGE_NULL #define MPI_MESSAGE_NULL ((MPI_Message)0) #endif #ifndef PyMPI_HAVE_MPI_MESSAGE_NO_PROC #undef MPI_MESSAGE_NO_PROC #define MPI_MESSAGE_NO_PROC ((MPI_Message)MPI_MESSAGE_NULL) #endif #ifndef PyMPI_HAVE_MPI_Mprobe #undef MPI_Mprobe #define MPI_Mprobe(a1,a2,a3,a4,a5) 
PyMPI_UNAVAILABLE("MPI_Mprobe",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Improbe #undef MPI_Improbe #define MPI_Improbe(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Improbe",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Mrecv #undef MPI_Mrecv #define MPI_Mrecv(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Mrecv",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Imrecv #undef MPI_Imrecv #define MPI_Imrecv(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Imrecv",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Barrier #undef MPI_Barrier #define MPI_Barrier(a1) PyMPI_UNAVAILABLE("MPI_Barrier",a1) #endif #ifndef PyMPI_HAVE_MPI_Bcast #undef MPI_Bcast #define MPI_Bcast(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Bcast",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Gather #undef MPI_Gather #define MPI_Gather(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Gather",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Gatherv #undef MPI_Gatherv #define MPI_Gatherv(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Gatherv",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Scatter #undef MPI_Scatter #define MPI_Scatter(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Scatter",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Scatterv #undef MPI_Scatterv #define MPI_Scatterv(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Scatterv",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Allgather #undef MPI_Allgather #define MPI_Allgather(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Allgather",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Allgatherv #undef MPI_Allgatherv #define MPI_Allgatherv(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Allgatherv",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Alltoall #undef MPI_Alltoall #define MPI_Alltoall(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Alltoall",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Alltoallv #undef MPI_Alltoallv #define MPI_Alltoallv(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Alltoallv",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Alltoallw #undef MPI_Alltoallw #define MPI_Alltoallw(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Alltoallw",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Reduce_local #undef MPI_Reduce_local #define MPI_Reduce_local(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Reduce_local",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Reduce #undef MPI_Reduce #define MPI_Reduce(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Reduce",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Allreduce #undef MPI_Allreduce #define MPI_Allreduce(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Allreduce",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Reduce_scatter_block #undef MPI_Reduce_scatter_block #define MPI_Reduce_scatter_block(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Reduce_scatter_block",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Reduce_scatter #undef MPI_Reduce_scatter #define MPI_Reduce_scatter(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Reduce_scatter",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Scan #undef MPI_Scan #define MPI_Scan(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Scan",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Exscan #undef MPI_Exscan #define MPI_Exscan(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Exscan",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Neighbor_allgather #undef MPI_Neighbor_allgather #define MPI_Neighbor_allgather(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Neighbor_allgather",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef 
PyMPI_HAVE_MPI_Neighbor_allgatherv #undef MPI_Neighbor_allgatherv #define MPI_Neighbor_allgatherv(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Neighbor_allgatherv",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Neighbor_alltoall #undef MPI_Neighbor_alltoall #define MPI_Neighbor_alltoall(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Neighbor_alltoall",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Neighbor_alltoallv #undef MPI_Neighbor_alltoallv #define MPI_Neighbor_alltoallv(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Neighbor_alltoallv",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Neighbor_alltoallw #undef MPI_Neighbor_alltoallw #define MPI_Neighbor_alltoallw(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Neighbor_alltoallw",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Ibarrier #undef MPI_Ibarrier #define MPI_Ibarrier(a1,a2) PyMPI_UNAVAILABLE("MPI_Ibarrier",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Ibcast #undef MPI_Ibcast #define MPI_Ibcast(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Ibcast",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Igather #undef MPI_Igather #define MPI_Igather(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Igather",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Igatherv #undef MPI_Igatherv #define MPI_Igatherv(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Igatherv",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Iscatter #undef MPI_Iscatter #define MPI_Iscatter(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Iscatter",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Iscatterv #undef MPI_Iscatterv #define MPI_Iscatterv(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Iscatterv",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Iallgather #undef MPI_Iallgather #define MPI_Iallgather(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Iallgather",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Iallgatherv #undef MPI_Iallgatherv #define MPI_Iallgatherv(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Iallgatherv",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Ialltoall #undef MPI_Ialltoall #define MPI_Ialltoall(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Ialltoall",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Ialltoallv #undef MPI_Ialltoallv #define MPI_Ialltoallv(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Ialltoallv",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Ialltoallw #undef MPI_Ialltoallw #define MPI_Ialltoallw(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Ialltoallw",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Ireduce #undef MPI_Ireduce #define MPI_Ireduce(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Ireduce",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Iallreduce #undef MPI_Iallreduce #define MPI_Iallreduce(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Iallreduce",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Ireduce_scatter_block #undef MPI_Ireduce_scatter_block #define MPI_Ireduce_scatter_block(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Ireduce_scatter_block",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Ireduce_scatter #undef MPI_Ireduce_scatter #define MPI_Ireduce_scatter(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Ireduce_scatter",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Iscan #undef MPI_Iscan #define MPI_Iscan(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Iscan",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Iexscan 
#undef MPI_Iexscan #define MPI_Iexscan(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Iexscan",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Ineighbor_allgather #undef MPI_Ineighbor_allgather #define MPI_Ineighbor_allgather(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Ineighbor_allgather",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Ineighbor_allgatherv #undef MPI_Ineighbor_allgatherv #define MPI_Ineighbor_allgatherv(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Ineighbor_allgatherv",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Ineighbor_alltoall #undef MPI_Ineighbor_alltoall #define MPI_Ineighbor_alltoall(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Ineighbor_alltoall",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Ineighbor_alltoallv #undef MPI_Ineighbor_alltoallv #define MPI_Ineighbor_alltoallv(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Ineighbor_alltoallv",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Ineighbor_alltoallw #undef MPI_Ineighbor_alltoallw #define MPI_Ineighbor_alltoallw(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Ineighbor_alltoallw",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Barrier_init #undef MPI_Barrier_init #define MPI_Barrier_init(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Barrier_init",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Bcast_init #undef MPI_Bcast_init #define MPI_Bcast_init(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Bcast_init",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Gather_init #undef MPI_Gather_init #define MPI_Gather_init(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Gather_init",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Gatherv_init #undef MPI_Gatherv_init #define MPI_Gatherv_init(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) PyMPI_UNAVAILABLE("MPI_Gatherv_init",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) #endif #ifndef PyMPI_HAVE_MPI_Scatter_init #undef MPI_Scatter_init #define MPI_Scatter_init(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Scatter_init",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Scatterv_init #undef MPI_Scatterv_init #define MPI_Scatterv_init(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) PyMPI_UNAVAILABLE("MPI_Scatterv_init",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) #endif #ifndef PyMPI_HAVE_MPI_Allgather_init #undef MPI_Allgather_init #define MPI_Allgather_init(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Allgather_init",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Allgatherv_init #undef MPI_Allgatherv_init #define MPI_Allgatherv_init(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Allgatherv_init",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Alltoall_init #undef MPI_Alltoall_init #define MPI_Alltoall_init(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Alltoall_init",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Alltoallv_init #undef MPI_Alltoallv_init #define MPI_Alltoallv_init(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) PyMPI_UNAVAILABLE("MPI_Alltoallv_init",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) #endif #ifndef PyMPI_HAVE_MPI_Alltoallw_init #undef MPI_Alltoallw_init #define MPI_Alltoallw_init(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) PyMPI_UNAVAILABLE("MPI_Alltoallw_init",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) #endif #ifndef PyMPI_HAVE_MPI_Reduce_init #undef MPI_Reduce_init #define MPI_Reduce_init(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Reduce_init",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Allreduce_init #undef MPI_Allreduce_init #define 
MPI_Allreduce_init(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Allreduce_init",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Reduce_scatter_block_init #undef MPI_Reduce_scatter_block_init #define MPI_Reduce_scatter_block_init(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Reduce_scatter_block_init",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Reduce_scatter_init #undef MPI_Reduce_scatter_init #define MPI_Reduce_scatter_init(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Reduce_scatter_init",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Scan_init #undef MPI_Scan_init #define MPI_Scan_init(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Scan_init",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Exscan_init #undef MPI_Exscan_init #define MPI_Exscan_init(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Exscan_init",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Neighbor_allgather_init #undef MPI_Neighbor_allgather_init #define MPI_Neighbor_allgather_init(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Neighbor_allgather_init",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Neighbor_allgatherv_init #undef MPI_Neighbor_allgatherv_init #define MPI_Neighbor_allgatherv_init(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Neighbor_allgatherv_init",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Neighbor_alltoall_init #undef MPI_Neighbor_alltoall_init #define MPI_Neighbor_alltoall_init(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Neighbor_alltoall_init",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Neighbor_alltoallv_init #undef MPI_Neighbor_alltoallv_init #define MPI_Neighbor_alltoallv_init(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) PyMPI_UNAVAILABLE("MPI_Neighbor_alltoallv_init",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) #endif #ifndef PyMPI_HAVE_MPI_Neighbor_alltoallw_init #undef MPI_Neighbor_alltoallw_init #define MPI_Neighbor_alltoallw_init(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) PyMPI_UNAVAILABLE("MPI_Neighbor_alltoallw_init",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) #endif #ifndef PyMPI_HAVE_MPI_Comm_dup #undef MPI_Comm_dup #define MPI_Comm_dup(a1,a2) PyMPI_UNAVAILABLE("MPI_Comm_dup",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Comm_dup_with_info #undef MPI_Comm_dup_with_info #define MPI_Comm_dup_with_info(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Comm_dup_with_info",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Comm_idup #undef MPI_Comm_idup #define MPI_Comm_idup(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Comm_idup",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Comm_idup_with_info #undef MPI_Comm_idup_with_info #define MPI_Comm_idup_with_info(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Comm_idup_with_info",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Comm_create #undef MPI_Comm_create #define MPI_Comm_create(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Comm_create",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Comm_create_group #undef MPI_Comm_create_group #define MPI_Comm_create_group(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Comm_create_group",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_MAX_STRINGTAG_LEN #undef MPI_MAX_STRINGTAG_LEN #define MPI_MAX_STRINGTAG_LEN (1) #endif #ifndef PyMPI_HAVE_MPI_Comm_create_from_group #undef MPI_Comm_create_from_group #define MPI_Comm_create_from_group(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Comm_create_from_group",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Comm_split #undef MPI_Comm_split #define MPI_Comm_split(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Comm_split",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_COMM_TYPE_SHARED #undef MPI_COMM_TYPE_SHARED #define 
MPI_COMM_TYPE_SHARED (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMM_TYPE_HW_GUIDED #undef MPI_COMM_TYPE_HW_GUIDED #define MPI_COMM_TYPE_HW_GUIDED (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMM_TYPE_HW_UNGUIDED #undef MPI_COMM_TYPE_HW_UNGUIDED #define MPI_COMM_TYPE_HW_UNGUIDED (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMM_TYPE_RESOURCE_GUIDED #undef MPI_COMM_TYPE_RESOURCE_GUIDED #define MPI_COMM_TYPE_RESOURCE_GUIDED (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_Comm_split_type #undef MPI_Comm_split_type #define MPI_Comm_split_type(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Comm_split_type",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Comm_set_info #undef MPI_Comm_set_info #define MPI_Comm_set_info(a1,a2) PyMPI_UNAVAILABLE("MPI_Comm_set_info",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Comm_get_info #undef MPI_Comm_get_info #define MPI_Comm_get_info(a1,a2) PyMPI_UNAVAILABLE("MPI_Comm_get_info",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_CART #undef MPI_CART #define MPI_CART (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_Cart_create #undef MPI_Cart_create #define MPI_Cart_create(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Cart_create",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Cartdim_get #undef MPI_Cartdim_get #define MPI_Cartdim_get(a1,a2) PyMPI_UNAVAILABLE("MPI_Cartdim_get",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Cart_get #undef MPI_Cart_get #define MPI_Cart_get(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Cart_get",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Cart_rank #undef MPI_Cart_rank #define MPI_Cart_rank(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Cart_rank",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Cart_coords #undef MPI_Cart_coords #define MPI_Cart_coords(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Cart_coords",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Cart_shift #undef MPI_Cart_shift #define MPI_Cart_shift(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Cart_shift",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Cart_sub #undef MPI_Cart_sub #define MPI_Cart_sub(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Cart_sub",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Cart_map #undef MPI_Cart_map #define MPI_Cart_map(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Cart_map",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Dims_create #undef MPI_Dims_create #define MPI_Dims_create(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Dims_create",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_GRAPH #undef MPI_GRAPH #define MPI_GRAPH (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_Graph_create #undef MPI_Graph_create #define MPI_Graph_create(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Graph_create",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Graphdims_get #undef MPI_Graphdims_get #define MPI_Graphdims_get(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Graphdims_get",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Graph_get #undef MPI_Graph_get #define MPI_Graph_get(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Graph_get",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Graph_map #undef MPI_Graph_map #define MPI_Graph_map(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Graph_map",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Graph_neighbors_count #undef MPI_Graph_neighbors_count #define MPI_Graph_neighbors_count(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Graph_neighbors_count",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Graph_neighbors #undef MPI_Graph_neighbors #define MPI_Graph_neighbors(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Graph_neighbors",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_DIST_GRAPH #undef MPI_DIST_GRAPH #define MPI_DIST_GRAPH (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_UNWEIGHTED #undef MPI_UNWEIGHTED #define MPI_UNWEIGHTED 
((int*)0) #endif #ifndef PyMPI_HAVE_MPI_WEIGHTS_EMPTY #undef MPI_WEIGHTS_EMPTY #define MPI_WEIGHTS_EMPTY ((int*)MPI_UNWEIGHTED) #endif #ifndef PyMPI_HAVE_MPI_Dist_graph_create_adjacent #undef MPI_Dist_graph_create_adjacent #define MPI_Dist_graph_create_adjacent(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Dist_graph_create_adjacent",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Dist_graph_create #undef MPI_Dist_graph_create #define MPI_Dist_graph_create(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Dist_graph_create",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Dist_graph_neighbors_count #undef MPI_Dist_graph_neighbors_count #define MPI_Dist_graph_neighbors_count(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Dist_graph_neighbors_count",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Dist_graph_neighbors #undef MPI_Dist_graph_neighbors #define MPI_Dist_graph_neighbors(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Dist_graph_neighbors",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Intercomm_create #undef MPI_Intercomm_create #define MPI_Intercomm_create(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Intercomm_create",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Intercomm_create_from_groups #undef MPI_Intercomm_create_from_groups #define MPI_Intercomm_create_from_groups(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Intercomm_create_from_groups",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Comm_remote_group #undef MPI_Comm_remote_group #define MPI_Comm_remote_group(a1,a2) PyMPI_UNAVAILABLE("MPI_Comm_remote_group",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Comm_remote_size #undef MPI_Comm_remote_size #define MPI_Comm_remote_size(a1,a2) PyMPI_UNAVAILABLE("MPI_Comm_remote_size",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Intercomm_merge #undef MPI_Intercomm_merge #define MPI_Intercomm_merge(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Intercomm_merge",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_MAX_PORT_NAME #undef MPI_MAX_PORT_NAME #define MPI_MAX_PORT_NAME (1) #endif #ifndef PyMPI_HAVE_MPI_Open_port #undef MPI_Open_port #define MPI_Open_port(a1,a2) PyMPI_UNAVAILABLE("MPI_Open_port",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Close_port #undef MPI_Close_port #define MPI_Close_port(a1) PyMPI_UNAVAILABLE("MPI_Close_port",a1) #endif #ifndef PyMPI_HAVE_MPI_Publish_name #undef MPI_Publish_name #define MPI_Publish_name(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Publish_name",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Unpublish_name #undef MPI_Unpublish_name #define MPI_Unpublish_name(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Unpublish_name",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Lookup_name #undef MPI_Lookup_name #define MPI_Lookup_name(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Lookup_name",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Comm_accept #undef MPI_Comm_accept #define MPI_Comm_accept(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Comm_accept",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Comm_connect #undef MPI_Comm_connect #define MPI_Comm_connect(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Comm_connect",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Comm_join #undef MPI_Comm_join #define MPI_Comm_join(a1,a2) PyMPI_UNAVAILABLE("MPI_Comm_join",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Comm_disconnect #undef MPI_Comm_disconnect #define MPI_Comm_disconnect(a1) PyMPI_UNAVAILABLE("MPI_Comm_disconnect",a1) #endif #ifndef PyMPI_HAVE_MPI_ARGV_NULL #undef MPI_ARGV_NULL #define MPI_ARGV_NULL ((char**)0) #endif #ifndef PyMPI_HAVE_MPI_ARGVS_NULL #undef MPI_ARGVS_NULL #define MPI_ARGVS_NULL ((char***)0) #endif #ifndef PyMPI_HAVE_MPI_ERRCODES_IGNORE 
#undef MPI_ERRCODES_IGNORE #define MPI_ERRCODES_IGNORE ((int*)0) #endif #ifndef PyMPI_HAVE_MPI_Comm_spawn #undef MPI_Comm_spawn #define MPI_Comm_spawn(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Comm_spawn",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Comm_spawn_multiple #undef MPI_Comm_spawn_multiple #define MPI_Comm_spawn_multiple(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Comm_spawn_multiple",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Comm_get_parent #undef MPI_Comm_get_parent #define MPI_Comm_get_parent(a1) PyMPI_UNAVAILABLE("MPI_Comm_get_parent",a1) #endif #ifndef PyMPI_HAVE_MPI_Comm_get_name #undef MPI_Comm_get_name #define MPI_Comm_get_name(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Comm_get_name",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Comm_set_name #undef MPI_Comm_set_name #define MPI_Comm_set_name(a1,a2) PyMPI_UNAVAILABLE("MPI_Comm_set_name",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_TAG_UB #undef MPI_TAG_UB #define MPI_TAG_UB (MPI_KEYVAL_INVALID) #endif #ifndef PyMPI_HAVE_MPI_IO #undef MPI_IO #define MPI_IO (MPI_KEYVAL_INVALID) #endif #ifndef PyMPI_HAVE_MPI_WTIME_IS_GLOBAL #undef MPI_WTIME_IS_GLOBAL #define MPI_WTIME_IS_GLOBAL (MPI_KEYVAL_INVALID) #endif #ifndef PyMPI_HAVE_MPI_UNIVERSE_SIZE #undef MPI_UNIVERSE_SIZE #define MPI_UNIVERSE_SIZE (MPI_KEYVAL_INVALID) #endif #ifndef PyMPI_HAVE_MPI_APPNUM #undef MPI_APPNUM #define MPI_APPNUM (MPI_KEYVAL_INVALID) #endif #ifndef PyMPI_HAVE_MPI_LASTUSEDCODE #undef MPI_LASTUSEDCODE #define MPI_LASTUSEDCODE (MPI_KEYVAL_INVALID) #endif #ifndef PyMPI_HAVE_MPI_Comm_get_attr #undef MPI_Comm_get_attr #define MPI_Comm_get_attr MPI_Attr_get #endif #ifndef PyMPI_HAVE_MPI_Comm_set_attr #undef MPI_Comm_set_attr #define MPI_Comm_set_attr MPI_Attr_put #endif #ifndef PyMPI_HAVE_MPI_Comm_delete_attr #undef MPI_Comm_delete_attr #define MPI_Comm_delete_attr MPI_Attr_delete #endif #ifndef PyMPI_HAVE_MPI_Comm_copy_attr_function #undef MPI_Comm_copy_attr_function #define MPI_Comm_copy_attr_function MPI_Copy_function #endif #ifndef PyMPI_HAVE_MPI_Comm_delete_attr_function #undef MPI_Comm_delete_attr_function #define MPI_Comm_delete_attr_function MPI_Delete_function #endif #ifndef PyMPI_HAVE_MPI_COMM_DUP_FN #undef MPI_COMM_DUP_FN #define MPI_COMM_DUP_FN ((MPI_Comm_copy_attr_function*)MPI_DUP_FN) #endif #ifndef PyMPI_HAVE_MPI_COMM_NULL_COPY_FN #undef MPI_COMM_NULL_COPY_FN #define MPI_COMM_NULL_COPY_FN ((MPI_Comm_copy_attr_function*)MPI_NULL_COPY_FN) #endif #ifndef PyMPI_HAVE_MPI_COMM_NULL_DELETE_FN #undef MPI_COMM_NULL_DELETE_FN #define MPI_COMM_NULL_DELETE_FN ((MPI_Comm_delete_attr_function*)MPI_NULL_DELETE_FN) #endif #ifndef PyMPI_HAVE_MPI_Comm_create_keyval #undef MPI_Comm_create_keyval #define MPI_Comm_create_keyval MPI_Keyval_create #endif #ifndef PyMPI_HAVE_MPI_Comm_free_keyval #undef MPI_Comm_free_keyval #define MPI_Comm_free_keyval MPI_Keyval_free #endif #ifndef PyMPI_HAVE_MPI_Comm_errhandler_fn #undef MPI_Comm_errhandler_fn #define MPI_Comm_errhandler_fn MPI_Handler_function #endif #ifndef PyMPI_HAVE_MPI_Comm_errhandler_function #undef MPI_Comm_errhandler_function #define MPI_Comm_errhandler_function MPI_Comm_errhandler_fn #endif #ifndef PyMPI_HAVE_MPI_Comm_create_errhandler #undef MPI_Comm_create_errhandler #define MPI_Comm_create_errhandler MPI_Errhandler_create #endif #ifndef PyMPI_HAVE_MPI_Comm_get_errhandler #undef MPI_Comm_get_errhandler #define MPI_Comm_get_errhandler MPI_Errhandler_get #endif #ifndef PyMPI_HAVE_MPI_Comm_set_errhandler #undef MPI_Comm_set_errhandler #define MPI_Comm_set_errhandler MPI_Errhandler_set #endif 
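/*
 * Besides the PyMPI_UNAVAILABLE() stubs, two further fallback conventions
 * appear in the surrounding blocks.  Missing predefined handles collapse
 * onto their null handle (for example, MPI_ERRORS_RETURN and
 * MPI_ERRORS_ARE_FATAL both degrade to MPI_ERRHANDLER_NULL above), and
 * missing MPI-2 names are aliased to their deprecated MPI-1 equivalents
 * when only the old spelling exists, as in the attribute and error-handler
 * blocks just above:
 *
 *   #ifndef PyMPI_HAVE_MPI_Comm_get_attr
 *   #undef  MPI_Comm_get_attr
 *   #define MPI_Comm_get_attr MPI_Attr_get
 *   #endif
 *
 * Either way, callers of this header always see a complete, uniformly
 * defined set of MPI names regardless of the library's vintage.
 */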
#ifndef PyMPI_HAVE_MPI_Comm_call_errhandler #undef MPI_Comm_call_errhandler #define MPI_Comm_call_errhandler(a1,a2) PyMPI_UNAVAILABLE("MPI_Comm_call_errhandler",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Buffer_attach_c #undef MPI_Buffer_attach_c #define MPI_Buffer_attach_c(a1,a2) PyMPI_UNAVAILABLE("MPI_Buffer_attach_c",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Buffer_detach_c #undef MPI_Buffer_detach_c #define MPI_Buffer_detach_c(a1,a2) PyMPI_UNAVAILABLE("MPI_Buffer_detach_c",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Comm_attach_buffer_c #undef MPI_Comm_attach_buffer_c #define MPI_Comm_attach_buffer_c(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Comm_attach_buffer_c",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Comm_detach_buffer_c #undef MPI_Comm_detach_buffer_c #define MPI_Comm_detach_buffer_c(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Comm_detach_buffer_c",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Session_attach_buffer_c #undef MPI_Session_attach_buffer_c #define MPI_Session_attach_buffer_c(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Session_attach_buffer_c",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Session_detach_buffer_c #undef MPI_Session_detach_buffer_c #define MPI_Session_detach_buffer_c(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Session_detach_buffer_c",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Send_c #undef MPI_Send_c #define MPI_Send_c(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Send_c",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Recv_c #undef MPI_Recv_c #define MPI_Recv_c(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Recv_c",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Sendrecv_c #undef MPI_Sendrecv_c #define MPI_Sendrecv_c(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) PyMPI_UNAVAILABLE("MPI_Sendrecv_c",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) #endif #ifndef PyMPI_HAVE_MPI_Sendrecv_replace_c #undef MPI_Sendrecv_replace_c #define MPI_Sendrecv_replace_c(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Sendrecv_replace_c",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Bsend_c #undef MPI_Bsend_c #define MPI_Bsend_c(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Bsend_c",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Ssend_c #undef MPI_Ssend_c #define MPI_Ssend_c(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Ssend_c",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Rsend_c #undef MPI_Rsend_c #define MPI_Rsend_c(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Rsend_c",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Isend_c #undef MPI_Isend_c #define MPI_Isend_c(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Isend_c",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Irecv_c #undef MPI_Irecv_c #define MPI_Irecv_c(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Irecv_c",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Isendrecv_c #undef MPI_Isendrecv_c #define MPI_Isendrecv_c(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) PyMPI_UNAVAILABLE("MPI_Isendrecv_c",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) #endif #ifndef PyMPI_HAVE_MPI_Isendrecv_replace_c #undef MPI_Isendrecv_replace_c #define MPI_Isendrecv_replace_c(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Isendrecv_replace_c",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Ibsend_c #undef MPI_Ibsend_c #define MPI_Ibsend_c(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Ibsend_c",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Issend_c #undef MPI_Issend_c #define MPI_Issend_c(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Issend_c",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Irsend_c #undef MPI_Irsend_c #define MPI_Irsend_c(a1,a2,a3,a4,a5,a6,a7) 
PyMPI_UNAVAILABLE("MPI_Irsend_c",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Send_init_c #undef MPI_Send_init_c #define MPI_Send_init_c(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Send_init_c",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Recv_init_c #undef MPI_Recv_init_c #define MPI_Recv_init_c(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Recv_init_c",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Bsend_init_c #undef MPI_Bsend_init_c #define MPI_Bsend_init_c(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Bsend_init_c",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Ssend_init_c #undef MPI_Ssend_init_c #define MPI_Ssend_init_c(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Ssend_init_c",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Rsend_init_c #undef MPI_Rsend_init_c #define MPI_Rsend_init_c(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Rsend_init_c",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Mrecv_c #undef MPI_Mrecv_c #define MPI_Mrecv_c(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Mrecv_c",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Imrecv_c #undef MPI_Imrecv_c #define MPI_Imrecv_c(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Imrecv_c",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Bcast_c #undef MPI_Bcast_c #define MPI_Bcast_c(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Bcast_c",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Gather_c #undef MPI_Gather_c #define MPI_Gather_c(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Gather_c",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Gatherv_c #undef MPI_Gatherv_c #define MPI_Gatherv_c(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Gatherv_c",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Scatter_c #undef MPI_Scatter_c #define MPI_Scatter_c(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Scatter_c",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Scatterv_c #undef MPI_Scatterv_c #define MPI_Scatterv_c(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Scatterv_c",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Allgather_c #undef MPI_Allgather_c #define MPI_Allgather_c(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Allgather_c",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Allgatherv_c #undef MPI_Allgatherv_c #define MPI_Allgatherv_c(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Allgatherv_c",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Alltoall_c #undef MPI_Alltoall_c #define MPI_Alltoall_c(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Alltoall_c",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Alltoallv_c #undef MPI_Alltoallv_c #define MPI_Alltoallv_c(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Alltoallv_c",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Alltoallw_c #undef MPI_Alltoallw_c #define MPI_Alltoallw_c(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Alltoallw_c",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Reduce_local_c #undef MPI_Reduce_local_c #define MPI_Reduce_local_c(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Reduce_local_c",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Reduce_c #undef MPI_Reduce_c #define MPI_Reduce_c(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Reduce_c",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Allreduce_c #undef MPI_Allreduce_c #define MPI_Allreduce_c(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Allreduce_c",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Reduce_scatter_block_c #undef MPI_Reduce_scatter_block_c #define MPI_Reduce_scatter_block_c(a1,a2,a3,a4,a5,a6) 
PyMPI_UNAVAILABLE("MPI_Reduce_scatter_block_c",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Reduce_scatter_c #undef MPI_Reduce_scatter_c #define MPI_Reduce_scatter_c(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Reduce_scatter_c",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Scan_c #undef MPI_Scan_c #define MPI_Scan_c(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Scan_c",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Exscan_c #undef MPI_Exscan_c #define MPI_Exscan_c(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Exscan_c",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Neighbor_allgather_c #undef MPI_Neighbor_allgather_c #define MPI_Neighbor_allgather_c(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Neighbor_allgather_c",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Neighbor_allgatherv_c #undef MPI_Neighbor_allgatherv_c #define MPI_Neighbor_allgatherv_c(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Neighbor_allgatherv_c",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Neighbor_alltoall_c #undef MPI_Neighbor_alltoall_c #define MPI_Neighbor_alltoall_c(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Neighbor_alltoall_c",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Neighbor_alltoallv_c #undef MPI_Neighbor_alltoallv_c #define MPI_Neighbor_alltoallv_c(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Neighbor_alltoallv_c",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Neighbor_alltoallw_c #undef MPI_Neighbor_alltoallw_c #define MPI_Neighbor_alltoallw_c(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Neighbor_alltoallw_c",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Ibcast_c #undef MPI_Ibcast_c #define MPI_Ibcast_c(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Ibcast_c",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Igather_c #undef MPI_Igather_c #define MPI_Igather_c(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Igather_c",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Igatherv_c #undef MPI_Igatherv_c #define MPI_Igatherv_c(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Igatherv_c",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Iscatter_c #undef MPI_Iscatter_c #define MPI_Iscatter_c(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Iscatter_c",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Iscatterv_c #undef MPI_Iscatterv_c #define MPI_Iscatterv_c(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Iscatterv_c",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Iallgather_c #undef MPI_Iallgather_c #define MPI_Iallgather_c(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Iallgather_c",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Iallgatherv_c #undef MPI_Iallgatherv_c #define MPI_Iallgatherv_c(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Iallgatherv_c",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Ialltoall_c #undef MPI_Ialltoall_c #define MPI_Ialltoall_c(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Ialltoall_c",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Ialltoallv_c #undef MPI_Ialltoallv_c #define MPI_Ialltoallv_c(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Ialltoallv_c",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Ialltoallw_c #undef MPI_Ialltoallw_c #define MPI_Ialltoallw_c(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Ialltoallw_c",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Ireduce_c #undef MPI_Ireduce_c #define MPI_Ireduce_c(a1,a2,a3,a4,a5,a6,a7,a8) 
PyMPI_UNAVAILABLE("MPI_Ireduce_c",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Iallreduce_c #undef MPI_Iallreduce_c #define MPI_Iallreduce_c(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Iallreduce_c",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Ireduce_scatter_block_c #undef MPI_Ireduce_scatter_block_c #define MPI_Ireduce_scatter_block_c(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Ireduce_scatter_block_c",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Ireduce_scatter_c #undef MPI_Ireduce_scatter_c #define MPI_Ireduce_scatter_c(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Ireduce_scatter_c",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Iscan_c #undef MPI_Iscan_c #define MPI_Iscan_c(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Iscan_c",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Iexscan_c #undef MPI_Iexscan_c #define MPI_Iexscan_c(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Iexscan_c",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Ineighbor_allgather_c #undef MPI_Ineighbor_allgather_c #define MPI_Ineighbor_allgather_c(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Ineighbor_allgather_c",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Ineighbor_allgatherv_c #undef MPI_Ineighbor_allgatherv_c #define MPI_Ineighbor_allgatherv_c(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Ineighbor_allgatherv_c",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Ineighbor_alltoall_c #undef MPI_Ineighbor_alltoall_c #define MPI_Ineighbor_alltoall_c(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Ineighbor_alltoall_c",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Ineighbor_alltoallv_c #undef MPI_Ineighbor_alltoallv_c #define MPI_Ineighbor_alltoallv_c(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Ineighbor_alltoallv_c",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Ineighbor_alltoallw_c #undef MPI_Ineighbor_alltoallw_c #define MPI_Ineighbor_alltoallw_c(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Ineighbor_alltoallw_c",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Bcast_init_c #undef MPI_Bcast_init_c #define MPI_Bcast_init_c(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Bcast_init_c",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Gather_init_c #undef MPI_Gather_init_c #define MPI_Gather_init_c(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Gather_init_c",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Gatherv_init_c #undef MPI_Gatherv_init_c #define MPI_Gatherv_init_c(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) PyMPI_UNAVAILABLE("MPI_Gatherv_init_c",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) #endif #ifndef PyMPI_HAVE_MPI_Scatter_init_c #undef MPI_Scatter_init_c #define MPI_Scatter_init_c(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Scatter_init_c",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Scatterv_init_c #undef MPI_Scatterv_init_c #define MPI_Scatterv_init_c(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) PyMPI_UNAVAILABLE("MPI_Scatterv_init_c",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) #endif #ifndef PyMPI_HAVE_MPI_Allgather_init_c #undef MPI_Allgather_init_c #define MPI_Allgather_init_c(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Allgather_init_c",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Allgatherv_init_c #undef MPI_Allgatherv_init_c #define MPI_Allgatherv_init_c(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Allgatherv_init_c",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Alltoall_init_c #undef MPI_Alltoall_init_c #define 
MPI_Alltoall_init_c(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Alltoall_init_c",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Alltoallv_init_c #undef MPI_Alltoallv_init_c #define MPI_Alltoallv_init_c(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) PyMPI_UNAVAILABLE("MPI_Alltoallv_init_c",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) #endif #ifndef PyMPI_HAVE_MPI_Alltoallw_init_c #undef MPI_Alltoallw_init_c #define MPI_Alltoallw_init_c(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) PyMPI_UNAVAILABLE("MPI_Alltoallw_init_c",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) #endif #ifndef PyMPI_HAVE_MPI_Reduce_init_c #undef MPI_Reduce_init_c #define MPI_Reduce_init_c(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Reduce_init_c",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Allreduce_init_c #undef MPI_Allreduce_init_c #define MPI_Allreduce_init_c(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Allreduce_init_c",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Reduce_scatter_block_init_c #undef MPI_Reduce_scatter_block_init_c #define MPI_Reduce_scatter_block_init_c(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Reduce_scatter_block_init_c",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Reduce_scatter_init_c #undef MPI_Reduce_scatter_init_c #define MPI_Reduce_scatter_init_c(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Reduce_scatter_init_c",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Scan_init_c #undef MPI_Scan_init_c #define MPI_Scan_init_c(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Scan_init_c",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Exscan_init_c #undef MPI_Exscan_init_c #define MPI_Exscan_init_c(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Exscan_init_c",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Neighbor_allgather_init_c #undef MPI_Neighbor_allgather_init_c #define MPI_Neighbor_allgather_init_c(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Neighbor_allgather_init_c",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Neighbor_allgatherv_init_c #undef MPI_Neighbor_allgatherv_init_c #define MPI_Neighbor_allgatherv_init_c(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Neighbor_allgatherv_init_c",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Neighbor_alltoall_init_c #undef MPI_Neighbor_alltoall_init_c #define MPI_Neighbor_alltoall_init_c(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Neighbor_alltoall_init_c",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Neighbor_alltoallv_init_c #undef MPI_Neighbor_alltoallv_init_c #define MPI_Neighbor_alltoallv_init_c(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) PyMPI_UNAVAILABLE("MPI_Neighbor_alltoallv_init_c",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) #endif #ifndef PyMPI_HAVE_MPI_Neighbor_alltoallw_init_c #undef MPI_Neighbor_alltoallw_init_c #define MPI_Neighbor_alltoallw_init_c(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) PyMPI_UNAVAILABLE("MPI_Neighbor_alltoallw_init_c",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) #endif #ifndef PyMPI_HAVE_MPI_WIN_NULL #undef MPI_WIN_NULL #define MPI_WIN_NULL ((MPI_Win)0) #endif #ifndef PyMPI_HAVE_MPI_Win_free #undef MPI_Win_free #define MPI_Win_free(a1) PyMPI_UNAVAILABLE("MPI_Win_free",a1) #endif #ifndef PyMPI_HAVE_MPI_Win_create #undef MPI_Win_create #define MPI_Win_create(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Win_create",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Win_allocate #undef MPI_Win_allocate #define MPI_Win_allocate(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Win_allocate",a1,a2,a3,a4,a5,a6) #endif #ifndef 
PyMPI_HAVE_MPI_Win_allocate_shared #undef MPI_Win_allocate_shared #define MPI_Win_allocate_shared(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Win_allocate_shared",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Win_shared_query #undef MPI_Win_shared_query #define MPI_Win_shared_query(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Win_shared_query",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Win_create_dynamic #undef MPI_Win_create_dynamic #define MPI_Win_create_dynamic(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Win_create_dynamic",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Win_attach #undef MPI_Win_attach #define MPI_Win_attach(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Win_attach",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Win_detach #undef MPI_Win_detach #define MPI_Win_detach(a1,a2) PyMPI_UNAVAILABLE("MPI_Win_detach",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Win_set_info #undef MPI_Win_set_info #define MPI_Win_set_info(a1,a2) PyMPI_UNAVAILABLE("MPI_Win_set_info",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Win_get_info #undef MPI_Win_get_info #define MPI_Win_get_info(a1,a2) PyMPI_UNAVAILABLE("MPI_Win_get_info",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Win_get_group #undef MPI_Win_get_group #define MPI_Win_get_group(a1,a2) PyMPI_UNAVAILABLE("MPI_Win_get_group",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Get #undef MPI_Get #define MPI_Get(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Get",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Put #undef MPI_Put #define MPI_Put(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Put",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Accumulate #undef MPI_Accumulate #define MPI_Accumulate(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Accumulate",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Get_accumulate #undef MPI_Get_accumulate #define MPI_Get_accumulate(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) PyMPI_UNAVAILABLE("MPI_Get_accumulate",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) #endif #ifndef PyMPI_HAVE_MPI_Fetch_and_op #undef MPI_Fetch_and_op #define MPI_Fetch_and_op(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Fetch_and_op",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Compare_and_swap #undef MPI_Compare_and_swap #define MPI_Compare_and_swap(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Compare_and_swap",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Rget #undef MPI_Rget #define MPI_Rget(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Rget",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Rput #undef MPI_Rput #define MPI_Rput(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Rput",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Raccumulate #undef MPI_Raccumulate #define MPI_Raccumulate(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Raccumulate",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Rget_accumulate #undef MPI_Rget_accumulate #define MPI_Rget_accumulate(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13) PyMPI_UNAVAILABLE("MPI_Rget_accumulate",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13) #endif #ifndef PyMPI_HAVE_MPI_MODE_NOCHECK #undef MPI_MODE_NOCHECK #define MPI_MODE_NOCHECK (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_MODE_NOSTORE #undef MPI_MODE_NOSTORE #define MPI_MODE_NOSTORE (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_MODE_NOPUT #undef MPI_MODE_NOPUT #define MPI_MODE_NOPUT (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_MODE_NOPRECEDE #undef MPI_MODE_NOPRECEDE #define MPI_MODE_NOPRECEDE (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_MODE_NOSUCCEED #undef MPI_MODE_NOSUCCEED #define MPI_MODE_NOSUCCEED (MPI_UNDEFINED) 
#endif #ifndef PyMPI_HAVE_MPI_Win_fence #undef MPI_Win_fence #define MPI_Win_fence(a1,a2) PyMPI_UNAVAILABLE("MPI_Win_fence",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Win_post #undef MPI_Win_post #define MPI_Win_post(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Win_post",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Win_start #undef MPI_Win_start #define MPI_Win_start(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Win_start",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Win_complete #undef MPI_Win_complete #define MPI_Win_complete(a1) PyMPI_UNAVAILABLE("MPI_Win_complete",a1) #endif #ifndef PyMPI_HAVE_MPI_Win_wait #undef MPI_Win_wait #define MPI_Win_wait(a1) PyMPI_UNAVAILABLE("MPI_Win_wait",a1) #endif #ifndef PyMPI_HAVE_MPI_Win_test #undef MPI_Win_test #define MPI_Win_test(a1,a2) PyMPI_UNAVAILABLE("MPI_Win_test",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_LOCK_EXCLUSIVE #undef MPI_LOCK_EXCLUSIVE #define MPI_LOCK_EXCLUSIVE (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_LOCK_SHARED #undef MPI_LOCK_SHARED #define MPI_LOCK_SHARED (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_Win_lock #undef MPI_Win_lock #define MPI_Win_lock(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Win_lock",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Win_unlock #undef MPI_Win_unlock #define MPI_Win_unlock(a1,a2) PyMPI_UNAVAILABLE("MPI_Win_unlock",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Win_lock_all #undef MPI_Win_lock_all #define MPI_Win_lock_all(a1,a2) PyMPI_UNAVAILABLE("MPI_Win_lock_all",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Win_unlock_all #undef MPI_Win_unlock_all #define MPI_Win_unlock_all(a1) PyMPI_UNAVAILABLE("MPI_Win_unlock_all",a1) #endif #ifndef PyMPI_HAVE_MPI_Win_flush #undef MPI_Win_flush #define MPI_Win_flush(a1,a2) PyMPI_UNAVAILABLE("MPI_Win_flush",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Win_flush_all #undef MPI_Win_flush_all #define MPI_Win_flush_all(a1) PyMPI_UNAVAILABLE("MPI_Win_flush_all",a1) #endif #ifndef PyMPI_HAVE_MPI_Win_flush_local #undef MPI_Win_flush_local #define MPI_Win_flush_local(a1,a2) PyMPI_UNAVAILABLE("MPI_Win_flush_local",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Win_flush_local_all #undef MPI_Win_flush_local_all #define MPI_Win_flush_local_all(a1) PyMPI_UNAVAILABLE("MPI_Win_flush_local_all",a1) #endif #ifndef PyMPI_HAVE_MPI_Win_sync #undef MPI_Win_sync #define MPI_Win_sync(a1) PyMPI_UNAVAILABLE("MPI_Win_sync",a1) #endif #ifndef PyMPI_HAVE_MPI_Win_get_name #undef MPI_Win_get_name #define MPI_Win_get_name(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Win_get_name",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Win_set_name #undef MPI_Win_set_name #define MPI_Win_set_name(a1,a2) PyMPI_UNAVAILABLE("MPI_Win_set_name",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_WIN_BASE #undef MPI_WIN_BASE #define MPI_WIN_BASE (MPI_KEYVAL_INVALID) #endif #ifndef PyMPI_HAVE_MPI_WIN_SIZE #undef MPI_WIN_SIZE #define MPI_WIN_SIZE (MPI_KEYVAL_INVALID) #endif #ifndef PyMPI_HAVE_MPI_WIN_DISP_UNIT #undef MPI_WIN_DISP_UNIT #define MPI_WIN_DISP_UNIT (MPI_KEYVAL_INVALID) #endif #ifndef PyMPI_HAVE_MPI_WIN_CREATE_FLAVOR #undef MPI_WIN_CREATE_FLAVOR #define MPI_WIN_CREATE_FLAVOR (MPI_KEYVAL_INVALID) #endif #ifndef PyMPI_HAVE_MPI_WIN_MODEL #undef MPI_WIN_MODEL #define MPI_WIN_MODEL (MPI_KEYVAL_INVALID) #endif #ifndef PyMPI_HAVE_MPI_WIN_FLAVOR_CREATE #undef MPI_WIN_FLAVOR_CREATE #define MPI_WIN_FLAVOR_CREATE (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_WIN_FLAVOR_ALLOCATE #undef MPI_WIN_FLAVOR_ALLOCATE #define MPI_WIN_FLAVOR_ALLOCATE (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_WIN_FLAVOR_DYNAMIC #undef MPI_WIN_FLAVOR_DYNAMIC #define MPI_WIN_FLAVOR_DYNAMIC (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_WIN_FLAVOR_SHARED #undef 
MPI_WIN_FLAVOR_SHARED #define MPI_WIN_FLAVOR_SHARED (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_WIN_SEPARATE #undef MPI_WIN_SEPARATE #define MPI_WIN_SEPARATE (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_WIN_UNIFIED #undef MPI_WIN_UNIFIED #define MPI_WIN_UNIFIED (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_Win_get_attr #undef MPI_Win_get_attr #define MPI_Win_get_attr(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Win_get_attr",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Win_set_attr #undef MPI_Win_set_attr #define MPI_Win_set_attr(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Win_set_attr",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Win_delete_attr #undef MPI_Win_delete_attr #define MPI_Win_delete_attr(a1,a2) PyMPI_UNAVAILABLE("MPI_Win_delete_attr",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Win_copy_attr_function #undef MPI_Win_copy_attr_function typedef int (MPIAPI PyMPI_MPI_Win_copy_attr_function)(MPI_Win,int,void*,void*,void*,int*); #define MPI_Win_copy_attr_function PyMPI_MPI_Win_copy_attr_function #endif #ifndef PyMPI_HAVE_MPI_Win_delete_attr_function #undef MPI_Win_delete_attr_function typedef int (MPIAPI PyMPI_MPI_Win_delete_attr_function)(MPI_Win,int,void*,void*); #define MPI_Win_delete_attr_function PyMPI_MPI_Win_delete_attr_function #endif #ifndef PyMPI_HAVE_MPI_WIN_DUP_FN #undef MPI_WIN_DUP_FN #define MPI_WIN_DUP_FN ((MPI_Win_copy_attr_function*)0) #endif #ifndef PyMPI_HAVE_MPI_WIN_NULL_COPY_FN #undef MPI_WIN_NULL_COPY_FN #define MPI_WIN_NULL_COPY_FN ((MPI_Win_copy_attr_function*)0) #endif #ifndef PyMPI_HAVE_MPI_WIN_NULL_DELETE_FN #undef MPI_WIN_NULL_DELETE_FN #define MPI_WIN_NULL_DELETE_FN ((MPI_Win_delete_attr_function*)0) #endif #ifndef PyMPI_HAVE_MPI_Win_create_keyval #undef MPI_Win_create_keyval #define MPI_Win_create_keyval(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Win_create_keyval",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Win_free_keyval #undef MPI_Win_free_keyval #define MPI_Win_free_keyval(a1) PyMPI_UNAVAILABLE("MPI_Win_free_keyval",a1) #endif #ifndef PyMPI_HAVE_MPI_Win_errhandler_fn #undef MPI_Win_errhandler_fn typedef void (MPIAPI PyMPI_MPI_Win_errhandler_fn)(MPI_Win*,int*,...); #define MPI_Win_errhandler_fn PyMPI_MPI_Win_errhandler_fn #endif #ifndef PyMPI_HAVE_MPI_Win_errhandler_function #undef MPI_Win_errhandler_function #define MPI_Win_errhandler_function MPI_Win_errhandler_fn #endif #ifndef PyMPI_HAVE_MPI_Win_create_errhandler #undef MPI_Win_create_errhandler #define MPI_Win_create_errhandler(a1,a2) PyMPI_UNAVAILABLE("MPI_Win_create_errhandler",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Win_get_errhandler #undef MPI_Win_get_errhandler #define MPI_Win_get_errhandler(a1,a2) PyMPI_UNAVAILABLE("MPI_Win_get_errhandler",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Win_set_errhandler #undef MPI_Win_set_errhandler #define MPI_Win_set_errhandler(a1,a2) PyMPI_UNAVAILABLE("MPI_Win_set_errhandler",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Win_call_errhandler #undef MPI_Win_call_errhandler #define MPI_Win_call_errhandler(a1,a2) PyMPI_UNAVAILABLE("MPI_Win_call_errhandler",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Win_create_c #undef MPI_Win_create_c #define MPI_Win_create_c(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Win_create_c",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Win_allocate_c #undef MPI_Win_allocate_c #define MPI_Win_allocate_c(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Win_allocate_c",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Win_allocate_shared_c #undef MPI_Win_allocate_shared_c #define MPI_Win_allocate_shared_c(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Win_allocate_shared_c",a1,a2,a3,a4,a5,a6) #endif #ifndef 
PyMPI_HAVE_MPI_Win_shared_query_c #undef MPI_Win_shared_query_c #define MPI_Win_shared_query_c(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Win_shared_query_c",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Get_c #undef MPI_Get_c #define MPI_Get_c(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Get_c",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Put_c #undef MPI_Put_c #define MPI_Put_c(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Put_c",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Accumulate_c #undef MPI_Accumulate_c #define MPI_Accumulate_c(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Accumulate_c",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Get_accumulate_c #undef MPI_Get_accumulate_c #define MPI_Get_accumulate_c(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) PyMPI_UNAVAILABLE("MPI_Get_accumulate_c",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) #endif #ifndef PyMPI_HAVE_MPI_Rget_c #undef MPI_Rget_c #define MPI_Rget_c(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Rget_c",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Rput_c #undef MPI_Rput_c #define MPI_Rput_c(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Rput_c",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Raccumulate_c #undef MPI_Raccumulate_c #define MPI_Raccumulate_c(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Raccumulate_c",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Rget_accumulate_c #undef MPI_Rget_accumulate_c #define MPI_Rget_accumulate_c(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13) PyMPI_UNAVAILABLE("MPI_Rget_accumulate_c",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13) #endif #ifndef PyMPI_HAVE_MPI_FILE_NULL #undef MPI_FILE_NULL #define MPI_FILE_NULL ((MPI_File)0) #endif #ifndef PyMPI_HAVE_MPI_MODE_RDONLY #undef MPI_MODE_RDONLY #define MPI_MODE_RDONLY (1) #endif #ifndef PyMPI_HAVE_MPI_MODE_RDWR #undef MPI_MODE_RDWR #define MPI_MODE_RDWR (2) #endif #ifndef PyMPI_HAVE_MPI_MODE_WRONLY #undef MPI_MODE_WRONLY #define MPI_MODE_WRONLY (4) #endif #ifndef PyMPI_HAVE_MPI_MODE_CREATE #undef MPI_MODE_CREATE #define MPI_MODE_CREATE (8) #endif #ifndef PyMPI_HAVE_MPI_MODE_EXCL #undef MPI_MODE_EXCL #define MPI_MODE_EXCL (16) #endif #ifndef PyMPI_HAVE_MPI_MODE_DELETE_ON_CLOSE #undef MPI_MODE_DELETE_ON_CLOSE #define MPI_MODE_DELETE_ON_CLOSE (32) #endif #ifndef PyMPI_HAVE_MPI_MODE_UNIQUE_OPEN #undef MPI_MODE_UNIQUE_OPEN #define MPI_MODE_UNIQUE_OPEN (64) #endif #ifndef PyMPI_HAVE_MPI_MODE_APPEND #undef MPI_MODE_APPEND #define MPI_MODE_APPEND (128) #endif #ifndef PyMPI_HAVE_MPI_MODE_SEQUENTIAL #undef MPI_MODE_SEQUENTIAL #define MPI_MODE_SEQUENTIAL (256) #endif #ifndef PyMPI_HAVE_MPI_File_open #undef MPI_File_open #define MPI_File_open(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_open",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_close #undef MPI_File_close #define MPI_File_close(a1) PyMPI_UNAVAILABLE("MPI_File_close",a1) #endif #ifndef PyMPI_HAVE_MPI_File_delete #undef MPI_File_delete #define MPI_File_delete(a1,a2) PyMPI_UNAVAILABLE("MPI_File_delete",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_File_set_size #undef MPI_File_set_size #define MPI_File_set_size(a1,a2) PyMPI_UNAVAILABLE("MPI_File_set_size",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_File_preallocate #undef MPI_File_preallocate #define MPI_File_preallocate(a1,a2) PyMPI_UNAVAILABLE("MPI_File_preallocate",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_File_get_size #undef MPI_File_get_size #define MPI_File_get_size(a1,a2) PyMPI_UNAVAILABLE("MPI_File_get_size",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_File_get_group #undef 
MPI_File_get_group #define MPI_File_get_group(a1,a2) PyMPI_UNAVAILABLE("MPI_File_get_group",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_File_get_amode #undef MPI_File_get_amode #define MPI_File_get_amode(a1,a2) PyMPI_UNAVAILABLE("MPI_File_get_amode",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_File_set_info #undef MPI_File_set_info #define MPI_File_set_info(a1,a2) PyMPI_UNAVAILABLE("MPI_File_set_info",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_File_get_info #undef MPI_File_get_info #define MPI_File_get_info(a1,a2) PyMPI_UNAVAILABLE("MPI_File_get_info",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_File_get_view #undef MPI_File_get_view #define MPI_File_get_view(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_get_view",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_set_view #undef MPI_File_set_view #define MPI_File_set_view(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_File_set_view",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_File_read_at #undef MPI_File_read_at #define MPI_File_read_at(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_File_read_at",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_File_read_at_all #undef MPI_File_read_at_all #define MPI_File_read_at_all(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_File_read_at_all",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_File_write_at #undef MPI_File_write_at #define MPI_File_write_at(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_File_write_at",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_File_write_at_all #undef MPI_File_write_at_all #define MPI_File_write_at_all(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_File_write_at_all",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_File_iread_at #undef MPI_File_iread_at #define MPI_File_iread_at(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_File_iread_at",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_File_iread_at_all #undef MPI_File_iread_at_all #define MPI_File_iread_at_all(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_File_iread_at_all",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_File_iwrite_at #undef MPI_File_iwrite_at #define MPI_File_iwrite_at(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_File_iwrite_at",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_File_iwrite_at_all #undef MPI_File_iwrite_at_all #define MPI_File_iwrite_at_all(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_File_iwrite_at_all",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_SEEK_SET #undef MPI_SEEK_SET #define MPI_SEEK_SET (0) #endif #ifndef PyMPI_HAVE_MPI_SEEK_CUR #undef MPI_SEEK_CUR #define MPI_SEEK_CUR (1) #endif #ifndef PyMPI_HAVE_MPI_SEEK_END #undef MPI_SEEK_END #define MPI_SEEK_END (2) #endif #ifndef PyMPI_HAVE_MPI_DISPLACEMENT_CURRENT #undef MPI_DISPLACEMENT_CURRENT #define MPI_DISPLACEMENT_CURRENT (0) #endif #ifndef PyMPI_HAVE_MPI_File_seek #undef MPI_File_seek #define MPI_File_seek(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_File_seek",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_File_get_position #undef MPI_File_get_position #define MPI_File_get_position(a1,a2) PyMPI_UNAVAILABLE("MPI_File_get_position",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_File_get_byte_offset #undef MPI_File_get_byte_offset #define MPI_File_get_byte_offset(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_File_get_byte_offset",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_File_read #undef MPI_File_read #define MPI_File_read(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_read",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_read_all #undef MPI_File_read_all #define MPI_File_read_all(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_read_all",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_write #undef MPI_File_write #define 
MPI_File_write(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_write",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_write_all #undef MPI_File_write_all #define MPI_File_write_all(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_write_all",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_iread #undef MPI_File_iread #define MPI_File_iread(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_iread",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_iread_all #undef MPI_File_iread_all #define MPI_File_iread_all(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_iread_all",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_iwrite #undef MPI_File_iwrite #define MPI_File_iwrite(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_iwrite",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_iwrite_all #undef MPI_File_iwrite_all #define MPI_File_iwrite_all(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_iwrite_all",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_read_shared #undef MPI_File_read_shared #define MPI_File_read_shared(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_read_shared",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_write_shared #undef MPI_File_write_shared #define MPI_File_write_shared(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_write_shared",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_iread_shared #undef MPI_File_iread_shared #define MPI_File_iread_shared(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_iread_shared",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_iwrite_shared #undef MPI_File_iwrite_shared #define MPI_File_iwrite_shared(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_iwrite_shared",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_read_ordered #undef MPI_File_read_ordered #define MPI_File_read_ordered(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_read_ordered",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_write_ordered #undef MPI_File_write_ordered #define MPI_File_write_ordered(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_write_ordered",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_seek_shared #undef MPI_File_seek_shared #define MPI_File_seek_shared(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_File_seek_shared",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_File_get_position_shared #undef MPI_File_get_position_shared #define MPI_File_get_position_shared(a1,a2) PyMPI_UNAVAILABLE("MPI_File_get_position_shared",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_File_read_at_all_begin #undef MPI_File_read_at_all_begin #define MPI_File_read_at_all_begin(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_read_at_all_begin",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_read_at_all_end #undef MPI_File_read_at_all_end #define MPI_File_read_at_all_end(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_File_read_at_all_end",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_File_write_at_all_begin #undef MPI_File_write_at_all_begin #define MPI_File_write_at_all_begin(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_write_at_all_begin",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_write_at_all_end #undef MPI_File_write_at_all_end #define MPI_File_write_at_all_end(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_File_write_at_all_end",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_File_read_all_begin #undef MPI_File_read_all_begin #define MPI_File_read_all_begin(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_File_read_all_begin",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_File_read_all_end #undef MPI_File_read_all_end #define MPI_File_read_all_end(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_File_read_all_end",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_File_write_all_begin #undef 
MPI_File_write_all_begin #define MPI_File_write_all_begin(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_File_write_all_begin",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_File_write_all_end #undef MPI_File_write_all_end #define MPI_File_write_all_end(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_File_write_all_end",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_File_read_ordered_begin #undef MPI_File_read_ordered_begin #define MPI_File_read_ordered_begin(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_File_read_ordered_begin",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_File_read_ordered_end #undef MPI_File_read_ordered_end #define MPI_File_read_ordered_end(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_File_read_ordered_end",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_File_write_ordered_begin #undef MPI_File_write_ordered_begin #define MPI_File_write_ordered_begin(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_File_write_ordered_begin",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_File_write_ordered_end #undef MPI_File_write_ordered_end #define MPI_File_write_ordered_end(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_File_write_ordered_end",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_File_get_type_extent #undef MPI_File_get_type_extent #define MPI_File_get_type_extent(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_File_get_type_extent",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_File_set_atomicity #undef MPI_File_set_atomicity #define MPI_File_set_atomicity(a1,a2) PyMPI_UNAVAILABLE("MPI_File_set_atomicity",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_File_get_atomicity #undef MPI_File_get_atomicity #define MPI_File_get_atomicity(a1,a2) PyMPI_UNAVAILABLE("MPI_File_get_atomicity",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_File_sync #undef MPI_File_sync #define MPI_File_sync(a1) PyMPI_UNAVAILABLE("MPI_File_sync",a1) #endif #ifndef PyMPI_HAVE_MPI_File_errhandler_fn #undef MPI_File_errhandler_fn typedef void (MPIAPI PyMPI_MPI_File_errhandler_fn)(MPI_File*,int*,...); #define MPI_File_errhandler_fn PyMPI_MPI_File_errhandler_fn #endif #ifndef PyMPI_HAVE_MPI_File_errhandler_function #undef MPI_File_errhandler_function #define MPI_File_errhandler_function MPI_File_errhandler_fn #endif #ifndef PyMPI_HAVE_MPI_File_create_errhandler #undef MPI_File_create_errhandler #define MPI_File_create_errhandler(a1,a2) PyMPI_UNAVAILABLE("MPI_File_create_errhandler",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_File_get_errhandler #undef MPI_File_get_errhandler #define MPI_File_get_errhandler(a1,a2) PyMPI_UNAVAILABLE("MPI_File_get_errhandler",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_File_set_errhandler #undef MPI_File_set_errhandler #define MPI_File_set_errhandler(a1,a2) PyMPI_UNAVAILABLE("MPI_File_set_errhandler",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_File_call_errhandler #undef MPI_File_call_errhandler #define MPI_File_call_errhandler(a1,a2) PyMPI_UNAVAILABLE("MPI_File_call_errhandler",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Datarep_conversion_function #undef MPI_Datarep_conversion_function typedef int (MPIAPI PyMPI_MPI_Datarep_conversion_function)(void*,MPI_Datatype,int,void*,MPI_Offset,void*); #define MPI_Datarep_conversion_function PyMPI_MPI_Datarep_conversion_function #endif #ifndef PyMPI_HAVE_MPI_Datarep_extent_function #undef MPI_Datarep_extent_function typedef int (MPIAPI PyMPI_MPI_Datarep_extent_function)(MPI_Datatype,MPI_Aint*,void*); #define MPI_Datarep_extent_function PyMPI_MPI_Datarep_extent_function #endif #ifndef PyMPI_HAVE_MPI_CONVERSION_FN_NULL #undef MPI_CONVERSION_FN_NULL #define MPI_CONVERSION_FN_NULL ((MPI_Datarep_conversion_function*)0) #endif #ifndef PyMPI_HAVE_MPI_MAX_DATAREP_STRING #undef MPI_MAX_DATAREP_STRING #define 
MPI_MAX_DATAREP_STRING (1) #endif #ifndef PyMPI_HAVE_MPI_Register_datarep #undef MPI_Register_datarep #define MPI_Register_datarep(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Register_datarep",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_read_at_c #undef MPI_File_read_at_c #define MPI_File_read_at_c(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_File_read_at_c",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_File_read_at_all_c #undef MPI_File_read_at_all_c #define MPI_File_read_at_all_c(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_File_read_at_all_c",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_File_write_at_c #undef MPI_File_write_at_c #define MPI_File_write_at_c(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_File_write_at_c",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_File_write_at_all_c #undef MPI_File_write_at_all_c #define MPI_File_write_at_all_c(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_File_write_at_all_c",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_File_iread_at_c #undef MPI_File_iread_at_c #define MPI_File_iread_at_c(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_File_iread_at_c",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_File_iread_at_all_c #undef MPI_File_iread_at_all_c #define MPI_File_iread_at_all_c(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_File_iread_at_all_c",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_File_iwrite_at_c #undef MPI_File_iwrite_at_c #define MPI_File_iwrite_at_c(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_File_iwrite_at_c",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_File_iwrite_at_all_c #undef MPI_File_iwrite_at_all_c #define MPI_File_iwrite_at_all_c(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_File_iwrite_at_all_c",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_File_read_c #undef MPI_File_read_c #define MPI_File_read_c(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_read_c",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_read_all_c #undef MPI_File_read_all_c #define MPI_File_read_all_c(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_read_all_c",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_write_c #undef MPI_File_write_c #define MPI_File_write_c(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_write_c",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_write_all_c #undef MPI_File_write_all_c #define MPI_File_write_all_c(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_write_all_c",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_iread_c #undef MPI_File_iread_c #define MPI_File_iread_c(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_iread_c",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_iread_all_c #undef MPI_File_iread_all_c #define MPI_File_iread_all_c(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_iread_all_c",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_iwrite_c #undef MPI_File_iwrite_c #define MPI_File_iwrite_c(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_iwrite_c",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_iwrite_all_c #undef MPI_File_iwrite_all_c #define MPI_File_iwrite_all_c(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_iwrite_all_c",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_read_shared_c #undef MPI_File_read_shared_c #define MPI_File_read_shared_c(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_read_shared_c",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_write_shared_c #undef MPI_File_write_shared_c #define MPI_File_write_shared_c(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_write_shared_c",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_iread_shared_c #undef MPI_File_iread_shared_c #define MPI_File_iread_shared_c(a1,a2,a3,a4,a5) 
PyMPI_UNAVAILABLE("MPI_File_iread_shared_c",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_iwrite_shared_c #undef MPI_File_iwrite_shared_c #define MPI_File_iwrite_shared_c(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_iwrite_shared_c",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_read_ordered_c #undef MPI_File_read_ordered_c #define MPI_File_read_ordered_c(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_read_ordered_c",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_write_ordered_c #undef MPI_File_write_ordered_c #define MPI_File_write_ordered_c(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_write_ordered_c",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_read_at_all_begin_c #undef MPI_File_read_at_all_begin_c #define MPI_File_read_at_all_begin_c(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_read_at_all_begin_c",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_write_at_all_begin_c #undef MPI_File_write_at_all_begin_c #define MPI_File_write_at_all_begin_c(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_write_at_all_begin_c",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_read_all_begin_c #undef MPI_File_read_all_begin_c #define MPI_File_read_all_begin_c(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_File_read_all_begin_c",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_File_write_all_begin_c #undef MPI_File_write_all_begin_c #define MPI_File_write_all_begin_c(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_File_write_all_begin_c",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_File_read_ordered_begin_c #undef MPI_File_read_ordered_begin_c #define MPI_File_read_ordered_begin_c(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_File_read_ordered_begin_c",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_File_write_ordered_begin_c #undef MPI_File_write_ordered_begin_c #define MPI_File_write_ordered_begin_c(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_File_write_ordered_begin_c",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_File_get_type_extent_c #undef MPI_File_get_type_extent_c #define MPI_File_get_type_extent_c(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_File_get_type_extent_c",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Datarep_conversion_function_c #undef MPI_Datarep_conversion_function_c typedef int (MPIAPI PyMPI_MPI_Datarep_conversion_function_c)(void*,MPI_Datatype,MPI_Count,void*,MPI_Offset,void*); #define MPI_Datarep_conversion_function_c PyMPI_MPI_Datarep_conversion_function_c #endif #ifndef PyMPI_HAVE_MPI_CONVERSION_FN_NULL_C #undef MPI_CONVERSION_FN_NULL_C #define MPI_CONVERSION_FN_NULL_C ((MPI_Datarep_conversion_function_c*)0) #endif #ifndef PyMPI_HAVE_MPI_Register_datarep_c #undef MPI_Register_datarep_c #define MPI_Register_datarep_c(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Register_datarep_c",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_MAX_ERROR_STRING #undef MPI_MAX_ERROR_STRING #define MPI_MAX_ERROR_STRING (1) #endif #ifndef PyMPI_HAVE_MPI_Error_class #undef MPI_Error_class #define MPI_Error_class(a1,a2) PyMPI_UNAVAILABLE("MPI_Error_class",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Error_string #undef MPI_Error_string #define MPI_Error_string(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Error_string",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Add_error_class #undef MPI_Add_error_class #define MPI_Add_error_class(a1) PyMPI_UNAVAILABLE("MPI_Add_error_class",a1) #endif #ifndef PyMPI_HAVE_MPI_Remove_error_class #undef MPI_Remove_error_class #define MPI_Remove_error_class(a1) PyMPI_UNAVAILABLE("MPI_Remove_error_class",a1) #endif #ifndef PyMPI_HAVE_MPI_Add_error_code #undef MPI_Add_error_code #define MPI_Add_error_code(a1,a2) PyMPI_UNAVAILABLE("MPI_Add_error_code",a1,a2) #endif 
#ifndef PyMPI_HAVE_MPI_Remove_error_code #undef MPI_Remove_error_code #define MPI_Remove_error_code(a1) PyMPI_UNAVAILABLE("MPI_Remove_error_code",a1) #endif #ifndef PyMPI_HAVE_MPI_Add_error_string #undef MPI_Add_error_string #define MPI_Add_error_string(a1,a2) PyMPI_UNAVAILABLE("MPI_Add_error_string",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Remove_error_string #undef MPI_Remove_error_string #define MPI_Remove_error_string(a1) PyMPI_UNAVAILABLE("MPI_Remove_error_string",a1) #endif #ifndef PyMPI_HAVE_MPI_SUCCESS #undef MPI_SUCCESS #define MPI_SUCCESS (0) #endif #ifndef PyMPI_HAVE_MPI_ERR_LASTCODE #undef MPI_ERR_LASTCODE #define MPI_ERR_LASTCODE (1) #endif #ifndef PyMPI_HAVE_MPI_ERR_TYPE #undef MPI_ERR_TYPE #define MPI_ERR_TYPE (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_REQUEST #undef MPI_ERR_REQUEST #define MPI_ERR_REQUEST (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_OP #undef MPI_ERR_OP #define MPI_ERR_OP (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_GROUP #undef MPI_ERR_GROUP #define MPI_ERR_GROUP (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_INFO #undef MPI_ERR_INFO #define MPI_ERR_INFO (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_ERRHANDLER #undef MPI_ERR_ERRHANDLER #define MPI_ERR_ERRHANDLER (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_SESSION #undef MPI_ERR_SESSION #define MPI_ERR_SESSION (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_COMM #undef MPI_ERR_COMM #define MPI_ERR_COMM (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_WIN #undef MPI_ERR_WIN #define MPI_ERR_WIN (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_FILE #undef MPI_ERR_FILE #define MPI_ERR_FILE (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_BUFFER #undef MPI_ERR_BUFFER #define MPI_ERR_BUFFER (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_COUNT #undef MPI_ERR_COUNT #define MPI_ERR_COUNT (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_TAG #undef MPI_ERR_TAG #define MPI_ERR_TAG (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_RANK #undef MPI_ERR_RANK #define MPI_ERR_RANK (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_ROOT #undef MPI_ERR_ROOT #define MPI_ERR_ROOT (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_TRUNCATE #undef MPI_ERR_TRUNCATE #define MPI_ERR_TRUNCATE (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_IN_STATUS #undef MPI_ERR_IN_STATUS #define MPI_ERR_IN_STATUS (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_PENDING #undef MPI_ERR_PENDING #define MPI_ERR_PENDING (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_TOPOLOGY #undef MPI_ERR_TOPOLOGY #define MPI_ERR_TOPOLOGY (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_DIMS #undef MPI_ERR_DIMS #define MPI_ERR_DIMS (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_ARG #undef MPI_ERR_ARG #define MPI_ERR_ARG (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_OTHER #undef MPI_ERR_OTHER #define MPI_ERR_OTHER (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_UNKNOWN #undef MPI_ERR_UNKNOWN #define MPI_ERR_UNKNOWN (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_INTERN #undef MPI_ERR_INTERN #define MPI_ERR_INTERN (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_KEYVAL #undef MPI_ERR_KEYVAL #define MPI_ERR_KEYVAL (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_NO_MEM #undef MPI_ERR_NO_MEM #define MPI_ERR_NO_MEM (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_INFO_KEY #undef MPI_ERR_INFO_KEY #define MPI_ERR_INFO_KEY (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_INFO_VALUE #undef MPI_ERR_INFO_VALUE #define MPI_ERR_INFO_VALUE 
(MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_INFO_NOKEY #undef MPI_ERR_INFO_NOKEY #define MPI_ERR_INFO_NOKEY (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_SPAWN #undef MPI_ERR_SPAWN #define MPI_ERR_SPAWN (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_PORT #undef MPI_ERR_PORT #define MPI_ERR_PORT (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_SERVICE #undef MPI_ERR_SERVICE #define MPI_ERR_SERVICE (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_NAME #undef MPI_ERR_NAME #define MPI_ERR_NAME (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_PROC_ABORTED #undef MPI_ERR_PROC_ABORTED #define MPI_ERR_PROC_ABORTED (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_BASE #undef MPI_ERR_BASE #define MPI_ERR_BASE (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_SIZE #undef MPI_ERR_SIZE #define MPI_ERR_SIZE (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_DISP #undef MPI_ERR_DISP #define MPI_ERR_DISP (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_ASSERT #undef MPI_ERR_ASSERT #define MPI_ERR_ASSERT (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_LOCKTYPE #undef MPI_ERR_LOCKTYPE #define MPI_ERR_LOCKTYPE (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_RMA_CONFLICT #undef MPI_ERR_RMA_CONFLICT #define MPI_ERR_RMA_CONFLICT (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_RMA_SYNC #undef MPI_ERR_RMA_SYNC #define MPI_ERR_RMA_SYNC (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_RMA_RANGE #undef MPI_ERR_RMA_RANGE #define MPI_ERR_RMA_RANGE (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_RMA_ATTACH #undef MPI_ERR_RMA_ATTACH #define MPI_ERR_RMA_ATTACH (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_RMA_SHARED #undef MPI_ERR_RMA_SHARED #define MPI_ERR_RMA_SHARED (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_RMA_FLAVOR #undef MPI_ERR_RMA_FLAVOR #define MPI_ERR_RMA_FLAVOR (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_BAD_FILE #undef MPI_ERR_BAD_FILE #define MPI_ERR_BAD_FILE (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_NO_SUCH_FILE #undef MPI_ERR_NO_SUCH_FILE #define MPI_ERR_NO_SUCH_FILE (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_FILE_EXISTS #undef MPI_ERR_FILE_EXISTS #define MPI_ERR_FILE_EXISTS (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_FILE_IN_USE #undef MPI_ERR_FILE_IN_USE #define MPI_ERR_FILE_IN_USE (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_AMODE #undef MPI_ERR_AMODE #define MPI_ERR_AMODE (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_ACCESS #undef MPI_ERR_ACCESS #define MPI_ERR_ACCESS (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_READ_ONLY #undef MPI_ERR_READ_ONLY #define MPI_ERR_READ_ONLY (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_NO_SPACE #undef MPI_ERR_NO_SPACE #define MPI_ERR_NO_SPACE (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_QUOTA #undef MPI_ERR_QUOTA #define MPI_ERR_QUOTA (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_UNSUPPORTED_OPERATION #undef MPI_ERR_UNSUPPORTED_OPERATION #define MPI_ERR_UNSUPPORTED_OPERATION (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_NOT_SAME #undef MPI_ERR_NOT_SAME #define MPI_ERR_NOT_SAME (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_IO #undef MPI_ERR_IO #define MPI_ERR_IO (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_UNSUPPORTED_DATAREP #undef MPI_ERR_UNSUPPORTED_DATAREP #define MPI_ERR_UNSUPPORTED_DATAREP (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_CONVERSION #undef MPI_ERR_CONVERSION #define MPI_ERR_CONVERSION (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_DUP_DATAREP #undef 
MPI_ERR_DUP_DATAREP #define MPI_ERR_DUP_DATAREP (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_VALUE_TOO_LARGE #undef MPI_ERR_VALUE_TOO_LARGE #define MPI_ERR_VALUE_TOO_LARGE (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_Alloc_mem #undef MPI_Alloc_mem #define MPI_Alloc_mem(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Alloc_mem",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Free_mem #undef MPI_Free_mem #define MPI_Free_mem(a1) PyMPI_UNAVAILABLE("MPI_Free_mem",a1) #endif #ifndef PyMPI_HAVE_MPI_Init #undef MPI_Init #define MPI_Init(a1,a2) PyMPI_UNAVAILABLE("MPI_Init",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Finalize #undef MPI_Finalize #define MPI_Finalize() PyMPI_UNAVAILABLE("MPI_Finalize") #endif #ifndef PyMPI_HAVE_MPI_Initialized #undef MPI_Initialized #define MPI_Initialized(a1) PyMPI_UNAVAILABLE("MPI_Initialized",a1) #endif #ifndef PyMPI_HAVE_MPI_Finalized #undef MPI_Finalized #define MPI_Finalized(a1) PyMPI_UNAVAILABLE("MPI_Finalized",a1) #endif #ifndef PyMPI_HAVE_MPI_THREAD_SINGLE #undef MPI_THREAD_SINGLE #define MPI_THREAD_SINGLE (0) #endif #ifndef PyMPI_HAVE_MPI_THREAD_FUNNELED #undef MPI_THREAD_FUNNELED #define MPI_THREAD_FUNNELED (1) #endif #ifndef PyMPI_HAVE_MPI_THREAD_SERIALIZED #undef MPI_THREAD_SERIALIZED #define MPI_THREAD_SERIALIZED (2) #endif #ifndef PyMPI_HAVE_MPI_THREAD_MULTIPLE #undef MPI_THREAD_MULTIPLE #define MPI_THREAD_MULTIPLE (3) #endif #ifndef PyMPI_HAVE_MPI_Init_thread #undef MPI_Init_thread #define MPI_Init_thread(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Init_thread",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Query_thread #undef MPI_Query_thread #define MPI_Query_thread(a1) PyMPI_UNAVAILABLE("MPI_Query_thread",a1) #endif #ifndef PyMPI_HAVE_MPI_Is_thread_main #undef MPI_Is_thread_main #define MPI_Is_thread_main(a1) PyMPI_UNAVAILABLE("MPI_Is_thread_main",a1) #endif #ifndef PyMPI_HAVE_MPI_VERSION #undef MPI_VERSION #define MPI_VERSION (1) #endif #ifndef PyMPI_HAVE_MPI_SUBVERSION #undef MPI_SUBVERSION #define MPI_SUBVERSION (0) #endif #ifndef PyMPI_HAVE_MPI_Get_version #undef MPI_Get_version #define MPI_Get_version(a1,a2) PyMPI_UNAVAILABLE("MPI_Get_version",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_MAX_LIBRARY_VERSION_STRING #undef MPI_MAX_LIBRARY_VERSION_STRING #define MPI_MAX_LIBRARY_VERSION_STRING (1) #endif #ifndef PyMPI_HAVE_MPI_Get_library_version #undef MPI_Get_library_version #define MPI_Get_library_version(a1,a2) PyMPI_UNAVAILABLE("MPI_Get_library_version",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_MAX_PROCESSOR_NAME #undef MPI_MAX_PROCESSOR_NAME #define MPI_MAX_PROCESSOR_NAME (1) #endif #ifndef PyMPI_HAVE_MPI_Get_processor_name #undef MPI_Get_processor_name #define MPI_Get_processor_name(a1,a2) PyMPI_UNAVAILABLE("MPI_Get_processor_name",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Get_hw_resource_info #undef MPI_Get_hw_resource_info #define MPI_Get_hw_resource_info(a1) PyMPI_UNAVAILABLE("MPI_Get_hw_resource_info",a1) #endif #ifndef PyMPI_HAVE_MPI_Wtime #undef MPI_Wtime #define MPI_Wtime() PyMPI_UNAVAILABLE("MPI_Wtime") #endif #ifndef PyMPI_HAVE_MPI_Wtick #undef MPI_Wtick #define MPI_Wtick() PyMPI_UNAVAILABLE("MPI_Wtick") #endif #ifndef PyMPI_HAVE_MPI_Pcontrol #undef MPI_Pcontrol #define MPI_Pcontrol(a1) PyMPI_UNAVAILABLE("MPI_Pcontrol",a1) #endif #ifndef PyMPI_HAVE_MPI_Fint #undef MPI_Fint typedef int PyMPI_MPI_Fint; #define MPI_Fint PyMPI_MPI_Fint #endif #ifndef PyMPI_HAVE_MPI_F_SOURCE #undef MPI_F_SOURCE #define MPI_F_SOURCE (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_F_TAG #undef MPI_F_TAG #define MPI_F_TAG (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_F_ERROR #undef MPI_F_ERROR #define 
MPI_F_ERROR (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_F_STATUS_SIZE #undef MPI_F_STATUS_SIZE #define MPI_F_STATUS_SIZE (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_F_STATUS_IGNORE #undef MPI_F_STATUS_IGNORE #define MPI_F_STATUS_IGNORE ((MPI_Fint*)0) #endif #ifndef PyMPI_HAVE_MPI_F_STATUSES_IGNORE #undef MPI_F_STATUSES_IGNORE #define MPI_F_STATUSES_IGNORE ((MPI_Fint*)0) #endif #ifndef PyMPI_HAVE_MPI_Status_c2f #undef MPI_Status_c2f #define MPI_Status_c2f(a1,a2) PyMPI_UNAVAILABLE("MPI_Status_c2f",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Status_f2c #undef MPI_Status_f2c #define MPI_Status_f2c(a1,a2) PyMPI_UNAVAILABLE("MPI_Status_f2c",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Type_c2f #undef MPI_Type_c2f #define MPI_Type_c2f(a1) ((void)a1,(MPI_Fint)0) #endif #ifndef PyMPI_HAVE_MPI_Request_c2f #undef MPI_Request_c2f #define MPI_Request_c2f(a1) ((void)a1,(MPI_Fint)0) #endif #ifndef PyMPI_HAVE_MPI_Message_c2f #undef MPI_Message_c2f #define MPI_Message_c2f(a1) ((void)a1,(MPI_Fint)0) #endif #ifndef PyMPI_HAVE_MPI_Op_c2f #undef MPI_Op_c2f #define MPI_Op_c2f(a1) ((void)a1,(MPI_Fint)0) #endif #ifndef PyMPI_HAVE_MPI_Group_c2f #undef MPI_Group_c2f #define MPI_Group_c2f(a1) ((void)a1,(MPI_Fint)0) #endif #ifndef PyMPI_HAVE_MPI_Info_c2f #undef MPI_Info_c2f #define MPI_Info_c2f(a1) ((void)a1,(MPI_Fint)0) #endif #ifndef PyMPI_HAVE_MPI_Session_c2f #undef MPI_Session_c2f #define MPI_Session_c2f(a1) ((void)a1,(MPI_Fint)0) #endif #ifndef PyMPI_HAVE_MPI_Comm_c2f #undef MPI_Comm_c2f #define MPI_Comm_c2f(a1) ((void)a1,(MPI_Fint)0) #endif #ifndef PyMPI_HAVE_MPI_Win_c2f #undef MPI_Win_c2f #define MPI_Win_c2f(a1) ((void)a1,(MPI_Fint)0) #endif #ifndef PyMPI_HAVE_MPI_File_c2f #undef MPI_File_c2f #define MPI_File_c2f(a1) ((void)a1,(MPI_Fint)0) #endif #ifndef PyMPI_HAVE_MPI_Errhandler_c2f #undef MPI_Errhandler_c2f #define MPI_Errhandler_c2f(a1) ((void)a1,(MPI_Fint)0) #endif #ifndef PyMPI_HAVE_MPI_Type_f2c #undef MPI_Type_f2c #define MPI_Type_f2c(a1) ((void)a1,MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_Request_f2c #undef MPI_Request_f2c #define MPI_Request_f2c(a1) ((void)a1,MPI_REQUEST_NULL) #endif #ifndef PyMPI_HAVE_MPI_Message_f2c #undef MPI_Message_f2c #define MPI_Message_f2c(a1) ((void)a1,MPI_MESSAGE_NULL) #endif #ifndef PyMPI_HAVE_MPI_Op_f2c #undef MPI_Op_f2c #define MPI_Op_f2c(a1) ((void)a1,MPI_OP_NULL) #endif #ifndef PyMPI_HAVE_MPI_Group_f2c #undef MPI_Group_f2c #define MPI_Group_f2c(a1) ((void)a1,MPI_GROUP_NULL) #endif #ifndef PyMPI_HAVE_MPI_Info_f2c #undef MPI_Info_f2c #define MPI_Info_f2c(a1) ((void)a1,MPI_INFO_NULL) #endif #ifndef PyMPI_HAVE_MPI_Session_f2c #undef MPI_Session_f2c #define MPI_Session_f2c(a1) ((void)a1,MPI_SESSION_NULL) #endif #ifndef PyMPI_HAVE_MPI_Comm_f2c #undef MPI_Comm_f2c #define MPI_Comm_f2c(a1) ((void)a1,MPI_COMM_NULL) #endif #ifndef PyMPI_HAVE_MPI_Win_f2c #undef MPI_Win_f2c #define MPI_Win_f2c(a1) ((void)a1,MPI_WIN_NULL) #endif #ifndef PyMPI_HAVE_MPI_File_f2c #undef MPI_File_f2c #define MPI_File_f2c(a1) ((void)a1,MPI_FILE_NULL) #endif #ifndef PyMPI_HAVE_MPI_Errhandler_f2c #undef MPI_Errhandler_f2c #define MPI_Errhandler_f2c(a1) ((void)a1,MPI_ERRHANDLER_NULL) #endif #ifndef PyMPI_HAVE_MPI_HOST #undef MPI_HOST #define MPI_HOST (MPI_KEYVAL_INVALID) #endif #ifndef PyMPI_HAVE_MPI_Info_get #undef MPI_Info_get #define MPI_Info_get(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Info_get",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Info_get_valuelen #undef MPI_Info_get_valuelen #define MPI_Info_get_valuelen(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Info_get_valuelen",a1,a2,a3,a4) #endif #ifndef 
PyMPI_HAVE_MPI_Attr_get #undef MPI_Attr_get #define MPI_Attr_get(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Attr_get",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Attr_put #undef MPI_Attr_put #define MPI_Attr_put(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Attr_put",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Attr_delete #undef MPI_Attr_delete #define MPI_Attr_delete(a1,a2) PyMPI_UNAVAILABLE("MPI_Attr_delete",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Copy_function #undef MPI_Copy_function typedef int (MPIAPI PyMPI_MPI_Copy_function)(MPI_Comm,int,void*,void*,void*,int*); #define MPI_Copy_function PyMPI_MPI_Copy_function #endif #ifndef PyMPI_HAVE_MPI_Delete_function #undef MPI_Delete_function typedef int (MPIAPI PyMPI_MPI_Delete_function)(MPI_Comm,int,void*,void*); #define MPI_Delete_function PyMPI_MPI_Delete_function #endif #ifndef PyMPI_HAVE_MPI_DUP_FN #undef MPI_DUP_FN #define MPI_DUP_FN ((MPI_Copy_function*)0) #endif #ifndef PyMPI_HAVE_MPI_NULL_COPY_FN #undef MPI_NULL_COPY_FN #define MPI_NULL_COPY_FN ((MPI_Copy_function*)0) #endif #ifndef PyMPI_HAVE_MPI_NULL_DELETE_FN #undef MPI_NULL_DELETE_FN #define MPI_NULL_DELETE_FN ((MPI_Delete_function*)0) #endif #ifndef PyMPI_HAVE_MPI_Keyval_create #undef MPI_Keyval_create #define MPI_Keyval_create(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Keyval_create",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Keyval_free #undef MPI_Keyval_free #define MPI_Keyval_free(a1) PyMPI_UNAVAILABLE("MPI_Keyval_free",a1) #endif #ifndef PyMPI_HAVE_MPI_Errhandler_get #undef MPI_Errhandler_get #define MPI_Errhandler_get(a1,a2) PyMPI_UNAVAILABLE("MPI_Errhandler_get",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Errhandler_set #undef MPI_Errhandler_set #define MPI_Errhandler_set(a1,a2) PyMPI_UNAVAILABLE("MPI_Errhandler_set",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Handler_function #undef MPI_Handler_function typedef void (MPIAPI PyMPI_MPI_Handler_function)(MPI_Comm*,int*,...); #define MPI_Handler_function PyMPI_MPI_Handler_function #endif #ifndef PyMPI_HAVE_MPI_Errhandler_create #undef MPI_Errhandler_create #define MPI_Errhandler_create(a1,a2) PyMPI_UNAVAILABLE("MPI_Errhandler_create",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Address #undef MPI_Address #define MPI_Address(a1,a2) PyMPI_UNAVAILABLE("MPI_Address",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_UB #undef MPI_UB #define MPI_UB ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_LB #undef MPI_LB #define MPI_LB ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_Type_lb #undef MPI_Type_lb #define MPI_Type_lb(a1,a2) PyMPI_UNAVAILABLE("MPI_Type_lb",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Type_ub #undef MPI_Type_ub #define MPI_Type_ub(a1,a2) PyMPI_UNAVAILABLE("MPI_Type_ub",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Type_extent #undef MPI_Type_extent #define MPI_Type_extent(a1,a2) PyMPI_UNAVAILABLE("MPI_Type_extent",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Type_hvector #undef MPI_Type_hvector #define MPI_Type_hvector(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Type_hvector",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Type_hindexed #undef MPI_Type_hindexed #define MPI_Type_hindexed(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Type_hindexed",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Type_struct #undef MPI_Type_struct #define MPI_Type_struct(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Type_struct",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_HVECTOR_INTEGER #undef MPI_COMBINER_HVECTOR_INTEGER #define MPI_COMBINER_HVECTOR_INTEGER (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_HINDEXED_INTEGER #undef MPI_COMBINER_HINDEXED_INTEGER #define MPI_COMBINER_HINDEXED_INTEGER 
(MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_STRUCT_INTEGER #undef MPI_COMBINER_STRUCT_INTEGER #define MPI_COMBINER_STRUCT_INTEGER (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_ERR_REVOKED #undef MPI_ERR_REVOKED #define MPI_ERR_REVOKED (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_PROC_FAILED #undef MPI_ERR_PROC_FAILED #define MPI_ERR_PROC_FAILED (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_PROC_FAILED_PENDING #undef MPI_ERR_PROC_FAILED_PENDING #define MPI_ERR_PROC_FAILED_PENDING (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_Comm_revoke #undef MPI_Comm_revoke #define MPI_Comm_revoke(a1) PyMPI_UNAVAILABLE("MPI_Comm_revoke",a1) #endif #ifndef PyMPI_HAVE_MPI_Comm_is_revoked #undef MPI_Comm_is_revoked #define MPI_Comm_is_revoked(a1,a2) PyMPI_UNAVAILABLE("MPI_Comm_is_revoked",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Comm_get_failed #undef MPI_Comm_get_failed #define MPI_Comm_get_failed(a1,a2) PyMPI_UNAVAILABLE("MPI_Comm_get_failed",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Comm_ack_failed #undef MPI_Comm_ack_failed #define MPI_Comm_ack_failed(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Comm_ack_failed",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Comm_agree #undef MPI_Comm_agree #define MPI_Comm_agree(a1,a2) PyMPI_UNAVAILABLE("MPI_Comm_agree",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Comm_iagree #undef MPI_Comm_iagree #define MPI_Comm_iagree(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Comm_iagree",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Comm_shrink #undef MPI_Comm_shrink #define MPI_Comm_shrink(a1,a2) PyMPI_UNAVAILABLE("MPI_Comm_shrink",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Comm_ishrink #undef MPI_Comm_ishrink #define MPI_Comm_ishrink(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Comm_ishrink",a1,a2,a3) #endif #endif /* !PyMPI_MISSING_H */ mpi4py-4.0.3/src/lib-mpi/mpiulfm.h000066400000000000000000000061501475341043600167130ustar00rootroot00000000000000#ifndef PyMPI_MPIULFM_H #define PyMPI_MPIULFM_H #ifndef PyMPI_SKIP_MPIULFM #define PyMPI_SKIP_MPIULFM 0 #endif #if MPI_VERSION < 6 && !PyMPI_SKIP_MPIULFM #if defined(MPICH_NAME) && (MPICH_NAME >= 3) #if MPICH_NUMVERSION >= 30200000 && 0 #define PyMPI_HAVE_MPIX_ERR_REVOKED 1 #define PyMPI_HAVE_MPIX_ERR_PROC_FAILED 1 #define PyMPI_HAVE_MPIX_ERR_PROC_FAILED_PENDING 1 #define PyMPI_HAVE_MPIX_Comm_revoke 1 #define PyMPI_HAVE_MPIX_Comm_agree 1 #define PyMPI_HAVE_MPIX_Comm_shrink 1 #endif #if MPICH_NUMVERSION >= 40200000 && 0 #define PyMPI_HAVE_MPIX_Comm_get_failed 1 #define PyMPI_HAVE_MPIX_Comm_ack_failed 1 #define PyMPI_HAVE_MPIX_Comm_iagree 1 #endif #endif #if defined(OPEN_MPI) #include #ifdef OMPI_HAVE_MPI_EXT_FTMPI #define PyMPI_HAVE_MPIX_ERR_REVOKED 1 #define PyMPI_HAVE_MPIX_ERR_PROC_FAILED 1 #define PyMPI_HAVE_MPIX_ERR_PROC_FAILED_PENDING 1 #define PyMPI_HAVE_MPIX_Comm_revoke 1 #define PyMPI_HAVE_MPIX_Comm_is_revoked 1 #define PyMPI_HAVE_MPIX_Comm_agree 1 #define PyMPI_HAVE_MPIX_Comm_iagree 1 #define PyMPI_HAVE_MPIX_Comm_shrink 1 #ifdef OMPI_HAVE_MPIX_COMM_GET_FAILED #define PyMPI_HAVE_MPIX_Comm_get_failed 1 #endif #ifdef OMPI_HAVE_MPIX_COMM_ACK_FAILED #define PyMPI_HAVE_MPIX_Comm_ack_failed 1 #endif #ifdef OMPI_HAVE_MPIX_COMM_ISHRINK #define PyMPI_HAVE_MPIX_Comm_ishrink 1 #endif #endif #endif #endif #ifndef PyMPI_HAVE_MPI_ERR_REVOKED #ifdef PyMPI_HAVE_MPIX_ERR_REVOKED #undef MPI_ERR_REVOKED #define MPI_ERR_REVOKED MPIX_ERR_REVOKED #endif #endif #ifndef PyMPI_HAVE_MPI_ERR_PROC_FAILED #ifdef PyMPI_HAVE_MPIX_ERR_PROC_FAILED #undef MPI_ERR_PROC_FAILED #define MPI_ERR_PROC_FAILED MPIX_ERR_PROC_FAILED #endif #endif #ifndef PyMPI_HAVE_MPI_ERR_PROC_FAILED_PENDING #ifdef 
PyMPI_HAVE_MPIX_ERR_PROC_FAILED_PENDING #undef MPI_ERR_PROC_FAILED_PENDING #define MPI_ERR_PROC_FAILED_PENDING MPIX_ERR_PROC_FAILED_PENDING #endif #endif #ifndef PyMPI_HAVE_MPI_Comm_revoke #ifdef PyMPI_HAVE_MPIX_Comm_revoke #undef MPI_Comm_revoke #define MPI_Comm_revoke MPIX_Comm_revoke #endif #endif #ifndef PyMPI_HAVE_MPI_Comm_is_revoked #ifdef PyMPI_HAVE_MPIX_Comm_is_revoked #undef MPI_Comm_is_revoked #define MPI_Comm_is_revoked MPIX_Comm_is_revoked #endif #endif #ifndef PyMPI_HAVE_MPI_Comm_get_failed #ifdef PyMPI_HAVE_MPIX_Comm_get_failed #undef MPI_Comm_get_failed #define MPI_Comm_get_failed MPIX_Comm_get_failed #endif #endif #ifndef PyMPI_HAVE_MPI_Comm_ack_failed #ifdef PyMPI_HAVE_MPIX_Comm_ack_failed #undef MPI_Comm_ack_failed #define MPI_Comm_ack_failed MPIX_Comm_ack_failed #endif #endif #ifndef PyMPI_HAVE_MPI_Comm_agree #ifdef PyMPI_HAVE_MPIX_Comm_agree #undef MPI_Comm_agree #define MPI_Comm_agree MPIX_Comm_agree #endif #endif #ifndef PyMPI_HAVE_MPI_Comm_iagree #ifdef PyMPI_HAVE_MPIX_Comm_iagree #undef MPI_Comm_iagree #define MPI_Comm_iagree MPIX_Comm_iagree #endif #endif #ifndef PyMPI_HAVE_MPI_Comm_shrink #ifdef PyMPI_HAVE_MPIX_Comm_shrink #undef MPI_Comm_shrink #define MPI_Comm_shrink MPIX_Comm_shrink #endif #endif #ifndef PyMPI_HAVE_MPI_Comm_ishrink #ifdef PyMPI_HAVE_MPIX_Comm_ishrink #undef MPI_Comm_ishrink #define MPI_Comm_ishrink MPIX_Comm_ishrink #endif #endif #endif /* !PyMPI_MPIULFM_H */ mpi4py-4.0.3/src/mpi.pth000066400000000000000000000023541475341043600150440ustar00rootroot00000000000000# Add Intel MPI to Python 3.8+ DLL search path on Windows import sys, os; I_MPI_ROOT = os.getenv('I_MPI_ROOT'); I_MPI_LIBRARY_KIND = os.getenv('I_MPI_LIBRARY_KIND'); library_kind = os.getenv('library_kind'); kind = I_MPI_LIBRARY_KIND or library_kind or 'release'; d1 = I_MPI_ROOT and os.path.join(I_MPI_ROOT, 'bin', kind); d2 = I_MPI_ROOT and os.path.join(I_MPI_ROOT, 'bin'); d1 = d1 and os.path.isfile(os.path.join(d1, 'impi.dll')) and d1; d2 = d2 and os.path.isfile(os.path.join(d2, 'impi.dll')) and d2; dlldir = d1 or d2; add_dll_directory = getattr(os, 'add_dll_directory', None); add_dll_directory and dlldir and add_dll_directory(dlldir); verbose = add_dll_directory and dlldir and sys.flags.verbose >= 1; verbose and print("# add DLL directory: ", dlldir, file=sys.stderr) # Add Microsoft MPI to Python 3.8+ DLL search path on Windows import sys, os; MSMPI_BIN = os.getenv('MSMPI_BIN'); dll = MSMPI_BIN and os.path.join(MSMPI_BIN, 'msmpi.dll'); dlldir = dll and os.path.isfile(dll) and MSMPI_BIN; add_dll_directory = getattr(os, 'add_dll_directory', None); add_dll_directory and dlldir and add_dll_directory(dlldir); verbose = add_dll_directory and dlldir and sys.flags.verbose >= 1; verbose and print("# add DLL directory: ", dlldir, file=sys.stderr) mpi4py-4.0.3/src/mpi4py/000077500000000000000000000000001475341043600147605ustar00rootroot00000000000000mpi4py-4.0.3/src/mpi4py/MPI.pxd000066400000000000000000000074771475341043600161410ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com # -- from mpi4py.libmpi cimport MPI_Aint from mpi4py.libmpi cimport MPI_Offset from mpi4py.libmpi cimport MPI_Count from mpi4py.libmpi cimport MPI_Status from mpi4py.libmpi cimport MPI_Datatype from mpi4py.libmpi cimport MPI_Request from mpi4py.libmpi cimport MPI_Message from mpi4py.libmpi cimport MPI_Op from mpi4py.libmpi cimport MPI_Group from mpi4py.libmpi cimport MPI_Info from mpi4py.libmpi cimport MPI_Errhandler from mpi4py.libmpi cimport MPI_Session from mpi4py.libmpi cimport 
MPI_Comm from mpi4py.libmpi cimport MPI_Win from mpi4py.libmpi cimport MPI_File # -- cdef extern from *: ctypedef MPI_Aint Aint "MPI_Aint" ctypedef MPI_Offset Offset "MPI_Offset" ctypedef MPI_Count Count "MPI_Count" ctypedef api class Datatype [ type PyMPIDatatype_Type, object PyMPIDatatypeObject, ]: cdef MPI_Datatype ob_mpi cdef unsigned flags cdef object __weakref__ ctypedef api class Status [ type PyMPIStatus_Type, object PyMPIStatusObject, ]: cdef MPI_Status ob_mpi cdef unsigned flags cdef object __weakref__ ctypedef api class Request [ type PyMPIRequest_Type, object PyMPIRequestObject, ]: cdef MPI_Request ob_mpi cdef unsigned flags cdef object __weakref__ cdef object ob_buf ctypedef api class Prequest(Request) [ type PyMPIPrequest_Type, object PyMPIPrequestObject, ]: pass ctypedef api class Grequest(Request) [ type PyMPIGrequest_Type, object PyMPIGrequestObject, ]: cdef MPI_Request ob_grequest ctypedef api class Message [ type PyMPIMessage_Type, object PyMPIMessageObject, ]: cdef MPI_Message ob_mpi cdef unsigned flags cdef object __weakref__ cdef object ob_buf ctypedef api class Op [ type PyMPIOp_Type, object PyMPIOpObject, ]: cdef MPI_Op ob_mpi cdef unsigned flags cdef object __weakref__ ctypedef api class Group [ type PyMPIGroup_Type, object PyMPIGroupObject, ]: cdef MPI_Group ob_mpi cdef unsigned flags cdef object __weakref__ ctypedef api class Info [ type PyMPIInfo_Type, object PyMPIInfoObject, ]: cdef MPI_Info ob_mpi cdef unsigned flags cdef object __weakref__ ctypedef api class Errhandler [ type PyMPIErrhandler_Type, object PyMPIErrhandlerObject, ]: cdef MPI_Errhandler ob_mpi cdef unsigned flags cdef object __weakref__ ctypedef api class Session [ type PyMPISession_Type, object PyMPISessionObject, ]: cdef MPI_Session ob_mpi cdef unsigned flags cdef object __weakref__ ctypedef api class Comm [ type PyMPIComm_Type, object PyMPICommObject, ]: cdef MPI_Comm ob_mpi cdef unsigned flags cdef object __weakref__ ctypedef api class Intracomm(Comm) [ type PyMPIIntracomm_Type, object PyMPIIntracommObject, ]: pass ctypedef api class Topocomm(Intracomm) [ type PyMPITopocomm_Type, object PyMPITopocommObject, ]: pass ctypedef api class Cartcomm(Topocomm) [ type PyMPICartcomm_Type, object PyMPICartcommObject, ]: pass ctypedef api class Graphcomm(Topocomm) [ type PyMPIGraphcomm_Type, object PyMPIGraphcommObject, ]: pass ctypedef api class Distgraphcomm(Topocomm) [ type PyMPIDistgraphcomm_Type, object PyMPIDistgraphcommObject, ]: pass ctypedef api class Intercomm(Comm) [ type PyMPIIntercomm_Type, object PyMPIIntercommObject, ]: pass ctypedef api class Win [ type PyMPIWin_Type, object PyMPIWinObject, ]: cdef MPI_Win ob_mpi cdef unsigned flags cdef object __weakref__ cdef object ob_mem ctypedef api class File [ type PyMPIFile_Type, object PyMPIFileObject, ]: cdef MPI_File ob_mpi cdef unsigned flags cdef object __weakref__ # -- mpi4py-4.0.3/src/mpi4py/MPI.pyi000066400000000000000000001661431475341043600161430ustar00rootroot00000000000000# Generated with `python conf/mpistubgen.py` import sys from threading import Lock from typing import ( Any, AnyStr, Final, Literal, NoReturn, final, overload, ) if sys.version_info >= (3, 9): from collections.abc import ( Callable, Hashable, Iterable, Iterator, Sequence, Mapping, ) else: from typing import ( Callable, Hashable, Iterable, Iterator, Sequence, Mapping, ) if sys.version_info >= (3, 11): from typing import Self else: from typing_extensions import Self from os import PathLike UNDEFINED: Final[int] = ... ANY_SOURCE: Final[int] = ... 
ANY_TAG: Final[int] = ... PROC_NULL: Final[int] = ... ROOT: Final[int] = ... BOTTOM: Final[BottomType] = ... IN_PLACE: Final[InPlaceType] = ... KEYVAL_INVALID: Final[int] = ... TAG_UB: Final[int] = ... IO: Final[int] = ... WTIME_IS_GLOBAL: Final[int] = ... UNIVERSE_SIZE: Final[int] = ... APPNUM: Final[int] = ... LASTUSEDCODE: Final[int] = ... WIN_BASE: Final[int] = ... WIN_SIZE: Final[int] = ... WIN_DISP_UNIT: Final[int] = ... WIN_CREATE_FLAVOR: Final[int] = ... WIN_FLAVOR: Final[int] = ... WIN_MODEL: Final[int] = ... SUCCESS: Final[int] = ... ERR_LASTCODE: Final[int] = ... ERR_TYPE: Final[int] = ... ERR_REQUEST: Final[int] = ... ERR_OP: Final[int] = ... ERR_GROUP: Final[int] = ... ERR_INFO: Final[int] = ... ERR_ERRHANDLER: Final[int] = ... ERR_SESSION: Final[int] = ... ERR_COMM: Final[int] = ... ERR_WIN: Final[int] = ... ERR_FILE: Final[int] = ... ERR_BUFFER: Final[int] = ... ERR_COUNT: Final[int] = ... ERR_TAG: Final[int] = ... ERR_RANK: Final[int] = ... ERR_ROOT: Final[int] = ... ERR_TRUNCATE: Final[int] = ... ERR_IN_STATUS: Final[int] = ... ERR_PENDING: Final[int] = ... ERR_TOPOLOGY: Final[int] = ... ERR_DIMS: Final[int] = ... ERR_ARG: Final[int] = ... ERR_OTHER: Final[int] = ... ERR_UNKNOWN: Final[int] = ... ERR_INTERN: Final[int] = ... ERR_KEYVAL: Final[int] = ... ERR_NO_MEM: Final[int] = ... ERR_INFO_KEY: Final[int] = ... ERR_INFO_VALUE: Final[int] = ... ERR_INFO_NOKEY: Final[int] = ... ERR_SPAWN: Final[int] = ... ERR_PORT: Final[int] = ... ERR_SERVICE: Final[int] = ... ERR_NAME: Final[int] = ... ERR_PROC_ABORTED: Final[int] = ... ERR_BASE: Final[int] = ... ERR_SIZE: Final[int] = ... ERR_DISP: Final[int] = ... ERR_ASSERT: Final[int] = ... ERR_LOCKTYPE: Final[int] = ... ERR_RMA_CONFLICT: Final[int] = ... ERR_RMA_SYNC: Final[int] = ... ERR_RMA_RANGE: Final[int] = ... ERR_RMA_ATTACH: Final[int] = ... ERR_RMA_SHARED: Final[int] = ... ERR_RMA_FLAVOR: Final[int] = ... ERR_BAD_FILE: Final[int] = ... ERR_NO_SUCH_FILE: Final[int] = ... ERR_FILE_EXISTS: Final[int] = ... ERR_FILE_IN_USE: Final[int] = ... ERR_AMODE: Final[int] = ... ERR_ACCESS: Final[int] = ... ERR_READ_ONLY: Final[int] = ... ERR_NO_SPACE: Final[int] = ... ERR_QUOTA: Final[int] = ... ERR_NOT_SAME: Final[int] = ... ERR_IO: Final[int] = ... ERR_UNSUPPORTED_OPERATION: Final[int] = ... ERR_UNSUPPORTED_DATAREP: Final[int] = ... ERR_CONVERSION: Final[int] = ... ERR_DUP_DATAREP: Final[int] = ... ERR_VALUE_TOO_LARGE: Final[int] = ... ERR_REVOKED: Final[int] = ... ERR_PROC_FAILED: Final[int] = ... ERR_PROC_FAILED_PENDING: Final[int] = ... ORDER_C: Final[int] = ... ORDER_FORTRAN: Final[int] = ... ORDER_F: Final[int] = ... TYPECLASS_INTEGER: Final[int] = ... TYPECLASS_REAL: Final[int] = ... TYPECLASS_COMPLEX: Final[int] = ... DISTRIBUTE_NONE: Final[int] = ... DISTRIBUTE_BLOCK: Final[int] = ... DISTRIBUTE_CYCLIC: Final[int] = ... DISTRIBUTE_DFLT_DARG: Final[int] = ... COMBINER_NAMED: Final[int] = ... COMBINER_DUP: Final[int] = ... COMBINER_CONTIGUOUS: Final[int] = ... COMBINER_VECTOR: Final[int] = ... COMBINER_HVECTOR: Final[int] = ... COMBINER_INDEXED: Final[int] = ... COMBINER_HINDEXED: Final[int] = ... COMBINER_INDEXED_BLOCK: Final[int] = ... COMBINER_HINDEXED_BLOCK: Final[int] = ... COMBINER_STRUCT: Final[int] = ... COMBINER_SUBARRAY: Final[int] = ... COMBINER_DARRAY: Final[int] = ... COMBINER_RESIZED: Final[int] = ... COMBINER_VALUE_INDEX: Final[int] = ... COMBINER_F90_INTEGER: Final[int] = ... COMBINER_F90_REAL: Final[int] = ... COMBINER_F90_COMPLEX: Final[int] = ... F_SOURCE: Final[int] = ... F_TAG: Final[int] = ... 
F_ERROR: Final[int] = ... F_STATUS_SIZE: Final[int] = ... IDENT: Final[int] = ... CONGRUENT: Final[int] = ... SIMILAR: Final[int] = ... UNEQUAL: Final[int] = ... CART: Final[int] = ... GRAPH: Final[int] = ... DIST_GRAPH: Final[int] = ... UNWEIGHTED: Final[int] = ... WEIGHTS_EMPTY: Final[int] = ... COMM_TYPE_SHARED: Final[int] = ... COMM_TYPE_HW_GUIDED: Final[int] = ... COMM_TYPE_HW_UNGUIDED: Final[int] = ... COMM_TYPE_RESOURCE_GUIDED: Final[int] = ... BSEND_OVERHEAD: Final[int] = ... BUFFER_AUTOMATIC: Final[BufferAutomaticType] = ... WIN_FLAVOR_CREATE: Final[int] = ... WIN_FLAVOR_ALLOCATE: Final[int] = ... WIN_FLAVOR_DYNAMIC: Final[int] = ... WIN_FLAVOR_SHARED: Final[int] = ... WIN_SEPARATE: Final[int] = ... WIN_UNIFIED: Final[int] = ... MODE_NOCHECK: Final[int] = ... MODE_NOSTORE: Final[int] = ... MODE_NOPUT: Final[int] = ... MODE_NOPRECEDE: Final[int] = ... MODE_NOSUCCEED: Final[int] = ... LOCK_EXCLUSIVE: Final[int] = ... LOCK_SHARED: Final[int] = ... MODE_RDONLY: Final[int] = ... MODE_WRONLY: Final[int] = ... MODE_RDWR: Final[int] = ... MODE_CREATE: Final[int] = ... MODE_EXCL: Final[int] = ... MODE_DELETE_ON_CLOSE: Final[int] = ... MODE_UNIQUE_OPEN: Final[int] = ... MODE_SEQUENTIAL: Final[int] = ... MODE_APPEND: Final[int] = ... SEEK_SET: Final[int] = ... SEEK_CUR: Final[int] = ... SEEK_END: Final[int] = ... DISPLACEMENT_CURRENT: Final[int] = ... DISP_CUR: Final[int] = ... THREAD_SINGLE: Final[int] = ... THREAD_FUNNELED: Final[int] = ... THREAD_SERIALIZED: Final[int] = ... THREAD_MULTIPLE: Final[int] = ... VERSION: Final[int] = ... SUBVERSION: Final[int] = ... MAX_PROCESSOR_NAME: Final[int] = ... MAX_ERROR_STRING: Final[int] = ... MAX_PORT_NAME: Final[int] = ... MAX_INFO_KEY: Final[int] = ... MAX_INFO_VAL: Final[int] = ... MAX_OBJECT_NAME: Final[int] = ... MAX_DATAREP_STRING: Final[int] = ... MAX_LIBRARY_VERSION_STRING: Final[int] = ... MAX_PSET_NAME_LEN: Final[int] = ... MAX_STRINGTAG_LEN: Final[int] = ... class Datatype: def __new__(cls, datatype: Datatype | None = None) -> Self: ... def __eq__(self, __other: object) -> bool: ... def __ne__(self, __other: object) -> bool: ... def __bool__(self) -> bool: ... def __reduce__(self) -> str | tuple[Any, ...]: ... @classmethod def fromhandle(cls, handle: int) -> Datatype: ... def free(self) -> None: ... def Get_size(self) -> int: ... def Get_extent(self) -> tuple[int, int]: ... def Dup(self) -> Self: ... def Create_contiguous(self, count: int) -> Self: ... def Create_vector(self, count: int, blocklength: int, stride: int) -> Self: ... def Create_hvector(self, count: int, blocklength: int, stride: int) -> Self: ... def Create_indexed(self, blocklengths: Sequence[int], displacements: Sequence[int]) -> Self: ... def Create_hindexed(self, blocklengths: Sequence[int], displacements: Sequence[int]) -> Self: ... def Create_indexed_block(self, blocklength: int, displacements: Sequence[int]) -> Self: ... def Create_hindexed_block(self, blocklength: int, displacements: Sequence[int]) -> Self: ... @classmethod def Create_struct(cls, blocklengths: Sequence[int], displacements: Sequence[int], datatypes: Sequence[Datatype]) -> Self: ... def Create_subarray(self, sizes: Sequence[int], subsizes: Sequence[int], starts: Sequence[int], order: int = ORDER_C) -> Self: ... def Create_darray(self, size: int, rank: int, gsizes: Sequence[int], distribs: Sequence[int], dargs: Sequence[int], psizes: Sequence[int], order: int = ORDER_C) -> Self: ... @classmethod def Get_value_index(cls, value: Datatype, index: Datatype) -> Self: ... 
@classmethod def Create_f90_integer(cls, r: int) -> Self: ... @classmethod def Create_f90_real(cls, p: int, r: int) -> Self: ... @classmethod def Create_f90_complex(cls, p: int, r: int) -> Self: ... @classmethod def Match_size(cls, typeclass: int, size: int) -> Self: ... def Commit(self) -> Self: ... def Free(self) -> None: ... def Create_resized(self, lb: int, extent: int) -> Self: ... def Get_true_extent(self) -> tuple[int, int]: ... def Get_envelope(self) -> tuple[int, int, int, int, int]: ... def Get_contents(self) -> tuple[list[int], list[int], list[int], list[Datatype]]: ... def decode(self) -> tuple[Datatype, str, dict[str, Any]]: ... def Pack(self, inbuf: BufSpec, outbuf: BufSpec, position: int, comm: Comm) -> int: ... def Unpack(self, inbuf: BufSpec, position: int, outbuf: BufSpec, comm: Comm) -> int: ... def Pack_size(self, count: int, comm: Comm) -> int: ... def Pack_external(self, datarep: str, inbuf: BufSpec, outbuf: BufSpec, position: int) -> int: ... def Unpack_external(self, datarep: str, inbuf: BufSpec, position: int, outbuf: BufSpec) -> int: ... def Pack_external_size(self, datarep: str, count: int) -> int: ... def Get_attr(self, keyval: int) -> int | Any | None: ... def Set_attr(self, keyval: int, attrval: Any) -> None: ... def Delete_attr(self, keyval: int) -> None: ... @classmethod def Create_keyval(cls, copy_fn: Callable[[Datatype, int, Any], Any] | None = None, delete_fn: Callable[[Datatype, int, Any], None] | None = None, nopython: bool = False) -> int: ... @classmethod def Free_keyval(cls, keyval: int) -> int: ... def Get_name(self) -> str: ... def Set_name(self, name: str) -> None: ... def py2f(self) -> int: ... @classmethod def f2py(cls, arg: int) -> Datatype: ... def tocode(self) -> str: ... @classmethod def fromcode(cls, code: str) -> Datatype: ... handle: int size: int extent: int lb: int ub: int true_extent: int true_lb: int true_ub: int envelope: tuple[int, int, int, int, int] contents: tuple[list[int], list[int], list[int], list[Datatype]] combiner: int is_named: bool is_predefined: bool name: str typestr: str typechar: str Create_dup = Dup Resized = Create_resized DATATYPE_NULL: Final[Datatype] = ... PACKED: Final[Datatype] = ... BYTE: Final[Datatype] = ... AINT: Final[Datatype] = ... OFFSET: Final[Datatype] = ... COUNT: Final[Datatype] = ... CHAR: Final[Datatype] = ... WCHAR: Final[Datatype] = ... SIGNED_CHAR: Final[Datatype] = ... SHORT: Final[Datatype] = ... INT: Final[Datatype] = ... LONG: Final[Datatype] = ... LONG_LONG: Final[Datatype] = ... UNSIGNED_CHAR: Final[Datatype] = ... UNSIGNED_SHORT: Final[Datatype] = ... UNSIGNED: Final[Datatype] = ... UNSIGNED_LONG: Final[Datatype] = ... UNSIGNED_LONG_LONG: Final[Datatype] = ... FLOAT: Final[Datatype] = ... DOUBLE: Final[Datatype] = ... LONG_DOUBLE: Final[Datatype] = ... C_BOOL: Final[Datatype] = ... INT8_T: Final[Datatype] = ... INT16_T: Final[Datatype] = ... INT32_T: Final[Datatype] = ... INT64_T: Final[Datatype] = ... UINT8_T: Final[Datatype] = ... UINT16_T: Final[Datatype] = ... UINT32_T: Final[Datatype] = ... UINT64_T: Final[Datatype] = ... C_COMPLEX: Final[Datatype] = ... C_FLOAT_COMPLEX: Final[Datatype] = ... C_DOUBLE_COMPLEX: Final[Datatype] = ... C_LONG_DOUBLE_COMPLEX: Final[Datatype] = ... CXX_BOOL: Final[Datatype] = ... CXX_FLOAT_COMPLEX: Final[Datatype] = ... CXX_DOUBLE_COMPLEX: Final[Datatype] = ... CXX_LONG_DOUBLE_COMPLEX: Final[Datatype] = ... SHORT_INT: Final[Datatype] = ... INT_INT: Final[Datatype] = ... TWOINT: Final[Datatype] = ... LONG_INT: Final[Datatype] = ... 
FLOAT_INT: Final[Datatype] = ... DOUBLE_INT: Final[Datatype] = ... LONG_DOUBLE_INT: Final[Datatype] = ... CHARACTER: Final[Datatype] = ... LOGICAL: Final[Datatype] = ... INTEGER: Final[Datatype] = ... REAL: Final[Datatype] = ... DOUBLE_PRECISION: Final[Datatype] = ... COMPLEX: Final[Datatype] = ... DOUBLE_COMPLEX: Final[Datatype] = ... LOGICAL1: Final[Datatype] = ... LOGICAL2: Final[Datatype] = ... LOGICAL4: Final[Datatype] = ... LOGICAL8: Final[Datatype] = ... INTEGER1: Final[Datatype] = ... INTEGER2: Final[Datatype] = ... INTEGER4: Final[Datatype] = ... INTEGER8: Final[Datatype] = ... INTEGER16: Final[Datatype] = ... REAL2: Final[Datatype] = ... REAL4: Final[Datatype] = ... REAL8: Final[Datatype] = ... REAL16: Final[Datatype] = ... COMPLEX4: Final[Datatype] = ... COMPLEX8: Final[Datatype] = ... COMPLEX16: Final[Datatype] = ... COMPLEX32: Final[Datatype] = ... UNSIGNED_INT: Final[Datatype] = ... SIGNED_SHORT: Final[Datatype] = ... SIGNED_INT: Final[Datatype] = ... SIGNED_LONG: Final[Datatype] = ... SIGNED_LONG_LONG: Final[Datatype] = ... BOOL: Final[Datatype] = ... SINT8_T: Final[Datatype] = ... SINT16_T: Final[Datatype] = ... SINT32_T: Final[Datatype] = ... SINT64_T: Final[Datatype] = ... F_BOOL: Final[Datatype] = ... F_INT: Final[Datatype] = ... F_FLOAT: Final[Datatype] = ... F_DOUBLE: Final[Datatype] = ... F_COMPLEX: Final[Datatype] = ... F_FLOAT_COMPLEX: Final[Datatype] = ... F_DOUBLE_COMPLEX: Final[Datatype] = ... class Status: def __new__(cls, status: Status | None = None) -> Self: ... def __eq__(self, __other: object) -> bool: ... def __ne__(self, __other: object) -> bool: ... def __reduce__(self) -> tuple[Any, tuple[Any, ...], dict[str, Any]]: ... def __getstate__(self) -> dict[str, int]: ... def __setstate__(self, state: dict[str, int]) -> None: ... def Get_source(self) -> int: ... def Set_source(self, source: int) -> None: ... def Get_tag(self) -> int: ... def Set_tag(self, tag: int) -> None: ... def Get_error(self) -> int: ... def Set_error(self, error: int) -> None: ... def Get_count(self, datatype: Datatype = BYTE) -> int: ... def Get_elements(self, datatype: Datatype) -> int: ... def Set_elements(self, datatype: Datatype, count: int) -> None: ... def Is_cancelled(self) -> bool: ... def Set_cancelled(self, flag: bool) -> None: ... def py2f(self) -> list[int]: ... @classmethod def f2py(cls, arg: list[int]) -> Self: ... source: int tag: int error: int count: int cancelled: bool class Request: def __new__(cls, request: Request | None = None) -> Self: ... def __eq__(self, __other: object) -> bool: ... def __ne__(self, __other: object) -> bool: ... def __bool__(self) -> bool: ... def __reduce__(self) -> str | tuple[Any, ...]: ... @classmethod def fromhandle(cls, handle: int) -> Request: ... def free(self) -> None: ... def Wait(self, status: Status | None = None) -> Literal[True]: ... def Test(self, status: Status | None = None) -> bool: ... def Get_status(self, status: Status | None = None) -> bool: ... @classmethod def Waitany(cls, requests: Sequence[Request], status: Status | None = None) -> int: ... @classmethod def Testany(cls, requests: Sequence[Request], status: Status | None = None) -> tuple[int, bool]: ... @classmethod def Get_status_any(cls, requests: Sequence[Request], status: Status | None = None) -> tuple[int, bool]: ... @classmethod def Waitall(cls, requests: Sequence[Request], statuses: list[Status] | None = None) -> Literal[True]: ... @classmethod def Testall(cls, requests: Sequence[Request], statuses: list[Status] | None = None) -> bool: ... 
@classmethod def Get_status_all(cls, requests: Sequence[Request], statuses: list[Status] | None = None) -> bool: ... @classmethod def Waitsome(cls, requests: Sequence[Request], statuses: list[Status] | None = None) -> list[int] | None: ... @classmethod def Testsome(cls, requests: Sequence[Request], statuses: list[Status] | None = None) -> list[int] | None: ... @classmethod def Get_status_some(cls, requests: Sequence[Request], statuses: list[Status] | None = None) -> list[int] | None: ... def Cancel(self) -> None: ... def Free(self) -> None: ... def py2f(self) -> int: ... @classmethod def f2py(cls, arg: int) -> Request: ... def wait(self, status: Status | None = None) -> Any: ... def test(self, status: Status | None = None) -> tuple[bool, Any | None]: ... def get_status(self, status: Status | None = None) -> bool: ... @classmethod def waitany(cls, requests: Sequence[Request], status: Status | None = None) -> tuple[int, Any]: ... @classmethod def testany(cls, requests: Sequence[Request], status: Status | None = None) -> tuple[int, bool, Any | None]: ... @classmethod def get_status_any(cls, requests: Sequence[Request], status: Status | None = None) -> tuple[int, bool]: ... @classmethod def waitall(cls, requests: Sequence[Request], statuses: list[Status] | None = None) -> list[Any]: ... @classmethod def testall(cls, requests: Sequence[Request], statuses: list[Status] | None = None) -> tuple[bool, list[Any] | None]: ... @classmethod def get_status_all(cls, requests: Sequence[Request], statuses: list[Status] | None = None) -> bool: ... @classmethod def waitsome(cls, requests: Sequence[Request], statuses: list[Status] | None = None) -> tuple[list[int] | None, list[Any] | None]: ... @classmethod def testsome(cls, requests: Sequence[Request], statuses: list[Status] | None = None) -> tuple[list[int] | None, list[Any] | None]: ... @classmethod def get_status_some(cls, requests: Sequence[Request], statuses: list[Status] | None = None) -> list[int] | None: ... def cancel(self) -> None: ... handle: int REQUEST_NULL: Final[Request] = ... class Prequest(Request): def __new__(cls, request: Request | None = None) -> Self: ... def Start(self) -> None: ... @classmethod def Startall(cls, requests: list[Prequest]) -> None: ... def Pready(self, partition: int) -> None: ... def Pready_range(self, partition_low: int, partition_high: int) -> None: ... def Pready_list(self, partitions: Sequence[int]) -> None: ... def Parrived(self, partition: int) -> bool: ... class Grequest(Request): def __new__(cls, request: Request | None = None) -> Self: ... @classmethod def Start(cls, query_fn: Callable[..., None] | None = None, free_fn: Callable[..., None] | None = None, cancel_fn: Callable[..., None] | None = None, args: tuple[Any] | None = None, kwargs: dict[str, Any] | None = None) -> Grequest: ... def Complete(self) -> None: ... def complete(self, obj: Any = None) -> None: ... class Message: def __new__(cls, message: Message | None = None) -> Self: ... def __eq__(self, __other: object) -> bool: ... def __ne__(self, __other: object) -> bool: ... def __bool__(self) -> bool: ... def __reduce__(self) -> str | tuple[Any, ...]: ... @classmethod def fromhandle(cls, handle: int) -> Message: ... def free(self) -> None: ... @classmethod def Probe(cls, comm: Comm, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Status | None = None) -> Self: ... @classmethod def Iprobe(cls, comm: Comm, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Status | None = None) -> Self | None: ... 
def Recv(self, buf: BufSpec, status: Status | None = None) -> None: ... def Irecv(self, buf: BufSpec) -> Request: ... @classmethod def probe(cls, comm: Comm, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Status | None = None) -> Self: ... @classmethod def iprobe(cls, comm: Comm, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Status | None = None) -> Self | None: ... def recv(self, status: Status | None = None) -> Any: ... def irecv(self) -> Request: ... def py2f(self) -> int: ... @classmethod def f2py(cls, arg: int) -> Message: ... handle: int MESSAGE_NULL: Final[Message] = ... MESSAGE_NO_PROC: Final[Message] = ... class Op: def __new__(cls, op: Op | None = None) -> Self: ... def __call__(self, x: Any, y: Any) -> Any: ... def __eq__(self, __other: object) -> bool: ... def __ne__(self, __other: object) -> bool: ... def __bool__(self) -> bool: ... def __reduce__(self) -> str | tuple[Any, ...]: ... @classmethod def fromhandle(cls, handle: int) -> Op: ... def free(self) -> None: ... @classmethod def Create(cls, function: Callable[[Buffer, Buffer, Datatype], None], commute: bool = False) -> Self: ... def Free(self) -> None: ... def Is_commutative(self) -> bool: ... def Reduce_local(self, inbuf: BufSpec, inoutbuf: BufSpec) -> None: ... def py2f(self) -> int: ... @classmethod def f2py(cls, arg: int) -> Op: ... handle: int is_commutative: bool is_predefined: bool OP_NULL: Final[Op] = ... MAX: Final[Op] = ... MIN: Final[Op] = ... SUM: Final[Op] = ... PROD: Final[Op] = ... LAND: Final[Op] = ... BAND: Final[Op] = ... LOR: Final[Op] = ... BOR: Final[Op] = ... LXOR: Final[Op] = ... BXOR: Final[Op] = ... MAXLOC: Final[Op] = ... MINLOC: Final[Op] = ... REPLACE: Final[Op] = ... NO_OP: Final[Op] = ... class Group: def __new__(cls, group: Group | None = None) -> Self: ... def __eq__(self, __other: object) -> bool: ... def __ne__(self, __other: object) -> bool: ... def __bool__(self) -> bool: ... def __reduce__(self) -> str | tuple[Any, ...]: ... @classmethod def fromhandle(cls, handle: int) -> Group: ... def free(self) -> None: ... def Get_size(self) -> int: ... def Get_rank(self) -> int: ... def Translate_ranks(self, ranks: Sequence[int] | None = None, group: Group | None = None) -> list[int]: ... def Compare(self, group: Group) -> int: ... def Dup(self) -> Self: ... @classmethod def Union(cls, group1: Group, group2: Group) -> Self: ... @classmethod def Intersection(cls, group1: Group, group2: Group) -> Self: ... @classmethod def Difference(cls, group1: Group, group2: Group) -> Self: ... def Incl(self, ranks: Sequence[int]) -> Self: ... def Excl(self, ranks: Sequence[int]) -> Self: ... def Range_incl(self, ranks: Sequence[tuple[int, int, int]]) -> Self: ... def Range_excl(self, ranks: Sequence[tuple[int, int, int]]) -> Self: ... @classmethod def Create_from_session_pset(cls, session: Session, pset_name: str) -> Self: ... def Free(self) -> None: ... def py2f(self) -> int: ... @classmethod def f2py(cls, arg: int) -> Group: ... handle: int size: int rank: int @classmethod # python/mypy#15717 def Intersect(cls, group1: Group, group2: Group) -> Self: ... GROUP_NULL: Final[Group] = ... GROUP_EMPTY: Final[Group] = ... class Info: def __new__(cls, info: Info | None = None) -> Self: ... def __eq__(self, __other: object) -> bool: ... def __ne__(self, __other: object) -> bool: ... def __iter__(self) -> Iterator[str]: ... def __bool__(self) -> bool: ... def __len__(self) -> int: ... def __getitem__(self, __item: str) -> str: ... def __setitem__(self, __item: str, __value: str) -> None: ... 
def __delitem__(self, __item: str) -> None: ... def __contains__(self, __value: str) -> bool: ... def __reduce__(self) -> str | tuple[Any, ...]: ... @classmethod def fromhandle(cls, handle: int) -> Info: ... def free(self) -> None: ... @classmethod def Create(cls, items: Info | Mapping[str, str] | Iterable[tuple[str, str]] | None = None) -> Self: ... @classmethod def Create_env(cls, args: Sequence[str] | None = None) -> Self: ... def Free(self) -> None: ... def Dup(self) -> Self: ... def Get(self, key: str) -> str | None: ... def Set(self, key: str, value: str) -> None: ... def Delete(self, key: str) -> None: ... def Get_nkeys(self) -> int: ... def Get_nthkey(self, n: int) -> str: ... def py2f(self) -> int: ... @classmethod def f2py(cls, arg: int) -> Info: ... def get(self, key: str, default: str | None = None) -> str | None: ... def keys(self) -> list[str]: ... def values(self) -> list[str]: ... def items(self) -> list[tuple[str, str]]: ... def update(self, items: Info | Mapping[str, str] | Iterable[tuple[str, str]] = (), **kwds: str) -> None: ... def pop(self, key: str, *default: str) -> str: ... def popitem(self) -> tuple[str, str]: ... def copy(self) -> Self: ... def clear(self) -> None: ... handle: int INFO_NULL: Final[Info] = ... INFO_ENV: Final[Info] = ... class Errhandler: def __new__(cls, errhandler: Errhandler | None = None) -> Self: ... def __eq__(self, __other: object) -> bool: ... def __ne__(self, __other: object) -> bool: ... def __bool__(self) -> bool: ... def __reduce__(self) -> str | tuple[Any, ...]: ... @classmethod def fromhandle(cls, handle: int) -> Errhandler: ... def free(self) -> None: ... def Free(self) -> None: ... def py2f(self) -> int: ... @classmethod def f2py(cls, arg: int) -> Errhandler: ... handle: int ERRHANDLER_NULL: Final[Errhandler] = ... ERRORS_RETURN: Final[Errhandler] = ... ERRORS_ABORT: Final[Errhandler] = ... ERRORS_ARE_FATAL: Final[Errhandler] = ... class Session: def __new__(cls, session: Session | None = None) -> Self: ... def __eq__(self, __other: object) -> bool: ... def __ne__(self, __other: object) -> bool: ... def __bool__(self) -> bool: ... def __reduce__(self) -> str | tuple[Any, ...]: ... @classmethod def fromhandle(cls, handle: int) -> Session: ... def free(self) -> None: ... @classmethod def Init(cls, info: Info = INFO_NULL, errhandler: Errhandler | None = None) -> Self: ... def Finalize(self) -> None: ... def Get_num_psets(self, info: Info = INFO_NULL) -> int: ... def Get_nth_pset(self, n: int, info: Info = INFO_NULL) -> str: ... def Get_info(self) -> Info: ... def Get_pset_info(self, pset_name: str) -> Info: ... def Create_group(self, pset_name: str) -> Group: ... def Attach_buffer(self, buf: Buffer | None) -> None: ... def Detach_buffer(self) -> Buffer | None: ... def Flush_buffer(self) -> None: ... def Iflush_buffer(self) -> Request: ... @classmethod def Create_errhandler(cls, errhandler_fn: Callable[[Session, int], None]) -> Errhandler: ... def Get_errhandler(self) -> Errhandler: ... def Set_errhandler(self, errhandler: Errhandler) -> None: ... def Call_errhandler(self, errorcode: int) -> None: ... def py2f(self) -> int: ... @classmethod def f2py(cls, arg: int) -> Session: ... handle: int SESSION_NULL: Final[Session] = ... class Comm: def __new__(cls, comm: Comm | None = None) -> Self: ... def __eq__(self, __other: object) -> bool: ... def __ne__(self, __other: object) -> bool: ... def __bool__(self) -> bool: ... def __reduce__(self) -> str | tuple[Any, ...]: ... @classmethod def fromhandle(cls, handle: int) -> Comm: ... 
def free(self) -> None: ... def Get_group(self) -> Group: ... def Get_size(self) -> int: ... def Get_rank(self) -> int: ... def Compare(self, comm: Comm) -> int: ... def Clone(self) -> Self: ... def Dup(self, info: Info | None = None) -> Self: ... def Dup_with_info(self, info: Info) -> Self: ... def Idup(self, info: Info | None = None) -> tuple[Self, Request]: ... def Idup_with_info(self, info: Info) -> tuple[Self, Request]: ... def Create(self, group: Group) -> Comm: ... def Split(self, color: int = 0, key: int = 0) -> Comm: ... def Split_type(self, split_type: int, key: int = 0, info: Info = INFO_NULL) -> Comm: ... def Free(self) -> None: ... def Revoke(self) -> None: ... def Is_revoked(self) -> bool: ... def Get_failed(self) -> Group: ... def Ack_failed(self, num_to_ack: int | None = None) -> int: ... def Agree(self, flag: int) -> int: ... def Iagree(self, flag: Buffer) -> Request: ... def Shrink(self) -> Comm: ... def Ishrink(self) -> tuple[Comm, Request]: ... def Set_info(self, info: Info) -> None: ... def Get_info(self) -> Info: ... def Attach_buffer(self, buf: Buffer | None) -> None: ... def Detach_buffer(self) -> Buffer | None: ... def Flush_buffer(self) -> None: ... def Iflush_buffer(self) -> Request: ... def Send(self, buf: BufSpec, dest: int, tag: int = 0) -> None: ... def Recv(self, buf: BufSpec, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Status | None = None) -> None: ... def Sendrecv(self, sendbuf: BufSpec, dest: int, sendtag: int = 0, recvbuf: BufSpec | None = None, source: int = ANY_SOURCE, recvtag: int = ANY_TAG, status: Status | None = None) -> None: ... def Sendrecv_replace(self, buf: BufSpec, dest: int, sendtag: int = 0, source: int = ANY_SOURCE, recvtag: int = ANY_TAG, status: Status | None = None) -> None: ... def Isend(self, buf: BufSpec, dest: int, tag: int = 0) -> Request: ... def Irecv(self, buf: BufSpec, source: int = ANY_SOURCE, tag: int = ANY_TAG) -> Request: ... def Isendrecv(self, sendbuf: BufSpec, dest: int, sendtag: int = 0, recvbuf: BufSpec | None = None, source: int = ANY_SOURCE, recvtag: int = ANY_TAG) -> Request: ... def Isendrecv_replace(self, buf: BufSpec, dest: int, sendtag: int = 0, source: int = ANY_SOURCE, recvtag: int = ANY_TAG) -> Request: ... def Probe(self, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Status | None = None) -> Literal[True]: ... def Iprobe(self, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Status | None = None) -> bool: ... def Mprobe(self, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Status | None = None) -> Message: ... def Improbe(self, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Status | None = None) -> Message | None: ... def Send_init(self, buf: BufSpec, dest: int, tag: int = 0) -> Prequest: ... def Recv_init(self, buf: BufSpec, source: int = ANY_SOURCE, tag: int = ANY_TAG) -> Prequest: ... def Psend_init(self, buf: BufSpec, partitions: int, dest: int, tag: int = 0, info: Info = INFO_NULL) -> Prequest: ... def Precv_init(self, buf: BufSpec, partitions: int, source: int = ANY_SOURCE, tag: int = ANY_TAG, info: Info = INFO_NULL) -> Prequest: ... def Bsend(self, buf: BufSpec, dest: int, tag: int = 0) -> None: ... def Ssend(self, buf: BufSpec, dest: int, tag: int = 0) -> None: ... def Rsend(self, buf: BufSpec, dest: int, tag: int = 0) -> None: ... def Ibsend(self, buf: BufSpec, dest: int, tag: int = 0) -> Request: ... def Issend(self, buf: BufSpec, dest: int, tag: int = 0) -> Request: ... def Irsend(self, buf: BufSpec, dest: int, tag: int = 0) -> Request: ... 
def Bsend_init(self, buf: BufSpec, dest: int, tag: int = 0) -> Request: ... def Ssend_init(self, buf: BufSpec, dest: int, tag: int = 0) -> Request: ... def Rsend_init(self, buf: BufSpec, dest: int, tag: int = 0) -> Request: ... def Barrier(self) -> None: ... def Bcast(self, buf: BufSpec, root: int = 0) -> None: ... def Gather(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecB | None, root: int = 0) -> None: ... def Gatherv(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecV | None, root: int = 0) -> None: ... def Scatter(self, sendbuf: BufSpecB | None, recvbuf: BufSpec | InPlace, root: int = 0) -> None: ... def Scatterv(self, sendbuf: BufSpecV | None, recvbuf: BufSpec | InPlace, root: int = 0) -> None: ... def Allgather(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecB) -> None: ... def Allgatherv(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecV) -> None: ... def Alltoall(self, sendbuf: BufSpecB | InPlace, recvbuf: BufSpecB) -> None: ... def Alltoallv(self, sendbuf: BufSpecV | InPlace, recvbuf: BufSpecV) -> None: ... def Alltoallw(self, sendbuf: BufSpecW | InPlace, recvbuf: BufSpecW) -> None: ... def Reduce(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec | None, op: Op = SUM, root: int = 0) -> None: ... def Allreduce(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec, op: Op = SUM) -> None: ... def Reduce_scatter_block(self, sendbuf: BufSpecB | InPlace, recvbuf: BufSpec | BufSpecB, op: Op = SUM) -> None: ... def Reduce_scatter(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec, recvcounts: Sequence[int] | None = None, op: Op = SUM) -> None: ... def Ibarrier(self) -> Request: ... def Ibcast(self, buf: BufSpec, root: int = 0) -> Request: ... def Igather(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecB | None, root: int = 0) -> Request: ... def Igatherv(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecV | None, root: int = 0) -> Request: ... def Iscatter(self, sendbuf: BufSpecB | None, recvbuf: BufSpec | InPlace, root: int = 0) -> Request: ... def Iscatterv(self, sendbuf: BufSpecV | None, recvbuf: BufSpec | InPlace, root: int = 0) -> Request: ... def Iallgather(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecB) -> Request: ... def Iallgatherv(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecV) -> Request: ... def Ialltoall(self, sendbuf: BufSpecB | InPlace, recvbuf: BufSpecB) -> Request: ... def Ialltoallv(self, sendbuf: BufSpecV | InPlace, recvbuf: BufSpecV) -> Request: ... def Ialltoallw(self, sendbuf: BufSpecW | InPlace, recvbuf: BufSpecW) -> Request: ... def Ireduce(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec | None, op: Op = SUM, root: int = 0) -> Request: ... def Iallreduce(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec, op: Op = SUM) -> Request: ... def Ireduce_scatter_block(self, sendbuf: BufSpecB | InPlace, recvbuf: BufSpec | BufSpecB, op: Op = SUM) -> Request: ... def Ireduce_scatter(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec, recvcounts: Sequence[int] | None = None, op: Op = SUM) -> Request: ... def Barrier_init(self, info: Info = INFO_NULL) -> Prequest: ... def Bcast_init(self, buf: BufSpec, root: int = 0, info: Info = INFO_NULL) -> Prequest: ... def Gather_init(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecB | None, root: int = 0, info: Info = INFO_NULL) -> Prequest: ... def Gatherv_init(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecV | None, root: int = 0, info: Info = INFO_NULL) -> Prequest: ... def Scatter_init(self, sendbuf: BufSpecB | None, recvbuf: BufSpec | InPlace, root: int = 0, info: Info = INFO_NULL) -> Prequest: ... 
def Scatterv_init(self, sendbuf: BufSpecV | None, recvbuf: BufSpec | InPlace, root: int = 0, info: Info = INFO_NULL) -> Prequest: ... def Allgather_init(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecB, info: Info = INFO_NULL) -> Prequest: ... def Allgatherv_init(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecV, info: Info = INFO_NULL) -> Prequest: ... def Alltoall_init(self, sendbuf: BufSpecB | InPlace, recvbuf: BufSpecB, info: Info = INFO_NULL) -> Prequest: ... def Alltoallv_init(self, sendbuf: BufSpecV | InPlace, recvbuf: BufSpecV, info: Info = INFO_NULL) -> Prequest: ... def Alltoallw_init(self, sendbuf: BufSpecW | InPlace, recvbuf: BufSpecW, info: Info = INFO_NULL) -> Prequest: ... def Reduce_init(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec | None, op: Op = SUM, root: int = 0, info: Info = INFO_NULL) -> Prequest: ... def Allreduce_init(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec, op: Op = SUM, info: Info = INFO_NULL) -> Prequest: ... def Reduce_scatter_block_init(self, sendbuf: BufSpecB | InPlace, recvbuf: BufSpec | BufSpecB, op: Op = SUM, info: Info = INFO_NULL) -> Prequest: ... def Reduce_scatter_init(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec, recvcounts: Sequence[int] | None = None, op: Op = SUM, info: Info = INFO_NULL) -> Prequest: ... def Is_inter(self) -> bool: ... def Is_intra(self) -> bool: ... def Get_topology(self) -> int: ... @classmethod def Get_parent(cls) -> Intercomm: ... def Disconnect(self) -> None: ... @classmethod def Join(cls, fd: int) -> Intercomm: ... def Get_attr(self, keyval: int) -> int | Any | None: ... def Set_attr(self, keyval: int, attrval: Any) -> None: ... def Delete_attr(self, keyval: int) -> None: ... @classmethod def Create_keyval(cls, copy_fn: Callable[[Comm, int, Any], Any] | None = None, delete_fn: Callable[[Comm, int, Any], None] | None = None, nopython: bool = False) -> int: ... @classmethod def Free_keyval(cls, keyval: int) -> int: ... @classmethod def Create_errhandler(cls, errhandler_fn: Callable[[Comm, int], None]) -> Errhandler: ... def Get_errhandler(self) -> Errhandler: ... def Set_errhandler(self, errhandler: Errhandler) -> None: ... def Call_errhandler(self, errorcode: int) -> None: ... def Abort(self, errorcode: int = 0) -> NoReturn: ... def Get_name(self) -> str: ... def Set_name(self, name: str) -> None: ... def py2f(self) -> int: ... @classmethod def f2py(cls, arg: int) -> Comm: ... def send(self, obj: Any, dest: int, tag: int = 0) -> None: ... def bsend(self, obj: Any, dest: int, tag: int = 0) -> None: ... def ssend(self, obj: Any, dest: int, tag: int = 0) -> None: ... def recv(self, buf: Buffer | None = None, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Status | None = None) -> Any: ... def sendrecv(self, sendobj: Any, dest: int, sendtag: int = 0, recvbuf: Buffer | None = None, source: int = ANY_SOURCE, recvtag: int = ANY_TAG, status: Status | None = None) -> Any: ... def isend(self, obj: Any, dest: int, tag: int = 0) -> Request: ... def ibsend(self, obj: Any, dest: int, tag: int = 0) -> Request: ... def issend(self, obj: Any, dest: int, tag: int = 0) -> Request: ... def irecv(self, buf: Buffer | None = None, source: int = ANY_SOURCE, tag: int = ANY_TAG) -> Request: ... def probe(self, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Status | None = None) -> Literal[True]: ... def iprobe(self, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Status | None = None) -> bool: ... def mprobe(self, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Status | None = None) -> Message: ... 
def improbe(self, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Status | None = None) -> Message | None: ... def barrier(self) -> None: ... def bcast(self, obj: Any, root: int = 0) -> Any: ... def gather(self, sendobj: Any, root: int = 0) -> list[Any] | None: ... def scatter(self, sendobj: Sequence[Any] | None, root: int = 0) -> Any: ... def allgather(self, sendobj: Any) -> list[Any]: ... def alltoall(self, sendobj: Sequence[Any]) -> list[Any]: ... def reduce(self, sendobj: Any, op: Op | Callable[[Any, Any], Any] = SUM, root: int = 0) -> Any | None: ... def allreduce(self, sendobj: Any, op: Op | Callable[[Any, Any], Any] = SUM) -> Any: ... handle: int group: Group size: int rank: int info: Info is_inter: bool is_intra: bool topology: int is_topo: bool name: str COMM_NULL: Final[Comm] = ... class Intracomm(Comm): def __new__(cls, comm: Comm | None = None) -> Self: ... def Create_group(self, group: Group, tag: int = 0) -> Intracomm: ... @classmethod def Create_from_group(cls, group: Group, stringtag: str = 'org.mpi4py', info: Info = INFO_NULL, errhandler: Errhandler | None = None) -> Intracomm: ... def Create_cart(self, dims: Sequence[int], periods: Sequence[bool] | None = None, reorder: bool = False) -> Cartcomm: ... def Create_graph(self, index: Sequence[int], edges: Sequence[int], reorder: bool = False) -> Graphcomm: ... def Create_dist_graph_adjacent(self, sources: Sequence[int], destinations: Sequence[int], sourceweights: Sequence[int] | None = None, destweights: Sequence[int] | None = None, info: Info = INFO_NULL, reorder: bool = False) -> Distgraphcomm: ... def Create_dist_graph(self, sources: Sequence[int], degrees: Sequence[int], destinations: Sequence[int], weights: Sequence[int] | None = None, info: Info = INFO_NULL, reorder: bool = False) -> Distgraphcomm: ... def Create_intercomm(self, local_leader: int, peer_comm: Intracomm, remote_leader: int, tag: int = 0) -> Intercomm: ... def Cart_map(self, dims: Sequence[int], periods: Sequence[bool] | None = None) -> int: ... def Graph_map(self, index: Sequence[int], edges: Sequence[int]) -> int: ... def Scan(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec, op: Op = SUM) -> None: ... def Exscan(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec, op: Op = SUM) -> None: ... def Iscan(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec, op: Op = SUM) -> Request: ... def Iexscan(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec, op: Op = SUM) -> Request: ... def Scan_init(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec, op: Op = SUM, info: Info = INFO_NULL) -> Prequest: ... def Exscan_init(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec, op: Op = SUM, info: Info = INFO_NULL) -> Prequest: ... def scan(self, sendobj: Any, op: Op | Callable[[Any, Any], Any] = SUM) -> Any: ... def exscan(self, sendobj: Any, op: Op | Callable[[Any, Any], Any] = SUM) -> Any: ... def Spawn(self, command: str, args: Sequence[str] | None = None, maxprocs: int = 1, info: Info = INFO_NULL, root: int = 0, errcodes: list[int] | None = None) -> Intercomm: ... def Spawn_multiple(self, command: Sequence[str], args: Sequence[Sequence[str]] | None = None, maxprocs: Sequence[int] | None = None, info: Sequence[Info] | Info = INFO_NULL, root: int = 0, errcodes: list[list[int]] | None = None) -> Intercomm: ... def Accept(self, port_name: str, info: Info = INFO_NULL, root: int = 0) -> Intercomm: ... def Connect(self, port_name: str, info: Info = INFO_NULL, root: int = 0) -> Intercomm: ... COMM_SELF: Final[Intracomm] = ... COMM_WORLD: Final[Intracomm] = ... 
class Topocomm(Intracomm): def __new__(cls, comm: Comm | None = None) -> Self: ... def Neighbor_allgather(self, sendbuf: BufSpec, recvbuf: BufSpecB) -> None: ... def Neighbor_allgatherv(self, sendbuf: BufSpec, recvbuf: BufSpecV) -> None: ... def Neighbor_alltoall(self, sendbuf: BufSpecB, recvbuf: BufSpecB) -> None: ... def Neighbor_alltoallv(self, sendbuf: BufSpecV, recvbuf: BufSpecV) -> None: ... def Neighbor_alltoallw(self, sendbuf: BufSpecW, recvbuf: BufSpecW) -> None: ... def Ineighbor_allgather(self, sendbuf: BufSpec, recvbuf: BufSpecB) -> Request: ... def Ineighbor_allgatherv(self, sendbuf: BufSpec, recvbuf: BufSpecV) -> Request: ... def Ineighbor_alltoall(self, sendbuf: BufSpecB, recvbuf: BufSpecB) -> Request: ... def Ineighbor_alltoallv(self, sendbuf: BufSpecV, recvbuf: BufSpecV) -> Request: ... def Ineighbor_alltoallw(self, sendbuf: BufSpecW, recvbuf: BufSpecW) -> Request: ... def Neighbor_allgather_init(self, sendbuf: BufSpec, recvbuf: BufSpecB, info: Info = INFO_NULL) -> Prequest: ... def Neighbor_allgatherv_init(self, sendbuf: BufSpec, recvbuf: BufSpecV, info: Info = INFO_NULL) -> Prequest: ... def Neighbor_alltoall_init(self, sendbuf: BufSpecB, recvbuf: BufSpecB, info: Info = INFO_NULL) -> Prequest: ... def Neighbor_alltoallv_init(self, sendbuf: BufSpecV, recvbuf: BufSpecV, info: Info = INFO_NULL) -> Prequest: ... def Neighbor_alltoallw_init(self, sendbuf: BufSpecW, recvbuf: BufSpecW, info: Info = INFO_NULL) -> Prequest: ... def neighbor_allgather(self, sendobj: Any) -> list[Any]: ... def neighbor_alltoall(self, sendobj: list[Any]) -> list[Any]: ... degrees: tuple[int, int] indegree: int outdegree: int inoutedges: tuple[list[int], list[int]] inedges: list[int] outedges: list[int] class Cartcomm(Topocomm): def __new__(cls, comm: Comm | None = None) -> Self: ... def Get_dim(self) -> int: ... def Get_topo(self) -> tuple[list[int], list[int], list[int]]: ... def Get_cart_rank(self, coords: Sequence[int]) -> int: ... def Get_coords(self, rank: int) -> list[int]: ... def Shift(self, direction: int, disp: int) -> tuple[int, int]: ... def Sub(self, remain_dims: Sequence[bool]) -> Cartcomm: ... dim: int ndim: int topo: tuple[list[int], list[int], list[int]] dims: list[int] periods: list[int] coords: list[int] class Graphcomm(Topocomm): def __new__(cls, comm: Comm | None = None) -> Self: ... def Get_dims(self) -> tuple[int, int]: ... def Get_topo(self) -> tuple[list[int], list[int]]: ... def Get_neighbors_count(self, rank: int) -> int: ... def Get_neighbors(self, rank: int) -> list[int]: ... dims: tuple[int, int] nnodes: int nedges: int topo: tuple[list[int], list[int]] index: list[int] edges: list[int] nneighbors: int neighbors: list[int] class Distgraphcomm(Topocomm): def __new__(cls, comm: Comm | None = None) -> Self: ... def Get_dist_neighbors_count(self) -> int: ... def Get_dist_neighbors(self) -> tuple[list[int], list[int], tuple[list[int], list[int]] | None]: ... class Intercomm(Comm): def __new__(cls, comm: Comm | None = None) -> Self: ... @classmethod def Create_from_groups(cls, local_group: Group, local_leader: int, remote_group: Group, remote_leader: int, stringtag: str = 'org.mpi4py', info: Info = INFO_NULL, errhandler: Errhandler | None = None) -> Intracomm: ... def Get_remote_group(self) -> Group: ... def Get_remote_size(self) -> int: ... def Merge(self, high: bool = False) -> Intracomm: ... remote_group: Group remote_size: int class Win: def __new__(cls, win: Win | None = None) -> Self: ... def __eq__(self, __other: object) -> bool: ... 
def __ne__(self, __other: object) -> bool: ... if sys.version_info >= (3, 12): def __buffer__(self, __flags: int) -> memoryview: ... def __bool__(self) -> bool: ... def __reduce__(self) -> str | tuple[Any, ...]: ... @classmethod def fromhandle(cls, handle: int) -> Win: ... def free(self) -> None: ... @classmethod def Create(cls, memory: Buffer | Bottom, disp_unit: int = 1, info: Info = INFO_NULL, comm: Intracomm = COMM_SELF) -> Self: ... @classmethod def Allocate(cls, size: int, disp_unit: int = 1, info: Info = INFO_NULL, comm: Intracomm = COMM_SELF) -> Self: ... @classmethod def Allocate_shared(cls, size: int, disp_unit: int = 1, info: Info = INFO_NULL, comm: Intracomm = COMM_SELF) -> Self: ... def Shared_query(self, rank: int) -> tuple[buffer, int]: ... @classmethod def Create_dynamic(cls, info: Info = INFO_NULL, comm: Intracomm = COMM_SELF) -> Self: ... def Attach(self, memory: Buffer) -> None: ... def Detach(self, memory: Buffer) -> None: ... def Free(self) -> None: ... def Set_info(self, info: Info) -> None: ... def Get_info(self) -> Info: ... def Get_group(self) -> Group: ... def Get_attr(self, keyval: int) -> int | Any | None: ... def Set_attr(self, keyval: int, attrval: Any) -> None: ... def Delete_attr(self, keyval: int) -> None: ... @classmethod def Create_keyval(cls, copy_fn: Callable[[Win, int, Any], Any] | None = None, delete_fn: Callable[[Win, int, Any], None] | None = None, nopython: bool = False) -> int: ... @classmethod def Free_keyval(cls, keyval: int) -> int: ... def tomemory(self) -> buffer: ... def Put(self, origin: BufSpec, target_rank: int, target: TargetSpec | None = None) -> None: ... def Get(self, origin: BufSpec, target_rank: int, target: TargetSpec | None = None) -> None: ... def Accumulate(self, origin: BufSpec, target_rank: int, target: TargetSpec | None = None, op: Op = SUM) -> None: ... def Get_accumulate(self, origin: BufSpec, result: BufSpec, target_rank: int, target: TargetSpec | None = None, op: Op = SUM) -> None: ... def Fetch_and_op(self, origin: BufSpec, result: BufSpec, target_rank: int, target_disp: int = 0, op: Op = SUM) -> None: ... def Compare_and_swap(self, origin: BufSpec, compare: BufSpec, result: BufSpec, target_rank: int, target_disp: int = 0) -> None: ... def Rput(self, origin: BufSpec, target_rank: int, target: TargetSpec | None = None) -> Request: ... def Rget(self, origin: BufSpec, target_rank: int, target: TargetSpec | None = None) -> Request: ... def Raccumulate(self, origin: BufSpec, target_rank: int, target: TargetSpec | None = None, op: Op = SUM) -> Request: ... def Rget_accumulate(self, origin: BufSpec, result: BufSpec, target_rank: int, target: TargetSpec | None = None, op: Op = SUM) -> Request: ... def Fence(self, assertion: int = 0) -> None: ... def Start(self, group: Group, assertion: int = 0) -> None: ... def Complete(self) -> None: ... def Post(self, group: Group, assertion: int = 0) -> None: ... def Wait(self) -> Literal[True]: ... def Test(self) -> bool: ... def Lock(self, rank: int, lock_type: int = LOCK_EXCLUSIVE, assertion: int = 0) -> None: ... def Unlock(self, rank: int) -> None: ... def Lock_all(self, assertion: int = 0) -> None: ... def Unlock_all(self) -> None: ... def Flush(self, rank: int) -> None: ... def Flush_all(self) -> None: ... def Flush_local(self, rank: int) -> None: ... def Flush_local_all(self) -> None: ... def Sync(self) -> None: ... @classmethod def Create_errhandler(cls, errhandler_fn: Callable[[Win, int], None]) -> Errhandler: ... def Get_errhandler(self) -> Errhandler: ... 
def Set_errhandler(self, errhandler: Errhandler) -> None: ... def Call_errhandler(self, errorcode: int) -> None: ... def Get_name(self) -> str: ... def Set_name(self, name: str) -> None: ... def py2f(self) -> int: ... @classmethod def f2py(cls, arg: int) -> Win: ... handle: int info: Info group: Group group_size: int group_rank: int attrs: tuple[int, int, int] flavor: int model: int name: str WIN_NULL: Final[Win] = ... class File: def __new__(cls, file: File | None = None) -> Self: ... def __eq__(self, __other: object) -> bool: ... def __ne__(self, __other: object) -> bool: ... def __bool__(self) -> bool: ... def __reduce__(self) -> str | tuple[Any, ...]: ... @classmethod def fromhandle(cls, handle: int) -> File: ... def free(self) -> None: ... @classmethod def Open(cls, comm: Intracomm, filename: PathLike[AnyStr] | str | bytes, amode: int = MODE_RDONLY, info: Info = INFO_NULL) -> Self: ... def Close(self) -> None: ... @classmethod def Delete(cls, filename: PathLike[AnyStr] | str | bytes, info: Info = INFO_NULL) -> None: ... def Set_size(self, size: int) -> None: ... def Preallocate(self, size: int) -> None: ... def Get_size(self) -> int: ... def Get_amode(self) -> int: ... def Get_group(self) -> Group: ... def Set_info(self, info: Info) -> None: ... def Get_info(self) -> Info: ... def Set_view(self, disp: int = 0, etype: Datatype = BYTE, filetype: Datatype | None = None, datarep: str = 'native', info: Info = INFO_NULL) -> None: ... def Get_view(self) -> tuple[int, Datatype, Datatype, str]: ... def Read_at(self, offset: int, buf: BufSpec, status: Status | None = None) -> None: ... def Read_at_all(self, offset: int, buf: BufSpec, status: Status | None = None) -> None: ... def Write_at(self, offset: int, buf: BufSpec, status: Status | None = None) -> None: ... def Write_at_all(self, offset: int, buf: BufSpec, status: Status | None = None) -> None: ... def Iread_at(self, offset: int, buf: BufSpec) -> Request: ... def Iread_at_all(self, offset: int, buf: BufSpec) -> Request: ... def Iwrite_at(self, offset: int, buf: BufSpec) -> Request: ... def Iwrite_at_all(self, offset: int, buf: BufSpec) -> Request: ... def Read(self, buf: BufSpec, status: Status | None = None) -> None: ... def Read_all(self, buf: BufSpec, status: Status | None = None) -> None: ... def Write(self, buf: BufSpec, status: Status | None = None) -> None: ... def Write_all(self, buf: BufSpec, status: Status | None = None) -> None: ... def Iread(self, buf: BufSpec) -> Request: ... def Iread_all(self, buf: BufSpec) -> Request: ... def Iwrite(self, buf: BufSpec) -> Request: ... def Iwrite_all(self, buf: BufSpec) -> Request: ... def Seek(self, offset: int, whence: int = SEEK_SET) -> None: ... def Get_position(self) -> int: ... def Get_byte_offset(self, offset: int) -> int: ... def Read_shared(self, buf: BufSpec, status: Status | None = None) -> None: ... def Write_shared(self, buf: BufSpec, status: Status | None = None) -> None: ... def Iread_shared(self, buf: BufSpec) -> Request: ... def Iwrite_shared(self, buf: BufSpec) -> Request: ... def Read_ordered(self, buf: BufSpec, status: Status | None = None) -> None: ... def Write_ordered(self, buf: BufSpec, status: Status | None = None) -> None: ... def Seek_shared(self, offset: int, whence: int = SEEK_SET) -> None: ... def Get_position_shared(self) -> int: ... def Read_at_all_begin(self, offset: int, buf: BufSpec) -> None: ... def Read_at_all_end(self, buf: BufSpec, status: Status | None = None) -> None: ... def Write_at_all_begin(self, offset: int, buf: BufSpec) -> None: ... 
def Write_at_all_end(self, buf: BufSpec, status: Status | None = None) -> None: ... def Read_all_begin(self, buf: BufSpec) -> None: ... def Read_all_end(self, buf: BufSpec, status: Status | None = None) -> None: ... def Write_all_begin(self, buf: BufSpec) -> None: ... def Write_all_end(self, buf: BufSpec, status: Status | None = None) -> None: ... def Read_ordered_begin(self, buf: BufSpec) -> None: ... def Read_ordered_end(self, buf: BufSpec, status: Status | None = None) -> None: ... def Write_ordered_begin(self, buf: BufSpec) -> None: ... def Write_ordered_end(self, buf: BufSpec, status: Status | None = None) -> None: ... def Get_type_extent(self, datatype: Datatype) -> int: ... def Set_atomicity(self, flag: bool) -> None: ... def Get_atomicity(self) -> bool: ... def Sync(self) -> None: ... @classmethod def Create_errhandler(cls, errhandler_fn: Callable[[File, int], None]) -> Errhandler: ... def Get_errhandler(self) -> Errhandler: ... def Set_errhandler(self, errhandler: Errhandler) -> None: ... def Call_errhandler(self, errorcode: int) -> None: ... def py2f(self) -> int: ... @classmethod def f2py(cls, arg: int) -> File: ... handle: int size: int amode: int group: Group group_size: int group_rank: int info: Info atomicity: bool FILE_NULL: Final[File] = ... @final class buffer: @overload def __new__(cls) -> Self: ... @overload def __new__(cls, __buf: Buffer) -> Self: ... if sys.version_info >= (3, 12): def __buffer__(self, __flags: int) -> memoryview: ... def __len__(self) -> int: ... @overload def __getitem__(self, __item: int) -> int: ... @overload def __getitem__(self, __item: slice) -> buffer: ... @overload def __setitem__(self, __item: int, __value: int) -> None: ... @overload def __setitem__(self, __item: slice, __value: Buffer) -> None: ... @staticmethod def allocate(nbytes: int, clear: bool = False) -> buffer: ... @staticmethod def frombuffer(obj: Buffer, readonly: bool = False) -> buffer: ... @staticmethod def fromaddress(address: int, nbytes: int, readonly: bool = False) -> buffer: ... def cast(self, format: str, shape: list[int] | tuple[int, ...] = ...) -> memoryview: ... def tobytes(self, order: str | None = None) -> bytes: ... def toreadonly(self) -> buffer: ... def release(self) -> None: ... address: int obj: Buffer | None nbytes: int readonly: bool format: str itemsize: int memory = buffer @final class BufferAutomaticType(int): def __new__(cls) -> Self: ... if sys.version_info >= (3, 12): def __buffer__(self, __flags: int) -> memoryview: ... def __reduce__(self) -> str: ... @final class BottomType(int): def __new__(cls) -> Self: ... if sys.version_info >= (3, 12): def __buffer__(self, __flags: int) -> memoryview: ... def __reduce__(self) -> str: ... @final class InPlaceType(int): def __new__(cls) -> Self: ... if sys.version_info >= (3, 12): def __buffer__(self, __flags: int) -> memoryview: ... def __reduce__(self) -> str: ... class Pickle: @overload def __init__(self, dumps: Callable[[Any, int], bytes], loads: Callable[[Buffer], Any], protocol: int | None = None, threshold: int | None = None, ) -> None: ... @overload def __init__(self, dumps: Callable[[Any], bytes] | None = None, loads: Callable[[Buffer], Any] | None = None, ) -> None: ... def dumps(self, obj: Any) -> bytes: ... def loads(self, data: Buffer) -> Any: ... def dumps_oob(self, obj: Any) -> tuple[bytes, list[buffer]]: ... def loads_oob(self, data: Buffer, buffers: Iterable[Buffer]) -> Any: ... PROTOCOL: int | None THRESHOLD: int pickle: Final[Pickle] = ... 
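# Example (an illustrative sketch, not part of these stubs): the `buffer`
# helper declared above owns raw memory and interoperates with memoryview.
#
#     from mpi4py import MPI
#     buf = MPI.buffer.allocate(16, clear=True)   # 16 zero-initialized bytes
#     view = buf.cast('i')                        # reinterpret as C ints
#     view[0] = 42
#     assert buf.nbytes == 16 and not buf.readonly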
class Exception(RuntimeError): def __new__(cls, ierr: int = SUCCESS) -> Self: ... def __hash__(self) -> int: ... def __eq__(self, __other: object) -> bool: ... def __ne__(self, __other: object) -> bool: ... def __bool__(self) -> bool: ... def __int__(self) -> int: ... def Get_error_code(self) -> int: ... def Get_error_class(self) -> int: ... def Get_error_string(self) -> str: ... error_code: int error_class: int error_string: str def Get_error_class(errorcode: int) -> int: ... def Get_error_string(errorcode: int) -> str: ... def Add_error_class() -> int: ... def Remove_error_class(errorclass: int) -> None: ... def Add_error_code(errorclass: int) -> int: ... def Remove_error_code(errorcode: int) -> None: ... def Add_error_string(errorcode: int, string: str) -> None: ... def Remove_error_string(errorcode: int) -> None: ... def Get_address(location: Buffer | Bottom) -> int: ... def Aint_add(base: int, disp: int) -> int: ... def Aint_diff(addr1: int, addr2: int) -> int: ... def Compute_dims(nnodes: int, dims: int | Sequence[int]) -> list[int]: ... def Attach_buffer(buf: Buffer | None) -> None: ... def Detach_buffer() -> Buffer | None: ... def Flush_buffer() -> None: ... def Iflush_buffer() -> Request: ... def Open_port(info: Info = INFO_NULL) -> str: ... def Close_port(port_name: str) -> None: ... def Publish_name(service_name: str, port_name: str, info: Info = INFO_NULL) -> None: ... def Unpublish_name(service_name: str, port_name: str, info: Info = INFO_NULL) -> None: ... def Lookup_name(service_name: str, info: Info = INFO_NULL) -> str: ... def Register_datarep(datarep: str, read_fn: Callable[[Buffer, Datatype, int, Buffer, int], None], write_fn: Callable[[Buffer, Datatype, int, Buffer, int], None], extent_fn: Callable[[Datatype], int]) -> None: ... def Alloc_mem(size: int, info: Info = INFO_NULL) -> buffer: ... def Free_mem(mem: buffer) -> None: ... def Init() -> None: ... def Finalize() -> None: ... def Init_thread(required: int = THREAD_MULTIPLE) -> int: ... def Query_thread() -> int: ... def Is_thread_main() -> bool: ... def Is_initialized() -> bool: ... def Is_finalized() -> bool: ... def Get_version() -> tuple[int, int]: ... def Get_library_version() -> str: ... def Get_processor_name() -> str: ... def Get_hw_resource_info() -> Info: ... def Wtime() -> float: ... def Wtick() -> float: ... def Pcontrol(level: int) -> None: ... def get_vendor() -> tuple[str, tuple[int, int, int]]: ... def _set_abort_status(status: int) -> None: ... def _comm_lock(comm: Comm, key: Hashable | None = None) -> Lock: ... def _comm_lock_table(comm: Comm) -> dict[Hashable, Lock]: ... def _commctx_intra(comm: Intracomm) -> tuple[Intracomm, int]: ... def _commctx_inter(comm: Intercomm) -> tuple[Intercomm, int, Intracomm, bool]: ... def _typecode(datatype: Datatype) -> str | None: ... def _typealign(datatype: Datatype) -> int | None: ... def _datatype_create(datatype: Datatype, combiner: str, params: dict[str, Any], free: bool = False) -> Datatype: ... def _datatype_decode(datatype: Datatype, mark: bool = False) -> tuple[Datatype, str, dict[str, Any]]: ... def _sizeof(arg: Any) -> int: ... def _addressof(arg: Any) -> int: ... def _handleof(arg: Any) -> int: ... __pyx_capi__: Final[dict[str, Any]] = ... _typedict: Final[dict[str, Datatype]] = ... _typedict_c: Final[dict[str, Datatype]] = ... _typedict_f: Final[dict[str, Datatype]] = ... 
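# Example (an illustrative sketch, not part of these stubs): querying the MPI
# library at runtime with the module-level helpers declared above.
#
#     from mpi4py import MPI
#     major, minor = MPI.Get_version()       # MPI standard version, e.g. (4, 0)
#     vendor, triple = MPI.get_vendor()      # e.g. ('MPICH', (4, 1, 2))
#     resolution = MPI.Wtick()               # resolution of MPI.Wtime(), in seconds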
from .typing import ( # noqa: E402 Buffer, Bottom, InPlace, BufSpec, BufSpecB, BufSpecV, BufSpecW, TargetSpec, ) mpi4py-4.0.3/src/mpi4py/MPI.pyx000066400000000000000000000010271475341043600161470ustar00rootroot00000000000000# cython: language_level=3str # cython: embedsignature=True # cython: embedsignature.format=python # cython: annotation_typing=False # cython: cdivision=True # cython: auto_pickle=False # cython: always_allow_keywords=True # cython: allow_none_for_extension_args=False # cython: autotestdict=False # cython: warn.multiple_declarators=False # cython: optimize.use_switch=False # cython: binding=True # cython: freethreading_compatible=True from __future__ import annotations cimport cython # no-cython-lint include "MPI.src/MPI.pyx" mpi4py-4.0.3/src/mpi4py/MPI.src/000077500000000000000000000000001475341043600161735ustar00rootroot00000000000000mpi4py-4.0.3/src/mpi4py/MPI.src/CAPI.pxi000066400000000000000000000110001475341043600174210ustar00rootroot00000000000000# ----------------------------------------------------------------------------- # Datatype cdef api object PyMPIDatatype_New(MPI_Datatype arg): cdef Datatype obj = Datatype.__new__(Datatype) obj.ob_mpi = arg obj.flags |= 0 return obj cdef api MPI_Datatype* PyMPIDatatype_Get(object arg) except NULL: return &(arg).ob_mpi # ----------------------------------------------------------------------------- # Status cdef api object PyMPIStatus_New(MPI_Status *arg): cdef Status obj = Status.__new__(Status) if ( arg != NULL and arg != MPI_STATUS_IGNORE and arg != MPI_STATUSES_IGNORE ): obj.ob_mpi = arg[0] return obj cdef api MPI_Status* PyMPIStatus_Get(object arg) except? NULL: if arg is None: return MPI_STATUS_IGNORE return &(arg).ob_mpi # ----------------------------------------------------------------------------- # Request cdef api object PyMPIRequest_New(MPI_Request arg): cdef Request obj = Request.__new__(Request) obj.ob_mpi = arg obj.flags |= 0 return obj cdef api object PyMPIPrequest_New(MPI_Request arg): cdef Prequest obj = Prequest.__new__(Prequest) obj.ob_mpi = arg obj.flags |= 0 return obj cdef api object PyMPIGrequest_New(MPI_Request arg): cdef Grequest obj = Grequest.__new__(Grequest) obj.ob_grequest = arg obj.ob_mpi = arg obj.flags |= 0 return obj cdef api MPI_Request* PyMPIRequest_Get(object arg) except NULL: return &(arg).ob_mpi # ----------------------------------------------------------------------------- # Message cdef api object PyMPIMessage_New(MPI_Message arg): cdef Message obj = Message.__new__(Message) obj.ob_mpi = arg obj.flags |= 0 return obj cdef api MPI_Message* PyMPIMessage_Get(object arg) except NULL: return &(arg).ob_mpi # ----------------------------------------------------------------------------- # Op cdef api object PyMPIOp_New(MPI_Op arg): cdef Op obj = Op.__new__(Op) obj.ob_mpi = arg obj.flags |= 0 return obj cdef api MPI_Op* PyMPIOp_Get(object arg) except NULL: return &(arg).ob_mpi # ----------------------------------------------------------------------------- # Group cdef api object PyMPIGroup_New(MPI_Group arg): cdef Group obj = Group.__new__(Group) obj.ob_mpi = arg return obj cdef api MPI_Group* PyMPIGroup_Get(object arg) except NULL: return &(arg).ob_mpi # ----------------------------------------------------------------------------- # Info cdef api object PyMPIInfo_New(MPI_Info arg): cdef Info obj = Info.__new__(Info) obj.ob_mpi = arg obj.flags |= 0 return obj cdef api MPI_Info* PyMPIInfo_Get(object arg) except NULL: return &(arg).ob_mpi # 
----------------------------------------------------------------------------- # Errhandler cdef api object PyMPIErrhandler_New(MPI_Errhandler arg): cdef Errhandler obj = Errhandler.__new__(Errhandler) obj.ob_mpi = arg obj.flags |= 0 return obj cdef api MPI_Errhandler* PyMPIErrhandler_Get(object arg) except NULL: return &(arg).ob_mpi # ----------------------------------------------------------------------------- # Session cdef api object PyMPISession_New(MPI_Session arg): cdef Session obj = Session.__new__(Session) obj.ob_mpi = arg obj.flags |= 0 return obj cdef api MPI_Session* PyMPISession_Get(object arg) except NULL: return &(arg).ob_mpi # ----------------------------------------------------------------------------- # Comm cdef api object PyMPIComm_New(MPI_Comm arg): cdef type cls = CommType(arg) cdef Comm obj = cls.__new__(cls) obj.ob_mpi = arg obj.flags |= 0 return obj cdef api MPI_Comm* PyMPIComm_Get(object arg) except NULL: return &(arg).ob_mpi # ----------------------------------------------------------------------------- # Win cdef api object PyMPIWin_New(MPI_Win arg): cdef Win obj = Win.__new__(Win) obj.ob_mpi = arg obj.flags |= 0 return obj cdef api MPI_Win* PyMPIWin_Get(object arg) except NULL: return &(arg).ob_mpi # ----------------------------------------------------------------------------- # File cdef api object PyMPIFile_New(MPI_File arg): cdef File obj = File.__new__(File) obj.ob_mpi = arg obj.flags |= 0 return obj cdef api MPI_File* PyMPIFile_Get(object arg) except NULL: return &(arg).ob_mpi # ----------------------------------------------------------------------------- mpi4py-4.0.3/src/mpi4py/MPI.src/Comm.pyx000066400000000000000000003302431475341043600176350ustar00rootroot00000000000000# Communicator Comparisons # ------------------------ IDENT = MPI_IDENT #: Groups are identical, contexts are the same CONGRUENT = MPI_CONGRUENT #: Groups are identical, contexts are different SIMILAR = MPI_SIMILAR #: Groups are similar, rank order differs UNEQUAL = MPI_UNEQUAL #: Groups are different # Communicator Topologies # ----------------------- CART = MPI_CART #: Cartesian topology GRAPH = MPI_GRAPH #: General graph topology DIST_GRAPH = MPI_DIST_GRAPH #: Distributed graph topology # Graph Communicator Weights # -------------------------- UNWEIGHTED = __UNWEIGHTED__ #: Unweighted graph WEIGHTS_EMPTY = __WEIGHTS_EMPTY__ #: Empty graph weights # Communicator Split Type # ----------------------- COMM_TYPE_SHARED = MPI_COMM_TYPE_SHARED COMM_TYPE_HW_GUIDED = MPI_COMM_TYPE_HW_GUIDED COMM_TYPE_HW_UNGUIDED = MPI_COMM_TYPE_HW_UNGUIDED COMM_TYPE_RESOURCE_GUIDED = MPI_COMM_TYPE_RESOURCE_GUIDED cdef class Comm: """ Communication context. """ def __cinit__(self, Comm comm: Comm | None = None): cinit(self, comm) def __dealloc__(self): dealloc(self) def __richcmp__(self, other, int op): if not isinstance(other, Comm): return NotImplemented return richcmp(self, other, op) def __bool__(self) -> bool: return nonnull(self) def __reduce__(self) -> str | tuple[Any, ...]: return reduce_default(self) property handle: """MPI handle.""" def __get__(self) -> int: return tohandle(self) @classmethod def fromhandle(cls, handle: int) -> Comm: """ Create object from MPI handle. """ return fromhandle( handle) def free(self) -> None: """ Call `Free` if not null or predefined. """ safefree(self) # Group # ----- def Get_group(self) -> Group: """ Access the group associated with a communicator. 
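Example (a minimal sketch; ``comm`` is this communicator)::

    group = comm.Get_group()
    try:
        nprocs = group.Get_size()
    finally:
        group.Free()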
""" cdef Group group = New(Group) with nogil: CHKERR( MPI_Comm_group(self.ob_mpi, &group.ob_mpi) ) return group property group: """Group.""" def __get__(self) -> Group: return self.Get_group() # Communicator Accessors # ---------------------- def Get_size(self) -> int: """ Return the number of processes in a communicator. """ cdef int size = -1 CHKERR( MPI_Comm_size(self.ob_mpi, &size) ) return size property size: """Number of processes.""" def __get__(self) -> int: return self.Get_size() def Get_rank(self) -> int: """ Return the rank of this process in a communicator. """ cdef int rank = MPI_PROC_NULL CHKERR( MPI_Comm_rank(self.ob_mpi, &rank) ) return rank property rank: """Rank of this process.""" def __get__(self) -> int: return self.Get_rank() def Compare(self, Comm comm: Comm) -> int: """ Compare two communicators. """ cdef int flag = MPI_UNEQUAL with nogil: CHKERR( MPI_Comm_compare( self.ob_mpi, comm.ob_mpi, &flag) ) return flag # Communicator Constructors # ------------------------- def Clone(self) -> Self: """ Clone an existing communicator. """ cdef Comm comm = New(type(self)) with nogil: CHKERR( MPI_Comm_dup(self.ob_mpi, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm def Dup(self, Info info: Info | None = None) -> Self: """ Duplicate a communicator. """ cdef MPI_Info cinfo = arg_Info(info) cdef Comm comm = New(type(self)) if info is None: with nogil: CHKERR( MPI_Comm_dup( self.ob_mpi, &comm.ob_mpi) ) else: with nogil: CHKERR( MPI_Comm_dup_with_info( self.ob_mpi, cinfo, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm def Dup_with_info(self, Info info: Info) -> Self: """ Duplicate a communicator with hints. """ cdef Comm comm = New(type(self)) with nogil: CHKERR( MPI_Comm_dup_with_info( self.ob_mpi, info.ob_mpi, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm def Idup(self, Info info: Info | None = None) -> tuple[Self, Request]: """ Nonblocking duplicate a communicator. """ cdef MPI_Info cinfo = arg_Info(info) cdef Comm comm = New(type(self)) cdef Request request = New(Request) if info is None: with nogil: CHKERR( MPI_Comm_idup( self.ob_mpi, &comm.ob_mpi, &request.ob_mpi) ) else: with nogil: CHKERR( MPI_Comm_idup_with_info( self.ob_mpi, cinfo, &comm.ob_mpi, &request.ob_mpi) ) comm_set_eh(comm.ob_mpi) return (comm, request) def Idup_with_info(self, Info info: Info) -> tuple[Self, Request]: """ Nonblocking duplicate a communicator with hints. """ cdef Comm comm = New(type(self)) cdef Request request = New(Request) with nogil: CHKERR( MPI_Comm_idup_with_info( self.ob_mpi, info.ob_mpi, &comm.ob_mpi, &request.ob_mpi) ) comm_set_eh(comm.ob_mpi) return (comm, request) def Create(self, Group group: Group) -> Comm: """ Create communicator from group. """ cdef type cls = Comm if isinstance(self, Intracomm): cls = Intracomm elif isinstance(self, Intercomm): cls = Intercomm cdef Comm comm = New(cls) with nogil: CHKERR( MPI_Comm_create( self.ob_mpi, group.ob_mpi, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm def Split(self, int color: int = 0, int key: int = 0) -> Comm: """ Split communicator by color and key. """ cdef type cls = Comm if isinstance(self, Intracomm): cls = Intracomm elif isinstance(self, Intercomm): cls = Intercomm cdef Comm comm = New(cls) with nogil: CHKERR( MPI_Comm_split( self.ob_mpi, color, key, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm def Split_type( self, int split_type: int, int key: int = 0, Info info: Info = INFO_NULL, ) -> Comm: """ Split communicator by split type. 
""" cdef type cls = Comm if isinstance(self, Intracomm): cls = Intracomm elif isinstance(self, Intercomm): cls = Intercomm cdef Comm comm = New(cls) with nogil: CHKERR( MPI_Comm_split_type( self.ob_mpi, split_type, key, info.ob_mpi, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm # Communicator Destructor # ----------------------- def Free(self) -> None: """ Free a communicator. """ cdef MPI_Comm save = self.ob_mpi with nogil: CHKERR( MPI_Comm_free(&self.ob_mpi) ) if constobj(self): self.ob_mpi = save # Process Fault Tolerance # ----------------------- def Revoke(self) -> None: """ Revoke a communicator. """ with nogil: CHKERR( MPI_Comm_revoke(self.ob_mpi) ) def Is_revoked(self) -> bool: """ Indicate whether the communicator has been revoked. """ cdef int flag = 0 with nogil: CHKERR( MPI_Comm_is_revoked(self.ob_mpi, &flag) ) return flag def Get_failed(self) -> Group: """ Extract the group of failed processes. """ cdef Group group = New(Group) with nogil: CHKERR( MPI_Comm_get_failed(self.ob_mpi, &group.ob_mpi) ) return group def Ack_failed(self, num_to_ack: int | None = None) -> int: """ Acknowledge failures on a communicator. """ cdef int num_acked = MPI_UNDEFINED cdef int c_num_to_ack = MPI_UNDEFINED if num_to_ack is not None: c_num_to_ack = num_to_ack else: CHKERR( MPI_Comm_size(self.ob_mpi, &c_num_to_ack) ) with nogil: CHKERR( MPI_Comm_ack_failed( self.ob_mpi, c_num_to_ack, &num_acked) ) return num_acked def Agree(self, int flag: int) -> int: """ Blocking agreement. """ with nogil: CHKERR( MPI_Comm_agree(self.ob_mpi, &flag) ) return flag def Iagree(self, flag: Buffer) -> Request: """ Nonblocking agreement. """ cdef int *flag_ptr = NULL cdef MPI_Aint flag_len = 0 flag = aspybuffer(flag, &flag_ptr, &flag_len, 0, b"i") if flag_len != 1: raise ValueError( "flag: expecting int buffer of length one") cdef Request request = New(Request) with nogil: CHKERR( MPI_Comm_iagree( self.ob_mpi, flag_ptr, &request.ob_mpi) ) request.ob_buf = flag return request def Shrink(self) -> Comm: """ Shrink a communicator to remove all failed processes. """ cdef type cls = Comm if isinstance(self, Intracomm): cls = Intracomm elif isinstance(self, Intercomm): cls = Intercomm cdef Comm comm = New(cls) with nogil: CHKERR( MPI_Comm_shrink(self.ob_mpi, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm def Ishrink(self) -> tuple[Comm, Request]: """ Nonblocking shrink a communicator to remove all failed processes. """ cdef type cls = Comm if isinstance(self, Intracomm): cls = Intracomm elif isinstance(self, Intercomm): cls = Intercomm cdef Comm comm = New(cls) cdef Request request = New(Request) with nogil: CHKERR( MPI_Comm_ishrink( self.ob_mpi, &comm.ob_mpi, &request.ob_mpi) ) comm_set_eh(comm.ob_mpi) return (comm, request) # Communicator Info # ----------------- def Set_info(self, Info info: Info) -> None: """ Set new values for the hints associated with a communicator. """ with nogil: CHKERR( MPI_Comm_set_info( self.ob_mpi, info.ob_mpi) ) def Get_info(self) -> Info: """ Return the current hints for a communicator. """ cdef Info info = New(Info) with nogil: CHKERR( MPI_Comm_get_info( self.ob_mpi, &info.ob_mpi) ) return info property info: """Info hints.""" def __get__(self) -> Info: return self.Get_info() def __set__(self, value: Info): self.Set_info(value) # Point to Point communication # ---------------------------- # Buffer Allocation and Usage # --------------------------- def Attach_buffer(self, buf: Buffer | None) -> None: """ Attach a user-provided buffer for sending in buffered mode. 
""" cdef void *base = NULL cdef MPI_Count size = 0 buf = attach_buffer(buf, &base, &size) with nogil: CHKERR( MPI_Comm_attach_buffer_c( self.ob_mpi, base, size) ) detach_buffer_set(self, buf) # ~> MPI-4.1 def Detach_buffer(self) -> Buffer | None: """ Remove an existing attached buffer. """ cdef void *base = NULL cdef MPI_Count size = 0 with nogil: CHKERR( MPI_Comm_detach_buffer_c( self.ob_mpi, &base, &size) ) return detach_buffer_get(self, base, size) # ~> MPI-4.1 def Flush_buffer(self) -> None: """ Block until all buffered messages have been transmitted. """ with nogil: CHKERR( MPI_Comm_flush_buffer(self.ob_mpi) ) def Iflush_buffer(self) -> Request: """ Nonblocking flush for buffered messages. """ cdef Request request = New(Request) with nogil: CHKERR( MPI_Comm_iflush_buffer( self.ob_mpi, &request.ob_mpi) ) return request # ~> MPI-4.1 # Blocking Send and Receive Operations # ------------------------------------ def Send( self, buf: BufSpec, int dest: int, int tag: int = 0, ) -> None: """ Blocking send. .. note:: This function may block until the message is received. Whether `Send` blocks or not depends on several factors and is implementation dependent. """ cdef _p_msg_p2p smsg = message_p2p_send(buf, dest) with nogil: CHKERR( MPI_Send_c( smsg.buf, smsg.count, smsg.dtype, dest, tag, self.ob_mpi) ) def Recv( self, buf: BufSpec, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, Status status: Status | None = None, ) -> None: """ Blocking receive. .. note:: This function blocks until the message is received. """ cdef _p_msg_p2p rmsg = message_p2p_recv(buf, source) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_Recv_c( rmsg.buf, rmsg.count, rmsg.dtype, source, tag, self.ob_mpi, statusp) ) # Send-Receive # ------------ def Sendrecv( self, sendbuf: BufSpec, int dest: int, int sendtag: int = 0, recvbuf: BufSpec | None = None, int source: int = ANY_SOURCE, int recvtag: int = ANY_TAG, Status status: Status | None = None, ) -> None: """ Send and receive a message. .. note:: This function is guaranteed not to deadlock in situations where pairs of blocking sends and receives may deadlock. .. caution:: A common mistake when using this function is to mismatch the tags with the source and destination ranks, which can result in deadlock. """ cdef _p_msg_p2p smsg = message_p2p_send(sendbuf, dest) cdef _p_msg_p2p rmsg = message_p2p_recv(recvbuf, source) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_Sendrecv_c( smsg.buf, smsg.count, smsg.dtype, dest, sendtag, rmsg.buf, rmsg.count, rmsg.dtype, source, recvtag, self.ob_mpi, statusp) ) def Sendrecv_replace( self, buf: BufSpec, int dest: int, int sendtag: int = 0, int source: int = ANY_SOURCE, int recvtag: int = ANY_TAG, Status status: Status | None = None, ) -> None: """ Send and receive a message. .. note:: This function is guaranteed not to deadlock in situations where pairs of blocking sends and receives may deadlock. .. caution:: A common mistake when using this function is to mismatch the tags with the source and destination ranks, which can result in deadlock. 
""" cdef int rank = MPI_PROC_NULL if dest != MPI_PROC_NULL: rank = dest if source != MPI_PROC_NULL: rank = source cdef _p_msg_p2p rmsg = message_p2p_recv(buf, rank) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_Sendrecv_replace_c( rmsg.buf, rmsg.count, rmsg.dtype, dest, sendtag, source, recvtag, self.ob_mpi, statusp) ) # Nonblocking Communications # -------------------------- def Isend( self, buf: BufSpec, int dest: int, int tag: int = 0, ) -> Request: """ Nonblocking send. """ cdef _p_msg_p2p smsg = message_p2p_send(buf, dest) cdef Request request = New(Request) with nogil: CHKERR( MPI_Isend_c( smsg.buf, smsg.count, smsg.dtype, dest, tag, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = smsg return request def Irecv( self, buf: BufSpec, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, ) -> Request: """ Nonblocking receive. """ cdef _p_msg_p2p rmsg = message_p2p_recv(buf, source) cdef Request request = New(Request) with nogil: CHKERR( MPI_Irecv_c( rmsg.buf, rmsg.count, rmsg.dtype, source, tag, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = rmsg return request def Isendrecv( self, sendbuf: BufSpec, int dest: int, int sendtag: int = 0, recvbuf: BufSpec | None = None, int source: int = ANY_SOURCE, int recvtag: int = ANY_TAG, ) -> Request: """ Nonblocking send and receive. """ cdef _p_msg_p2p smsg = message_p2p_send(sendbuf, dest) cdef _p_msg_p2p rmsg = message_p2p_recv(recvbuf, source) cdef Request request = New(Request) with nogil: CHKERR( MPI_Isendrecv_c( smsg.buf, smsg.count, smsg.dtype, dest, sendtag, rmsg.buf, rmsg.count, rmsg.dtype, source, recvtag, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = (smsg, rmsg) return request def Isendrecv_replace( self, buf: BufSpec, int dest: int, int sendtag: int = 0, int source: int = ANY_SOURCE, int recvtag: int = ANY_TAG, ) -> Request: """ Send and receive a message. .. note:: This function is guaranteed not to deadlock in situations where pairs of blocking sends and receives may deadlock. .. caution:: A common mistake when using this function is to mismatch the tags with the source and destination ranks, which can result in deadlock. """ cdef int rank = MPI_PROC_NULL if dest != MPI_PROC_NULL: rank = dest if source != MPI_PROC_NULL: rank = source cdef _p_msg_p2p rmsg = message_p2p_recv(buf, rank) cdef Request request = New(Request) with nogil: CHKERR( MPI_Isendrecv_replace_c( rmsg.buf, rmsg.count, rmsg.dtype, dest, sendtag, source, recvtag, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = rmsg return request # Probe # ----- def Probe( self, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, Status status: Status | None = None, ) -> Literal[True]: """ Blocking test for a message. .. note:: This function blocks until the message arrives. """ cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_Probe( source, tag, self.ob_mpi, statusp) ) return True def Iprobe( self, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, Status status: Status | None = None, ) -> bool: """ Nonblocking test for a message. """ cdef int flag = 0 cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_Iprobe( source, tag, self.ob_mpi, &flag, statusp) ) return flag # Matching Probe # -------------- def Mprobe( self, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, Status status: Status | None = None, ) -> Message: """ Blocking test for a matched message. 
""" cdef MPI_Message cmessage = MPI_MESSAGE_NULL cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_Mprobe( source, tag, self.ob_mpi, &cmessage, statusp) ) cdef Message message = New(Message) message.ob_mpi = cmessage return message def Improbe( self, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, Status status: Status | None = None, ) -> Message | None: """ Nonblocking test for a matched message. """ cdef int flag = 0 cdef MPI_Message cmessage = MPI_MESSAGE_NULL cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_Improbe( source, tag, self.ob_mpi, &flag, &cmessage, statusp) ) if flag == 0: return None cdef Message message = New(Message) message.ob_mpi = cmessage return message # Persistent Communication # ------------------------ def Send_init( self, buf: BufSpec, int dest: int, int tag: int = 0, ) -> Prequest: """ Create a persistent request for a standard send. """ cdef _p_msg_p2p smsg = message_p2p_send(buf, dest) cdef Prequest request = New(Prequest) with nogil: CHKERR( MPI_Send_init_c( smsg.buf, smsg.count, smsg.dtype, dest, tag, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = smsg return request def Recv_init( self, buf: BufSpec, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, ) -> Prequest: """ Create a persistent request for a receive. """ cdef _p_msg_p2p rmsg = message_p2p_recv(buf, source) cdef Prequest request = New(Prequest) with nogil: CHKERR( MPI_Recv_init_c( rmsg.buf, rmsg.count, rmsg.dtype, source, tag, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = rmsg return request # Partitioned Communication # ------------------------- def Psend_init( self, buf: BufSpec, int partitions: int, int dest: int, int tag: int = 0, Info info: Info = INFO_NULL, ) -> Prequest: """ Create request for a partitioned send operation. """ cdef _p_msg_p2p smsg = message_p2p_psend(buf, dest, partitions) cdef Prequest request = New(Prequest) with nogil: CHKERR( MPI_Psend_init( smsg.buf, partitions, smsg.count, smsg.dtype, dest, tag, self.ob_mpi, info.ob_mpi, &request.ob_mpi) ) request.ob_buf = smsg return request def Precv_init( self, buf: BufSpec, int partitions: int, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, Info info: Info = INFO_NULL, ) -> Prequest: """ Create request for a partitioned recv operation. """ cdef _p_msg_p2p rmsg = message_p2p_precv(buf, source, partitions) cdef Prequest request = New(Prequest) with nogil: CHKERR( MPI_Precv_init( rmsg.buf, partitions, rmsg.count, rmsg.dtype, source, tag, self.ob_mpi, info.ob_mpi, &request.ob_mpi) ) request.ob_buf = rmsg return request # Communication Modes # ------------------- # Blocking calls def Bsend( self, buf: BufSpec, int dest: int, int tag: int = 0, ) -> None: """ Blocking send in buffered mode. """ cdef _p_msg_p2p smsg = message_p2p_send(buf, dest) with nogil: CHKERR( MPI_Bsend_c( smsg.buf, smsg.count, smsg.dtype, dest, tag, self.ob_mpi) ) def Ssend( self, buf: BufSpec, int dest: int, int tag: int = 0, ) -> None: """ Blocking send in synchronous mode. """ cdef _p_msg_p2p smsg = message_p2p_send(buf, dest) with nogil: CHKERR( MPI_Ssend_c( smsg.buf, smsg.count, smsg.dtype, dest, tag, self.ob_mpi) ) def Rsend( self, buf: BufSpec, int dest: int, int tag: int = 0, ) -> None: """ Blocking send in ready mode. 
""" cdef _p_msg_p2p smsg = message_p2p_send(buf, dest) with nogil: CHKERR( MPI_Rsend_c( smsg.buf, smsg.count, smsg.dtype, dest, tag, self.ob_mpi) ) # Nonblocking calls def Ibsend( self, buf: BufSpec, int dest: int, int tag: int = 0, ) -> Request: """ Nonblocking send in buffered mode. """ cdef _p_msg_p2p smsg = message_p2p_send(buf, dest) cdef Request request = New(Request) with nogil: CHKERR( MPI_Ibsend_c( smsg.buf, smsg.count, smsg.dtype, dest, tag, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = smsg return request def Issend( self, buf: BufSpec, int dest: int, int tag: int = 0, ) -> Request: """ Nonblocking send in synchronous mode. """ cdef _p_msg_p2p smsg = message_p2p_send(buf, dest) cdef Request request = New(Request) with nogil: CHKERR( MPI_Issend_c( smsg.buf, smsg.count, smsg.dtype, dest, tag, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = smsg return request def Irsend( self, buf: BufSpec, int dest: int, int tag: int = 0, ) -> Request: """ Nonblocking send in ready mode. """ cdef _p_msg_p2p smsg = message_p2p_send(buf, dest) cdef Request request = New(Request) with nogil: CHKERR( MPI_Irsend_c( smsg.buf, smsg.count, smsg.dtype, dest, tag, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = smsg return request # Persistent Requests def Bsend_init( self, buf: BufSpec, int dest: int, int tag: int = 0, ) -> Request: """ Persistent request for a send in buffered mode. """ cdef _p_msg_p2p smsg = message_p2p_send(buf, dest) cdef Prequest request = New(Prequest) with nogil: CHKERR( MPI_Bsend_init_c( smsg.buf, smsg.count, smsg.dtype, dest, tag, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = smsg return request def Ssend_init( self, buf: BufSpec, int dest: int, int tag: int = 0, ) -> Request: """ Persistent request for a send in synchronous mode. """ cdef _p_msg_p2p smsg = message_p2p_send(buf, dest) cdef Prequest request = New(Prequest) with nogil: CHKERR( MPI_Ssend_init_c( smsg.buf, smsg.count, smsg.dtype, dest, tag, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = smsg return request def Rsend_init( self, buf: BufSpec, int dest: int, int tag: int = 0, ) -> Request: """ Persistent request for a send in ready mode. """ cdef _p_msg_p2p smsg = message_p2p_send(buf, dest) cdef Prequest request = New(Prequest) with nogil: CHKERR( MPI_Rsend_init_c( smsg.buf, smsg.count, smsg.dtype, dest, tag, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = smsg return request # Collective Communications # ------------------------- # Barrier Synchronization # ----------------------- def Barrier(self) -> None: """ Barrier synchronization. """ with nogil: CHKERR( MPI_Barrier(self.ob_mpi) ) # Global Communication Functions # ------------------------------ def Bcast( self, buf: BufSpec, int root: int = 0, ) -> None: """ Broadcast data from one process to all other processes. """ cdef _p_msg_cco m = message_cco() m.for_bcast(buf, root, self.ob_mpi) with nogil: CHKERR( MPI_Bcast_c( m.sbuf, m.scount, m.stype, root, self.ob_mpi) ) def Gather( self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecB | None, int root: int = 0, ) -> None: """ Gather data to one process from all other processes. """ cdef _p_msg_cco m = message_cco() m.for_gather(0, sendbuf, recvbuf, root, self.ob_mpi) with nogil: CHKERR( MPI_Gather_c( m.sbuf, m.scount, m.stype, m.rbuf, m.rcount, m.rtype, root, self.ob_mpi) ) def Gatherv( self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecV | None, int root: int = 0, ) -> None: """ Gather Vector. Gather data to one process from all other processes providing different amounts of data and displacements. 
""" cdef _p_msg_cco m = message_cco() m.for_gather(1, sendbuf, recvbuf, root, self.ob_mpi) with nogil: CHKERR( MPI_Gatherv_c( m.sbuf, m.scount, m.stype, m.rbuf, m.rcounts, m.rdispls, m.rtype, root, self.ob_mpi) ) def Scatter( self, sendbuf: BufSpecB | None, recvbuf: BufSpec | InPlace, int root: int = 0, ) -> None: """ Scatter data from one process to all other processes. """ cdef _p_msg_cco m = message_cco() m.for_scatter(0, sendbuf, recvbuf, root, self.ob_mpi) with nogil: CHKERR( MPI_Scatter_c( m.sbuf, m.scount, m.stype, m.rbuf, m.rcount, m.rtype, root, self.ob_mpi) ) def Scatterv( self, sendbuf: BufSpecV | None, recvbuf: BufSpec | InPlace, int root: int = 0, ) -> None: """ Scatter Vector. Scatter data from one process to all other processes providing different amounts of data and displacements. """ cdef _p_msg_cco m = message_cco() m.for_scatter(1, sendbuf, recvbuf, root, self.ob_mpi) with nogil: CHKERR( MPI_Scatterv_c( m.sbuf, m.scounts, m.sdispls, m.stype, m.rbuf, m.rcount, m.rtype, root, self.ob_mpi) ) def Allgather( self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecB, ) -> None: """ Gather to All. Gather data from all processes and broadcast the combined data to all other processes. """ cdef _p_msg_cco m = message_cco() m.for_allgather(0, sendbuf, recvbuf, self.ob_mpi) with nogil: CHKERR( MPI_Allgather_c( m.sbuf, m.scount, m.stype, m.rbuf, m.rcount, m.rtype, self.ob_mpi) ) def Allgatherv( self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecV, ) -> None: """ Gather to All Vector. Gather data from all processes and send it to all other processes providing different amounts of data and displacements. """ cdef _p_msg_cco m = message_cco() m.for_allgather(1, sendbuf, recvbuf, self.ob_mpi) with nogil: CHKERR( MPI_Allgatherv_c( m.sbuf, m.scount, m.stype, m.rbuf, m.rcounts, m.rdispls, m.rtype, self.ob_mpi) ) def Alltoall( self, sendbuf: BufSpecB | InPlace, recvbuf: BufSpecB, ) -> None: """ All to All Scatter/Gather. Send data to all processes and recv data from all processes. """ cdef _p_msg_cco m = message_cco() m.for_alltoall(0, sendbuf, recvbuf, self.ob_mpi) with nogil: CHKERR( MPI_Alltoall_c( m.sbuf, m.scount, m.stype, m.rbuf, m.rcount, m.rtype, self.ob_mpi) ) def Alltoallv( self, sendbuf: BufSpecV | InPlace, recvbuf: BufSpecV, ) -> None: """ All to All Scatter/Gather Vector. Send data to all processes and recv data from all processes providing different amounts of data and displacements. """ cdef _p_msg_cco m = message_cco() m.for_alltoall(1, sendbuf, recvbuf, self.ob_mpi) with nogil: CHKERR( MPI_Alltoallv_c( m.sbuf, m.scounts, m.sdispls, m.stype, m.rbuf, m.rcounts, m.rdispls, m.rtype, self.ob_mpi) ) def Alltoallw( self, sendbuf: BufSpecW | InPlace, recvbuf: BufSpecW, ) -> None: """ All to All Scatter/Gather General. Send/recv data to/from all processes allowing the specification of different counts, displacements, and datatypes for each dest/source. """ cdef _p_msg_ccow m = message_ccow() m.for_alltoallw(sendbuf, recvbuf, self.ob_mpi) with nogil: CHKERR( MPI_Alltoallw_c( m.sbuf, m.scounts, m.sdispls, m.stypes, m.rbuf, m.rcounts, m.rdispls, m.rtypes, self.ob_mpi) ) # Global Reduction Operations # --------------------------- def Reduce( self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec | None, Op op: Op = SUM, int root: int = 0, ) -> None: """ Reduce to Root. 
""" cdef _p_msg_cco m = message_cco() m.for_reduce(sendbuf, recvbuf, root, self.ob_mpi) with nogil: CHKERR( MPI_Reduce_c( m.sbuf, m.rbuf, m.rcount, m.rtype, op.ob_mpi, root, self.ob_mpi) ) def Allreduce( self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec, Op op: Op = SUM, ) -> None: """ Reduce to All. """ cdef _p_msg_cco m = message_cco() m.for_allreduce(sendbuf, recvbuf, self.ob_mpi) with nogil: CHKERR( MPI_Allreduce_c( m.sbuf, m.rbuf, m.rcount, m.rtype, op.ob_mpi, self.ob_mpi) ) def Reduce_scatter_block( self, sendbuf: BufSpecB | InPlace, recvbuf: BufSpec | BufSpecB, Op op: Op = SUM, ) -> None: """ Reduce-Scatter Block (regular, non-vector version). """ cdef _p_msg_cco m = message_cco() m.for_reduce_scatter_block(sendbuf, recvbuf, self.ob_mpi) with nogil: CHKERR( MPI_Reduce_scatter_block_c( m.sbuf, m.rbuf, m.rcount, m.rtype, op.ob_mpi, self.ob_mpi) ) def Reduce_scatter( self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec, recvcounts: Sequence[int] | None = None, Op op: Op = SUM, ) -> None: """ Reduce-Scatter (vector version). """ cdef _p_msg_cco m = message_cco() m.for_reduce_scatter(sendbuf, recvbuf, recvcounts, self.ob_mpi) with nogil: CHKERR( MPI_Reduce_scatter_c( m.sbuf, m.rbuf, m.rcounts, m.rtype, op.ob_mpi, self.ob_mpi) ) # Nonblocking Collectives # ----------------------- def Ibarrier(self) -> Request: """ Nonblocking Barrier. """ cdef Request request = New(Request) with nogil: CHKERR( MPI_Ibarrier(self.ob_mpi, &request.ob_mpi) ) return request def Ibcast( self, buf: BufSpec, int root: int = 0, ) -> Request: """ Nonblocking Broadcast. """ cdef _p_msg_cco m = message_cco() m.for_bcast(buf, root, self.ob_mpi) cdef Request request = New(Request) with nogil: CHKERR( MPI_Ibcast_c( m.sbuf, m.scount, m.stype, root, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Igather( self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecB | None, int root: int = 0, ) -> Request: """ Nonblocking Gather. """ cdef _p_msg_cco m = message_cco() m.for_gather(0, sendbuf, recvbuf, root, self.ob_mpi) cdef Request request = New(Request) with nogil: CHKERR( MPI_Igather_c( m.sbuf, m.scount, m.stype, m.rbuf, m.rcount, m.rtype, root, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Igatherv( self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecV | None, int root: int = 0, ) -> Request: """ Nonblocking Gather Vector. """ cdef _p_msg_cco m = message_cco() m.for_gather(1, sendbuf, recvbuf, root, self.ob_mpi) cdef Request request = New(Request) with nogil: CHKERR( MPI_Igatherv_c( m.sbuf, m.scount, m.stype, m.rbuf, m.rcounts, m.rdispls, m.rtype, root, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Iscatter( self, sendbuf: BufSpecB | None, recvbuf: BufSpec | InPlace, int root: int = 0, ) -> Request: """ Nonblocking Scatter. """ cdef _p_msg_cco m = message_cco() m.for_scatter(0, sendbuf, recvbuf, root, self.ob_mpi) cdef Request request = New(Request) with nogil: CHKERR( MPI_Iscatter_c( m.sbuf, m.scount, m.stype, m.rbuf, m.rcount, m.rtype, root, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Iscatterv( self, sendbuf: BufSpecV | None, recvbuf: BufSpec | InPlace, int root: int = 0, ) -> Request: """ Nonblocking Scatter Vector. 
""" cdef _p_msg_cco m = message_cco() m.for_scatter(1, sendbuf, recvbuf, root, self.ob_mpi) cdef Request request = New(Request) with nogil: CHKERR( MPI_Iscatterv_c( m.sbuf, m.scounts, m.sdispls, m.stype, m.rbuf, m.rcount, m.rtype, root, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Iallgather( self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecB, ) -> Request: """ Nonblocking Gather to All. """ cdef _p_msg_cco m = message_cco() m.for_allgather(0, sendbuf, recvbuf, self.ob_mpi) cdef Request request = New(Request) with nogil: CHKERR( MPI_Iallgather_c( m.sbuf, m.scount, m.stype, m.rbuf, m.rcount, m.rtype, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Iallgatherv( self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecV, ) -> Request: """ Nonblocking Gather to All Vector. """ cdef _p_msg_cco m = message_cco() m.for_allgather(1, sendbuf, recvbuf, self.ob_mpi) cdef Request request = New(Request) with nogil: CHKERR( MPI_Iallgatherv_c( m.sbuf, m.scount, m.stype, m.rbuf, m.rcounts, m.rdispls, m.rtype, self.ob_mpi, &request.ob_mpi) ) return request def Ialltoall( self, sendbuf: BufSpecB | InPlace, recvbuf: BufSpecB, ) -> Request: """ Nonblocking All to All Scatter/Gather. """ cdef _p_msg_cco m = message_cco() m.for_alltoall(0, sendbuf, recvbuf, self.ob_mpi) cdef Request request = New(Request) with nogil: CHKERR( MPI_Ialltoall_c( m.sbuf, m.scount, m.stype, m.rbuf, m.rcount, m.rtype, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Ialltoallv( self, sendbuf: BufSpecV | InPlace, recvbuf: BufSpecV, ) -> Request: """ Nonblocking All to All Scatter/Gather Vector. """ cdef _p_msg_cco m = message_cco() m.for_alltoall(1, sendbuf, recvbuf, self.ob_mpi) cdef Request request = New(Request) with nogil: CHKERR( MPI_Ialltoallv_c( m.sbuf, m.scounts, m.sdispls, m.stype, m.rbuf, m.rcounts, m.rdispls, m.rtype, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Ialltoallw( self, sendbuf: BufSpecW | InPlace, recvbuf: BufSpecW, ) -> Request: """ Nonblocking All to All Scatter/Gather General. """ cdef _p_msg_ccow m = message_ccow() m.for_alltoallw(sendbuf, recvbuf, self.ob_mpi) cdef Request request = New(Request) with nogil: CHKERR( MPI_Ialltoallw_c( m.sbuf, m.scounts, m.sdispls, m.stypes, m.rbuf, m.rcounts, m.rdispls, m.rtypes, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Ireduce( self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec | None, Op op: Op = SUM, int root: int = 0, ) -> Request: """ Nonblocking Reduce to Root. """ cdef _p_msg_cco m = message_cco() m.for_reduce(sendbuf, recvbuf, root, self.ob_mpi) cdef Request request = New(Request) with nogil: CHKERR( MPI_Ireduce_c( m.sbuf, m.rbuf, m.rcount, m.rtype, op.ob_mpi, root, self.ob_mpi, &request.ob_mpi) ) return request def Iallreduce( self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec, Op op: Op = SUM, ) -> Request: """ Nonblocking Reduce to All. """ cdef _p_msg_cco m = message_cco() m.for_allreduce(sendbuf, recvbuf, self.ob_mpi) cdef Request request = New(Request) with nogil: CHKERR( MPI_Iallreduce_c( m.sbuf, m.rbuf, m.rcount, m.rtype, op.ob_mpi, self.ob_mpi, &request.ob_mpi) ) return request def Ireduce_scatter_block( self, sendbuf: BufSpecB | InPlace, recvbuf: BufSpec | BufSpecB, Op op: Op = SUM, ) -> Request: """ Nonblocking Reduce-Scatter Block (regular, non-vector version). 
""" cdef _p_msg_cco m = message_cco() m.for_reduce_scatter_block(sendbuf, recvbuf, self.ob_mpi) cdef Request request = New(Request) with nogil: CHKERR( MPI_Ireduce_scatter_block_c( m.sbuf, m.rbuf, m.rcount, m.rtype, op.ob_mpi, self.ob_mpi, &request.ob_mpi) ) return request def Ireduce_scatter( self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec, recvcounts: Sequence[int] | None = None, Op op: Op = SUM, ) -> Request: """ Nonblocking Reduce-Scatter (vector version). """ cdef _p_msg_cco m = message_cco() m.for_reduce_scatter(sendbuf, recvbuf, recvcounts, self.ob_mpi) cdef Request request = New(Request) with nogil: CHKERR( MPI_Ireduce_scatter_c( m.sbuf, m.rbuf, m.rcounts, m.rtype, op.ob_mpi, self.ob_mpi, &request.ob_mpi) ) return request # Persistent Collectives # ---------------------- def Barrier_init( self, Info info: Info = INFO_NULL, ) -> Prequest: """ Persistent Barrier. """ cdef Prequest request = New(Prequest) with nogil: CHKERR( MPI_Barrier_init( self.ob_mpi, info.ob_mpi, &request.ob_mpi) ) return request def Bcast_init( self, buf: BufSpec, int root: int = 0, Info info: Info = INFO_NULL, ) -> Prequest: """ Persistent Broadcast. """ cdef _p_msg_cco m = message_cco() m.for_bcast(buf, root, self.ob_mpi) cdef Prequest request = New(Prequest) with nogil: CHKERR( MPI_Bcast_init_c( m.sbuf, m.scount, m.stype, root, self.ob_mpi, info.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Gather_init( self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecB | None, int root: int = 0, Info info: Info = INFO_NULL, ) -> Prequest: """ Persistent Gather. """ cdef _p_msg_cco m = message_cco() m.for_gather(0, sendbuf, recvbuf, root, self.ob_mpi) cdef Prequest request = New(Prequest) with nogil: CHKERR( MPI_Gather_init_c( m.sbuf, m.scount, m.stype, m.rbuf, m.rcount, m.rtype, root, self.ob_mpi, info.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Gatherv_init( self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecV | None, int root: int = 0, Info info: Info = INFO_NULL, ) -> Prequest: """ Persistent Gather Vector. """ cdef _p_msg_cco m = message_cco() m.for_gather(1, sendbuf, recvbuf, root, self.ob_mpi) cdef Prequest request = New(Prequest) with nogil: CHKERR( MPI_Gatherv_init_c( m.sbuf, m.scount, m.stype, m.rbuf, m.rcounts, m.rdispls, m.rtype, root, self.ob_mpi, info.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Scatter_init( self, sendbuf: BufSpecB | None, recvbuf: BufSpec | InPlace, int root: int = 0, Info info: Info = INFO_NULL, ) -> Prequest: """ Persistent Scatter. """ cdef _p_msg_cco m = message_cco() m.for_scatter(0, sendbuf, recvbuf, root, self.ob_mpi) cdef Prequest request = New(Prequest) with nogil: CHKERR( MPI_Scatter_init_c( m.sbuf, m.scount, m.stype, m.rbuf, m.rcount, m.rtype, root, self.ob_mpi, info.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Scatterv_init( self, sendbuf: BufSpecV | None, recvbuf: BufSpec | InPlace, int root: int = 0, Info info: Info = INFO_NULL, ) -> Prequest: """ Persistent Scatter Vector. """ cdef _p_msg_cco m = message_cco() m.for_scatter(1, sendbuf, recvbuf, root, self.ob_mpi) cdef Prequest request = New(Prequest) with nogil: CHKERR( MPI_Scatterv_init_c( m.sbuf, m.scounts, m.sdispls, m.stype, m.rbuf, m.rcount, m.rtype, root, self.ob_mpi, info.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Allgather_init( self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecB, Info info: Info = INFO_NULL, ) -> Prequest: """ Persistent Gather to All. 
""" cdef _p_msg_cco m = message_cco() m.for_allgather(0, sendbuf, recvbuf, self.ob_mpi) cdef Prequest request = New(Prequest) with nogil: CHKERR( MPI_Allgather_init_c( m.sbuf, m.scount, m.stype, m.rbuf, m.rcount, m.rtype, self.ob_mpi, info.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Allgatherv_init( self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecV, Info info: Info = INFO_NULL, ) -> Prequest: """ Persistent Gather to All Vector. """ cdef _p_msg_cco m = message_cco() m.for_allgather(1, sendbuf, recvbuf, self.ob_mpi) cdef Prequest request = New(Prequest) with nogil: CHKERR( MPI_Allgatherv_init_c( m.sbuf, m.scount, m.stype, m.rbuf, m.rcounts, m.rdispls, m.rtype, self.ob_mpi, info.ob_mpi, &request.ob_mpi) ) return request def Alltoall_init( self, sendbuf: BufSpecB | InPlace, recvbuf: BufSpecB, Info info: Info = INFO_NULL, ) -> Prequest: """ Persistent All to All Scatter/Gather. """ cdef _p_msg_cco m = message_cco() m.for_alltoall(0, sendbuf, recvbuf, self.ob_mpi) cdef Prequest request = New(Prequest) with nogil: CHKERR( MPI_Alltoall_init_c( m.sbuf, m.scount, m.stype, m.rbuf, m.rcount, m.rtype, self.ob_mpi, info.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Alltoallv_init( self, sendbuf: BufSpecV | InPlace, recvbuf: BufSpecV, Info info: Info = INFO_NULL, ) -> Prequest: """ Persistent All to All Scatter/Gather Vector. """ cdef _p_msg_cco m = message_cco() m.for_alltoall(1, sendbuf, recvbuf, self.ob_mpi) cdef Prequest request = New(Prequest) with nogil: CHKERR( MPI_Alltoallv_init_c( m.sbuf, m.scounts, m.sdispls, m.stype, m.rbuf, m.rcounts, m.rdispls, m.rtype, self.ob_mpi, info.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Alltoallw_init( self, sendbuf: BufSpecW | InPlace, recvbuf: BufSpecW, Info info: Info = INFO_NULL, ) -> Prequest: """ Persistent All to All Scatter/Gather General. """ cdef _p_msg_ccow m = message_ccow() m.for_alltoallw(sendbuf, recvbuf, self.ob_mpi) cdef Prequest request = New(Prequest) with nogil: CHKERR( MPI_Alltoallw_init_c( m.sbuf, m.scounts, m.sdispls, m.stypes, m.rbuf, m.rcounts, m.rdispls, m.rtypes, self.ob_mpi, info.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Reduce_init( self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec | None, Op op: Op = SUM, int root: int = 0, Info info: Info = INFO_NULL, ) -> Prequest: """ Persistent Reduce to Root. """ cdef _p_msg_cco m = message_cco() m.for_reduce(sendbuf, recvbuf, root, self.ob_mpi) cdef Prequest request = New(Prequest) with nogil: CHKERR( MPI_Reduce_init_c( m.sbuf, m.rbuf, m.rcount, m.rtype, op.ob_mpi, root, self.ob_mpi, info.ob_mpi, &request.ob_mpi) ) return request def Allreduce_init( self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec, Op op: Op = SUM, Info info: Info = INFO_NULL, ) -> Prequest: """ Persistent Reduce to All. """ cdef _p_msg_cco m = message_cco() m.for_allreduce(sendbuf, recvbuf, self.ob_mpi) cdef Prequest request = New(Prequest) with nogil: CHKERR( MPI_Allreduce_init_c( m.sbuf, m.rbuf, m.rcount, m.rtype, op.ob_mpi, self.ob_mpi, info.ob_mpi, &request.ob_mpi) ) return request def Reduce_scatter_block_init( self, sendbuf: BufSpecB | InPlace, recvbuf: BufSpec | BufSpecB, Op op: Op = SUM, Info info: Info = INFO_NULL, ) -> Prequest: """ Persistent Reduce-Scatter Block (regular, non-vector version). 
""" cdef _p_msg_cco m = message_cco() m.for_reduce_scatter_block(sendbuf, recvbuf, self.ob_mpi) cdef Prequest request = New(Prequest) with nogil: CHKERR( MPI_Reduce_scatter_block_init_c( m.sbuf, m.rbuf, m.rcount, m.rtype, op.ob_mpi, self.ob_mpi, info.ob_mpi, &request.ob_mpi) ) return request def Reduce_scatter_init( self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec, recvcounts: Sequence[int] | None = None, Op op: Op = SUM, Info info: Info = INFO_NULL, ) -> Prequest: """ Persistent Reduce-Scatter (vector version). """ cdef _p_msg_cco m = message_cco() m.for_reduce_scatter(sendbuf, recvbuf, recvcounts, self.ob_mpi) cdef Prequest request = New(Prequest) with nogil: CHKERR( MPI_Reduce_scatter_init_c( m.sbuf, m.rbuf, m.rcounts, m.rtype, op.ob_mpi, self.ob_mpi, info.ob_mpi, &request.ob_mpi) ) return request # Tests # ----- def Is_inter(self) -> bool: """ Return whether the communicator is an intercommunicator. """ cdef int flag = 0 CHKERR( MPI_Comm_test_inter(self.ob_mpi, &flag) ) return flag property is_inter: """Is intercommunicator.""" def __get__(self) -> bool: return self.Is_inter() def Is_intra(self) -> bool: """ Return whether the communicator is an intracommunicator. """ return not self.Is_inter() property is_intra: """Is intracommunicator.""" def __get__(self) -> bool: return self.Is_intra() def Get_topology(self) -> int: """ Return the type of topology (if any) associated with a communicator. """ cdef int topo = MPI_UNDEFINED CHKERR( MPI_Topo_test(self.ob_mpi, &topo) ) return topo property topology: """Topology type.""" def __get__(self) -> int: return self.Get_topology() property is_topo: """Is a topology.""" def __get__(self) -> bool: return self.Get_topology() != MPI_UNDEFINED # Process Creation and Management # ------------------------------- @classmethod def Get_parent(cls) -> Intercomm: """ Return the parent intercommunicator for this process. """ cdef Intercomm comm = __COMM_PARENT__ with nogil: CHKERR( MPI_Comm_get_parent(&comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm def Disconnect(self) -> None: """ Disconnect from a communicator. """ with nogil: CHKERR( MPI_Comm_disconnect(&self.ob_mpi) ) @classmethod def Join(cls, int fd: int) -> Intercomm: """ Interconnect two processes connected by a socket. """ cdef Intercomm comm = New(Intercomm) with nogil: CHKERR( MPI_Comm_join(fd, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm # Attributes # ---------- def Get_attr(self, int keyval: int) -> int | Any | None: """ Retrieve attribute value by key. """ cdef void *attrval = NULL cdef int flag = 0 CHKERR( MPI_Comm_get_attr(self.ob_mpi, keyval, &attrval, &flag) ) if not flag: return None if attrval == NULL: return 0 # MPI-1 predefined attribute keyvals if ( keyval == MPI_TAG_UB or keyval == MPI_IO or keyval == MPI_WTIME_IS_GLOBAL ): return (attrval)[0] # MPI-2 predefined attribute keyvals if ( keyval == MPI_UNIVERSE_SIZE or keyval == MPI_APPNUM or keyval == MPI_LASTUSEDCODE ): return (attrval)[0] # user-defined attribute keyval return PyMPI_attr_get(self.ob_mpi, keyval, attrval) def Set_attr(self, int keyval: int, attrval: Any) -> None: """ Store attribute value associated with a key. """ PyMPI_attr_set(self.ob_mpi, keyval, attrval) def Delete_attr(self, int keyval: int) -> None: """ Delete attribute value associated with a key. 
""" PyMPI_attr_del(self.ob_mpi, keyval) @classmethod def Create_keyval( cls, copy_fn: Callable[[Comm, int, Any], Any] | None = None, delete_fn: Callable[[Comm, int, Any], None] | None = None, nopython: bool = False, ) -> int: """ Create a new attribute key for communicators. """ cdef int keyval = MPI_KEYVAL_INVALID cdef MPI_Comm_copy_attr_function *_copy = PyMPI_attr_copy_fn cdef MPI_Comm_delete_attr_function *_del = PyMPI_attr_delete_fn cdef _p_keyval state = _p_keyval(copy_fn, delete_fn, nopython) CHKERR( MPI_Comm_create_keyval(_copy, _del, &keyval, state) ) PyMPI_attr_state_set(MPI_COMM_NULL, keyval, state) return keyval @classmethod def Free_keyval(cls, int keyval: int) -> int: """ Free an attribute key for communicators. """ cdef int keyval_save = keyval CHKERR( MPI_Comm_free_keyval(&keyval) ) PyMPI_attr_state_del(MPI_COMM_NULL, keyval_save) return keyval # Error handling # -------------- @classmethod def Create_errhandler( cls, errhandler_fn: Callable[[Comm, int], None], ) -> Errhandler: """ Create a new error handler for communicators. """ cdef Errhandler errhandler = New(Errhandler) cdef MPI_Comm_errhandler_function *fn = NULL cdef int index = errhdl_new(errhandler_fn, &fn) try: CHKERR( MPI_Comm_create_errhandler(fn, &errhandler.ob_mpi) ) except: # ~> uncovered # noqa errhdl_del(&index, fn) # ~> uncovered raise # ~> uncovered return errhandler def Get_errhandler(self) -> Errhandler: """ Get the error handler for a communicator. """ cdef Errhandler errhandler = New(Errhandler) CHKERR( MPI_Comm_get_errhandler(self.ob_mpi, &errhandler.ob_mpi) ) return errhandler def Set_errhandler(self, Errhandler errhandler: Errhandler) -> None: """ Set the error handler for a communicator. """ CHKERR( MPI_Comm_set_errhandler(self.ob_mpi, errhandler.ob_mpi) ) def Call_errhandler(self, int errorcode: int) -> None: """ Call the error handler installed on a communicator. """ CHKERR( MPI_Comm_call_errhandler(self.ob_mpi, errorcode) ) def Abort(self, int errorcode: int = 0) -> NoReturn: """ Terminate the MPI execution environment. .. warning:: The invocation of this method prevents the execution of various Python exit and cleanup mechanisms. Use this method as a last resort to prevent parallel deadlocks in case of unrecoverable errors. """ CHKERR( MPI_Abort(self.ob_mpi, errorcode) ) # ~> uncovered # Naming Objects # -------------- def Get_name(self) -> str: """ Get the print name for this communicator. """ cdef char name[MPI_MAX_OBJECT_NAME+1] cdef int nlen = 0 CHKERR( MPI_Comm_get_name(self.ob_mpi, name, &nlen) ) return tompistr(name, nlen) def Set_name(self, name: str) -> None: """ Set the print name for this communicator. 
""" cdef char *cname = NULL name = asmpistr(name, &cname) CHKERR( MPI_Comm_set_name(self.ob_mpi, cname) ) property name: """Print name.""" def __get__(self) -> str: return self.Get_name() def __set__(self, value: str): self.Set_name(value) # Fortran Handle # -------------- def py2f(self) -> int: """ """ return MPI_Comm_c2f(self.ob_mpi) @classmethod def f2py(cls, arg: int) -> Comm: """ """ return fromhandle(MPI_Comm_f2c(arg)) # Python Communication # -------------------- def send( self, obj: Any, int dest: int, int tag: int = 0, ) -> None: """Send in standard mode.""" cdef MPI_Comm comm = self.ob_mpi return PyMPI_send(obj, dest, tag, comm) def bsend( self, obj: Any, int dest: int, int tag: int = 0, ) -> None: """Send in buffered mode.""" cdef MPI_Comm comm = self.ob_mpi return PyMPI_bsend(obj, dest, tag, comm) def ssend( self, obj: Any, int dest: int, int tag: int = 0, ) -> None: """Send in synchronous mode.""" cdef MPI_Comm comm = self.ob_mpi return PyMPI_ssend(obj, dest, tag, comm) def recv( self, buf: Buffer | None = None, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, Status status: Status | None = None, ) -> Any: """Receive.""" cdef MPI_Comm comm = self.ob_mpi cdef MPI_Status *statusp = arg_Status(status) return PyMPI_recv(buf, source, tag, comm, statusp) def sendrecv( self, sendobj: Any, int dest: int, int sendtag: int = 0, recvbuf: Buffer | None = None, int source: int = ANY_SOURCE, int recvtag: int = ANY_TAG, Status status: Status | None = None, ) -> Any: """Send and Receive.""" cdef MPI_Comm comm = self.ob_mpi cdef MPI_Status *statusp = arg_Status(status) return PyMPI_sendrecv(sendobj, dest, sendtag, recvbuf, source, recvtag, comm, statusp) def isend( self, obj: Any, int dest: int, int tag: int = 0, ) -> Request: """Nonblocking send.""" cdef MPI_Comm comm = self.ob_mpi cdef Request request = New(Request) request.ob_buf = PyMPI_isend(obj, dest, tag, comm, &request.ob_mpi) return request def ibsend( self, obj: Any, int dest: int, int tag: int = 0, ) -> Request: """Nonblocking send in buffered mode.""" cdef MPI_Comm comm = self.ob_mpi cdef Request request = New(Request) request.ob_buf = PyMPI_ibsend(obj, dest, tag, comm, &request.ob_mpi) return request def issend( self, obj: Any, int dest: int, int tag: int = 0, ) -> Request: """Nonblocking send in synchronous mode.""" cdef MPI_Comm comm = self.ob_mpi cdef Request request = New(Request) request.ob_buf = PyMPI_issend(obj, dest, tag, comm, &request.ob_mpi) return request def irecv( self, buf: Buffer | None = None, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, ) -> Request: """Nonblocking receive.""" cdef MPI_Comm comm = self.ob_mpi cdef Request request = New(Request) request.ob_buf = PyMPI_irecv(buf, source, tag, comm, &request.ob_mpi) return request def probe( self, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, Status status: Status | None = None, ) -> Literal[True]: """Blocking test for a message.""" cdef MPI_Comm comm = self.ob_mpi cdef MPI_Status *statusp = arg_Status(status) return PyMPI_probe(source, tag, comm, statusp) def iprobe( self, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, Status status: Status | None = None, ) -> bool: """Nonblocking test for a message.""" cdef MPI_Comm comm = self.ob_mpi cdef MPI_Status *statusp = arg_Status(status) return PyMPI_iprobe(source, tag, comm, statusp) def mprobe( self, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, Status status: Status | None = None, ) -> Message: """Blocking test for a matched message.""" cdef MPI_Comm comm = self.ob_mpi cdef 
MPI_Status *statusp = arg_Status(status) cdef Message message = New(Message) message.ob_buf = PyMPI_mprobe(source, tag, comm, &message.ob_mpi, statusp) return message def improbe( self, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, Status status: Status | None = None, ) -> Message | None: """Nonblocking test for a matched message.""" cdef int flag = 0 cdef MPI_Comm comm = self.ob_mpi cdef MPI_Status *statusp = arg_Status(status) cdef Message message = New(Message) message.ob_buf = PyMPI_improbe(source, tag, comm, &flag, &message.ob_mpi, statusp) if flag == 0: return None return message def barrier(self) -> None: """ Barrier synchronization. .. note:: This method is equivalent to `Barrier`. """ cdef MPI_Comm comm = self.ob_mpi return PyMPI_barrier(comm) def bcast( self, obj: Any, int root: int = 0, ) -> Any: """Broadcast.""" cdef MPI_Comm comm = self.ob_mpi return PyMPI_bcast(obj, root, comm) def gather( self, sendobj: Any, int root: int = 0, ) -> list[Any] | None: """Gather.""" cdef MPI_Comm comm = self.ob_mpi return PyMPI_gather(sendobj, root, comm) def scatter( self, sendobj: Sequence[Any] | None, int root: int = 0, ) -> Any: """Scatter.""" cdef MPI_Comm comm = self.ob_mpi return PyMPI_scatter(sendobj, root, comm) def allgather( self, sendobj: Any, ) -> list[Any]: """Gather to All.""" cdef MPI_Comm comm = self.ob_mpi return PyMPI_allgather(sendobj, comm) def alltoall( self, sendobj: Sequence[Any], ) -> list[Any]: """All to All Scatter/Gather.""" cdef MPI_Comm comm = self.ob_mpi return PyMPI_alltoall(sendobj, comm) def reduce( self, sendobj: Any, op: Op | Callable[[Any, Any], Any] = SUM, int root: int = 0, ) -> Any | None: """Reduce to Root.""" cdef MPI_Comm comm = self.ob_mpi return PyMPI_reduce(sendobj, op, root, comm) def allreduce( self, sendobj: Any, op: Op | Callable[[Any, Any], Any] = SUM, ) -> Any: """Reduce to All.""" cdef MPI_Comm comm = self.ob_mpi return PyMPI_allreduce(sendobj, op, comm) cdef class Intracomm(Comm): """ Intracommunicator. """ def __cinit__(self, Comm comm: Comm | None = None): comm # unused if self.ob_mpi == MPI_COMM_NULL: return cdef int inter = 1 CHKERR( MPI_Comm_test_inter(self.ob_mpi, &inter) ) if inter: raise TypeError("expecting an intracommunicator") # Communicator Constructors # ------------------------- def Create_group(self, Group group: Group, int tag: int = 0) -> Intracomm: """ Create communicator from group. """ cdef Intracomm comm = New(Intracomm) with nogil: CHKERR( MPI_Comm_create_group( self.ob_mpi, group.ob_mpi, tag, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm @classmethod def Create_from_group( cls, Group group: Group, stringtag: str = "org.mpi4py", Info info: Info = INFO_NULL, Errhandler errhandler: Errhandler | None = None, ) -> Intracomm: """ Create communicator from group. """ cdef char *cstringtag = NULL stringtag = asmpistr(stringtag, &cstringtag) cdef MPI_Errhandler cerrhdl = arg_Errhandler(errhandler) cdef Intracomm comm = New(Intracomm) with nogil: CHKERR( MPI_Comm_create_from_group( group.ob_mpi, cstringtag, info.ob_mpi, cerrhdl, &comm.ob_mpi) ) return comm def Create_cart( self, dims: Sequence[int], periods: Sequence[bool] | None = None, bint reorder: bool = False, ) -> Cartcomm: """ Create cartesian communicator. 
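        Illustrative sketch (assumes a job launched with at least four processes;
        ranks left out of the 2 x 2 grid receive `COMM_NULL`)::

            from mpi4py import MPI

            comm = MPI.COMM_WORLD
            cart = comm.Create_cart(dims=[2, 2], periods=[True, False], reorder=True)
            if cart != MPI.COMM_NULL:
                coords = cart.Get_coords(cart.Get_rank())
                cart.Free()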
""" cdef int ndims = 0, *idims = NULL, *iperiods = NULL dims = getarray(dims, &ndims, &idims) if periods is None: periods = False if isinstance(periods, bool): periods = [periods] * ndims periods = chkarray(periods, ndims, &iperiods) # cdef Cartcomm comm = New(Cartcomm) with nogil: CHKERR( MPI_Cart_create( self.ob_mpi, ndims, idims, iperiods, reorder, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm def Create_graph( self, index: Sequence[int], edges: Sequence[int], bint reorder: bool = False, ) -> Graphcomm: """ Create graph communicator. """ cdef int nnodes = 0, *iindex = NULL index = getarray(index, &nnodes, &iindex) cdef int nedges = 0, *iedges = NULL edges = getarray(edges, &nedges, &iedges) # extension: 'standard' adjacency arrays if iindex[0]==0 and iindex[nnodes-1]==nedges: nnodes -= 1 iindex += 1 # cdef Graphcomm comm = New(Graphcomm) with nogil: CHKERR( MPI_Graph_create( self.ob_mpi, nnodes, iindex, iedges, reorder, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm def Create_dist_graph_adjacent( self, sources: Sequence[int], destinations: Sequence[int], sourceweights: Sequence[int] | None = None, destweights: Sequence[int] | None = None, Info info: Info = INFO_NULL, bint reorder: bool = False, ) -> Distgraphcomm: """ Create distributed graph communicator. """ cdef int indegree = 0, *isource = NULL cdef int outdegree = 0, *idest = NULL cdef int *isourceweight = MPI_UNWEIGHTED cdef int *idestweight = MPI_UNWEIGHTED if sources is not None: sources = getarray(sources, &indegree, &isource) sourceweights = asarray_weights( sourceweights, indegree, &isourceweight) if destinations is not None: destinations = getarray(destinations, &outdegree, &idest) destweights = asarray_weights( destweights, outdegree, &idestweight) # cdef Distgraphcomm comm = New(Distgraphcomm) with nogil: CHKERR( MPI_Dist_graph_create_adjacent( self.ob_mpi, indegree, isource, isourceweight, outdegree, idest, idestweight, info.ob_mpi, reorder, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm def Create_dist_graph( self, sources: Sequence[int], degrees: Sequence[int], destinations: Sequence[int], weights: Sequence[int] | None = None, Info info: Info = INFO_NULL, bint reorder: bool = False, ) -> Distgraphcomm: """ Create distributed graph communicator. """ cdef int nv = 0, ne = 0 cdef int *isource = NULL, *idegree = NULL, cdef int *idest = NULL, *iweight = MPI_UNWEIGHTED sources = getarray(sources, &nv, &isource) degrees = chkarray(degrees, nv, &idegree) for i in range(nv): ne += idegree[i] destinations = chkarray(destinations, ne, &idest) weights = asarray_weights(weights, ne, &iweight) # cdef Distgraphcomm comm = New(Distgraphcomm) with nogil: CHKERR( MPI_Dist_graph_create( self.ob_mpi, nv, isource, idegree, idest, iweight, info.ob_mpi, reorder, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm def Create_intercomm( self, int local_leader: int, Intracomm peer_comm: Intracomm, int remote_leader: int, int tag: int = 0, ) -> Intercomm: """ Create intercommunicator. """ cdef Intercomm comm = New(Intercomm) with nogil: CHKERR( MPI_Intercomm_create( self.ob_mpi, local_leader, peer_comm.ob_mpi, remote_leader, tag, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm # Low-Level Topology Functions # ---------------------------- def Cart_map( self, dims: Sequence[int], periods: Sequence[bool] | None = None, ) -> int: """ Determine optimal process placement on a Cartesian topology. 
""" cdef int ndims = 0, *idims = NULL, *iperiods = NULL dims = getarray(dims, &ndims, &idims) if periods is None: periods = False if isinstance(periods, bool): periods = [periods] * ndims periods = chkarray(periods, ndims, &iperiods) cdef int rank = MPI_PROC_NULL CHKERR( MPI_Cart_map(self.ob_mpi, ndims, idims, iperiods, &rank) ) return rank def Graph_map( self, index: Sequence[int], edges: Sequence[int], ) -> int: """ Determine optimal process placement on a graph topology. """ cdef int nnodes = 0, *iindex = NULL index = getarray(index, &nnodes, &iindex) cdef int nedges = 0, *iedges = NULL edges = getarray(edges, &nedges, &iedges) # extension: accept more 'standard' adjacency arrays if iindex[0]==0 and iindex[nnodes-1]==nedges: nnodes -= 1 iindex += 1 cdef int rank = MPI_PROC_NULL CHKERR( MPI_Graph_map(self.ob_mpi, nnodes, iindex, iedges, &rank) ) return rank # Global Reduction Operations # --------------------------- # Inclusive Scan def Scan( self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec, Op op: Op = SUM, ) -> None: """ Inclusive Scan. """ cdef _p_msg_cco m = message_cco() m.for_scan(sendbuf, recvbuf, self.ob_mpi) with nogil: CHKERR( MPI_Scan_c( m.sbuf, m.rbuf, m.rcount, m.rtype, op.ob_mpi, self.ob_mpi) ) # Exclusive Scan def Exscan( self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec, Op op: Op = SUM, ) -> None: """ Exclusive Scan. """ cdef _p_msg_cco m = message_cco() m.for_exscan(sendbuf, recvbuf, self.ob_mpi) with nogil: CHKERR( MPI_Exscan_c( m.sbuf, m.rbuf, m.rcount, m.rtype, op.ob_mpi, self.ob_mpi) ) # Nonblocking def Iscan( self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec, Op op: Op = SUM, ) -> Request: """ Inclusive Scan. """ cdef _p_msg_cco m = message_cco() m.for_scan(sendbuf, recvbuf, self.ob_mpi) cdef Request request = New(Request) with nogil: CHKERR( MPI_Iscan_c( m.sbuf, m.rbuf, m.rcount, m.rtype, op.ob_mpi, self.ob_mpi, &request.ob_mpi) ) return request def Iexscan( self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec, Op op: Op = SUM, ) -> Request: """ Inclusive Scan. """ cdef _p_msg_cco m = message_cco() m.for_exscan(sendbuf, recvbuf, self.ob_mpi) cdef Request request = New(Request) with nogil: CHKERR( MPI_Iexscan_c( m.sbuf, m.rbuf, m.rcount, m.rtype, op.ob_mpi, self.ob_mpi, &request.ob_mpi) ) return request # Persistent def Scan_init( self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec, Op op: Op = SUM, Info info: Info = INFO_NULL, ) -> Prequest: """ Persistent Inclusive Scan. """ cdef _p_msg_cco m = message_cco() m.for_scan(sendbuf, recvbuf, self.ob_mpi) cdef Prequest request = New(Prequest) with nogil: CHKERR( MPI_Scan_init_c( m.sbuf, m.rbuf, m.rcount, m.rtype, op.ob_mpi, self.ob_mpi, info.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Exscan_init( self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec, Op op: Op = SUM, Info info: Info = INFO_NULL, ) -> Prequest: """ Persistent Exclusive Scan. 
""" cdef _p_msg_cco m = message_cco() m.for_exscan(sendbuf, recvbuf, self.ob_mpi) cdef Prequest request = New(Prequest) with nogil: CHKERR( MPI_Exscan_init_c( m.sbuf, m.rbuf, m.rcount, m.rtype, op.ob_mpi, self.ob_mpi, info.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request # Python Communication def scan( self, sendobj: Any, op: Op | Callable[[Any, Any], Any] = SUM, ) -> Any: """Inclusive Scan.""" cdef MPI_Comm comm = self.ob_mpi return PyMPI_scan(sendobj, op, comm) def exscan( self, sendobj: Any, op: Op | Callable[[Any, Any], Any] = SUM, ) -> Any: """Exclusive Scan.""" cdef MPI_Comm comm = self.ob_mpi return PyMPI_exscan(sendobj, op, comm) # Establishing Communication # -------------------------- # Starting Processes def Spawn( self, command: str, args: Sequence[str] | None = None, int maxprocs: int = 1, Info info: Info = INFO_NULL, int root: int = 0, errcodes: list[int] | None = None, ) -> Intercomm: """ Spawn instances of a single MPI application. """ cdef char *cmd = NULL cdef char **argv = MPI_ARGV_NULL cdef int *ierrcodes = MPI_ERRCODES_IGNORE # cdef int rank = MPI_UNDEFINED CHKERR( MPI_Comm_rank(self.ob_mpi, &rank) ) cdef object unused1, unused2, unused3 if root == rank: unused1 = asmpistr(command, &cmd) unused2 = asarray_argv(args, &argv) if errcodes is not None: unused3 = newarray(maxprocs, &ierrcodes) # cdef Intercomm comm = New(Intercomm) with nogil: CHKERR( MPI_Comm_spawn( cmd, argv, maxprocs, info.ob_mpi, root, self.ob_mpi, &comm.ob_mpi, ierrcodes) ) # if errcodes is not None: errcodes[:] = [ierrcodes[i] for i in range(maxprocs)] # comm_set_eh(comm.ob_mpi) return comm def Spawn_multiple( self, command: Sequence[str], args: Sequence[Sequence[str]] | None = None, maxprocs: Sequence[int] | None = None, info: Sequence[Info] | Info = INFO_NULL, int root: int = 0, errcodes: list[list[int]] | None = None, ) -> Intercomm: """ Spawn instances of multiple MPI applications. """ cdef int count = 0 cdef char **cmds = NULL cdef char ***argvs = MPI_ARGVS_NULL cdef MPI_Info *infos = NULL cdef int *imaxprocs = NULL cdef int *ierrcodes = MPI_ERRCODES_IGNORE # cdef int rank = MPI_UNDEFINED CHKERR( MPI_Comm_rank(self.ob_mpi, &rank) ) cdef object unused1, unused2, unused3, unused4, unused5 if root == rank: unused1 = asarray_cmds(command, &count, &cmds) unused2 = asarray_argvs(args, count, &argvs) unused3 = asarray_nprocs(maxprocs, count, &imaxprocs) unused4 = asarray_Info(info, count, &infos) cdef int np = 0 if errcodes is not None: if root != rank: count = len(maxprocs) unused3 = asarray_nprocs(maxprocs, count, &imaxprocs) for i in range(count): np += imaxprocs[i] unused5 = newarray(np, &ierrcodes) # cdef Intercomm comm = New(Intercomm) with nogil: CHKERR( MPI_Comm_spawn_multiple( count, cmds, argvs, imaxprocs, infos, root, self.ob_mpi, &comm.ob_mpi, ierrcodes) ) # cdef int p=0, q=0 if errcodes is not None: errcodes[:] = [[] for _ in range(count)] for i in range(count): q = p + imaxprocs[i] errcodes[i][:] = [ierrcodes[j] for j in range(p, q)] p = q # comm_set_eh(comm.ob_mpi) return comm # Server Routines def Accept( self, port_name: str, Info info: Info = INFO_NULL, int root: int = 0, ) -> Intercomm: """ Accept a request to form a new intercommunicator. 
""" cdef char *cportname = NULL cdef int rank = MPI_UNDEFINED CHKERR( MPI_Comm_rank(self.ob_mpi, &rank) ) if root == rank: port_name = asmpistr(port_name, &cportname) cdef Intercomm comm = New(Intercomm) with nogil: CHKERR( MPI_Comm_accept( cportname, info.ob_mpi, root, self.ob_mpi, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm # Client Routines def Connect( self, port_name: str, Info info: Info = INFO_NULL, int root: int = 0, ) -> Intercomm: """ Make a request to form a new intercommunicator. """ cdef char *cportname = NULL cdef int rank = MPI_UNDEFINED CHKERR( MPI_Comm_rank(self.ob_mpi, &rank) ) if root == rank: port_name = asmpistr(port_name, &cportname) cdef Intercomm comm = New(Intercomm) with nogil: CHKERR( MPI_Comm_connect( cportname, info.ob_mpi, root, self.ob_mpi, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm cdef class Topocomm(Intracomm): """ Topology intracommunicator. """ def __cinit__(self, Comm comm: Comm | None = None): comm # unused if self.ob_mpi == MPI_COMM_NULL: return cdef int topo = MPI_UNDEFINED CHKERR( MPI_Topo_test(self.ob_mpi, &topo) ) if topo == MPI_UNDEFINED: raise TypeError("expecting a topology communicator") property degrees: """Number of incoming and outgoing neighbors.""" def __get__(self) -> tuple[int, int]: cdef object dim, rank cdef object nneighbors if isinstance(self, Cartcomm): dim = self.Get_dim() return (2*dim, 2*dim) if isinstance(self, Graphcomm): rank = self.Get_rank() nneighbors = self.Get_neighbors_count(rank) return (nneighbors, nneighbors) if isinstance(self, Distgraphcomm): nneighbors = self.Get_dist_neighbors_count()[:2] return nneighbors raise TypeError("expecting a topology communicator") # ~> unreachable property indegree: """Number of incoming neighbors.""" def __get__(self) -> int: return self.degrees[0] property outdegree: """Number of outgoing neighbors.""" def __get__(self) -> int: return self.degrees[1] property inoutedges: """Incoming and outgoing neighbors.""" def __get__(self) -> tuple[list[int], list[int]]: cdef object direction, source, dest, rank cdef object neighbors if isinstance(self, Cartcomm): neighbors = [] for direction in range(self.Get_dim()): source, dest = self.Shift(direction, 1) neighbors.append(source) neighbors.append(dest) return (neighbors, neighbors) if isinstance(self, Graphcomm): rank = self.Get_rank() neighbors = self.Get_neighbors(rank) return (neighbors, neighbors) if isinstance(self, Distgraphcomm): neighbors = self.Get_dist_neighbors()[:2] return neighbors raise TypeError("expecting a topology communicator") # ~> unreachable property inedges: """Incoming neighbors.""" def __get__(self) -> list[int]: return self.inoutedges[0] property outedges: """Outgoing neighbors.""" def __get__(self) -> list[int]: return self.inoutedges[1] # Neighborhood Collectives # ------------------------ def Neighbor_allgather( self, sendbuf: BufSpec, recvbuf: BufSpecB, ) -> None: """ Neighbor Gather to All. """ cdef _p_msg_cco m = message_cco() m.for_neighbor_allgather(0, sendbuf, recvbuf, self.ob_mpi) with nogil: CHKERR( MPI_Neighbor_allgather_c( m.sbuf, m.scount, m.stype, m.rbuf, m.rcount, m.rtype, self.ob_mpi) ) def Neighbor_allgatherv( self, sendbuf: BufSpec, recvbuf: BufSpecV, ) -> None: """ Neighbor Gather to All Vector. 
""" cdef _p_msg_cco m = message_cco() m.for_neighbor_allgather(1, sendbuf, recvbuf, self.ob_mpi) with nogil: CHKERR( MPI_Neighbor_allgatherv_c( m.sbuf, m.scount, m.stype, m.rbuf, m.rcounts, m.rdispls, m.rtype, self.ob_mpi) ) def Neighbor_alltoall( self, sendbuf: BufSpecB, recvbuf: BufSpecB, ) -> None: """ Neighbor All to All. """ cdef _p_msg_cco m = message_cco() m.for_neighbor_alltoall(0, sendbuf, recvbuf, self.ob_mpi) with nogil: CHKERR( MPI_Neighbor_alltoall_c( m.sbuf, m.scount, m.stype, m.rbuf, m.rcount, m.rtype, self.ob_mpi) ) def Neighbor_alltoallv( self, sendbuf: BufSpecV, recvbuf: BufSpecV, ) -> None: """ Neighbor All to All Vector. """ cdef _p_msg_cco m = message_cco() m.for_neighbor_alltoall(1, sendbuf, recvbuf, self.ob_mpi) with nogil: CHKERR( MPI_Neighbor_alltoallv_c( m.sbuf, m.scounts, m.sdispls, m.stype, m.rbuf, m.rcounts, m.rdispls, m.rtype, self.ob_mpi) ) def Neighbor_alltoallw( self, sendbuf: BufSpecW, recvbuf: BufSpecW, ) -> None: """ Neighbor All to All General. """ cdef _p_msg_ccow m = message_ccow() m.for_neighbor_alltoallw(sendbuf, recvbuf, self.ob_mpi) with nogil: CHKERR( MPI_Neighbor_alltoallw_c( m.sbuf, m.scounts, m.sdispls, m.stypes, m.rbuf, m.rcounts, m.rdispls, m.rtypes, self.ob_mpi) ) # Nonblocking Neighborhood Collectives # ------------------------------------ def Ineighbor_allgather( self, sendbuf: BufSpec, recvbuf: BufSpecB, ) -> Request: """ Nonblocking Neighbor Gather to All. """ cdef _p_msg_cco m = message_cco() m.for_neighbor_allgather(0, sendbuf, recvbuf, self.ob_mpi) cdef Request request = New(Request) with nogil: CHKERR( MPI_Ineighbor_allgather_c( m.sbuf, m.scount, m.stype, m.rbuf, m.rcount, m.rtype, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Ineighbor_allgatherv( self, sendbuf: BufSpec, recvbuf: BufSpecV, ) -> Request: """ Nonblocking Neighbor Gather to All Vector. """ cdef _p_msg_cco m = message_cco() m.for_neighbor_allgather(1, sendbuf, recvbuf, self.ob_mpi) cdef Request request = New(Request) with nogil: CHKERR( MPI_Ineighbor_allgatherv_c( m.sbuf, m.scount, m.stype, m.rbuf, m.rcounts, m.rdispls, m.rtype, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Ineighbor_alltoall( self, sendbuf: BufSpecB, recvbuf: BufSpecB, ) -> Request: """ Nonblocking Neighbor All to All. """ cdef _p_msg_cco m = message_cco() m.for_neighbor_alltoall(0, sendbuf, recvbuf, self.ob_mpi) cdef Request request = New(Request) with nogil: CHKERR( MPI_Ineighbor_alltoall_c( m.sbuf, m.scount, m.stype, m.rbuf, m.rcount, m.rtype, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Ineighbor_alltoallv( self, sendbuf: BufSpecV, recvbuf: BufSpecV, ) -> Request: """ Nonblocking Neighbor All to All Vector. """ cdef _p_msg_cco m = message_cco() m.for_neighbor_alltoall(1, sendbuf, recvbuf, self.ob_mpi) cdef Request request = New(Request) with nogil: CHKERR( MPI_Ineighbor_alltoallv_c( m.sbuf, m.scounts, m.sdispls, m.stype, m.rbuf, m.rcounts, m.rdispls, m.rtype, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Ineighbor_alltoallw( self, sendbuf: BufSpecW, recvbuf: BufSpecW, ) -> Request: """ Nonblocking Neighbor All to All General. 
""" cdef _p_msg_ccow m = message_ccow() m.for_neighbor_alltoallw(sendbuf, recvbuf, self.ob_mpi) cdef Request request = New(Request) with nogil: CHKERR( MPI_Ineighbor_alltoallw_c( m.sbuf, m.scounts, m.sdispls, m.stypes, m.rbuf, m.rcounts, m.rdispls, m.rtypes, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request # Persistent Neighborhood Collectives # ----------------------------------- def Neighbor_allgather_init( self, sendbuf: BufSpec, recvbuf: BufSpecB, Info info: Info = INFO_NULL, ) -> Prequest: """ Persistent Neighbor Gather to All. """ cdef _p_msg_cco m = message_cco() m.for_neighbor_allgather(0, sendbuf, recvbuf, self.ob_mpi) cdef Prequest request = New(Prequest) with nogil: CHKERR( MPI_Neighbor_allgather_init_c( m.sbuf, m.scount, m.stype, m.rbuf, m.rcount, m.rtype, self.ob_mpi, info.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Neighbor_allgatherv_init( self, sendbuf: BufSpec, recvbuf: BufSpecV, Info info: Info = INFO_NULL, ) -> Prequest: """ Persistent Neighbor Gather to All Vector. """ cdef _p_msg_cco m = message_cco() m.for_neighbor_allgather(1, sendbuf, recvbuf, self.ob_mpi) cdef Prequest request = New(Prequest) with nogil: CHKERR( MPI_Neighbor_allgatherv_init_c( m.sbuf, m.scount, m.stype, m.rbuf, m.rcounts, m.rdispls, m.rtype, self.ob_mpi, info.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Neighbor_alltoall_init( self, sendbuf: BufSpecB, recvbuf: BufSpecB, Info info: Info = INFO_NULL, ) -> Prequest: """ Persistent Neighbor All to All. """ cdef _p_msg_cco m = message_cco() m.for_neighbor_alltoall(0, sendbuf, recvbuf, self.ob_mpi) cdef Prequest request = New(Prequest) with nogil: CHKERR( MPI_Neighbor_alltoall_init_c( m.sbuf, m.scount, m.stype, m.rbuf, m.rcount, m.rtype, self.ob_mpi, info.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Neighbor_alltoallv_init( self, sendbuf: BufSpecV, recvbuf: BufSpecV, Info info: Info = INFO_NULL, ) -> Prequest: """ Persistent Neighbor All to All Vector. """ cdef _p_msg_cco m = message_cco() m.for_neighbor_alltoall(1, sendbuf, recvbuf, self.ob_mpi) cdef Prequest request = New(Prequest) with nogil: CHKERR( MPI_Neighbor_alltoallv_init_c( m.sbuf, m.scounts, m.sdispls, m.stype, m.rbuf, m.rcounts, m.rdispls, m.rtype, self.ob_mpi, info.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Neighbor_alltoallw_init( self, sendbuf: BufSpecW, recvbuf: BufSpecW, Info info: Info = INFO_NULL, ) -> Prequest: """ Persistent Neighbor All to All General. """ cdef _p_msg_ccow m = message_ccow() m.for_neighbor_alltoallw(sendbuf, recvbuf, self.ob_mpi) cdef Prequest request = New(Prequest) with nogil: CHKERR( MPI_Neighbor_alltoallw_init_c( m.sbuf, m.scounts, m.sdispls, m.stypes, m.rbuf, m.rcounts, m.rdispls, m.rtypes, self.ob_mpi, info.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request # Python Communication def neighbor_allgather(self, sendobj: Any) -> list[Any]: """Neighbor Gather to All.""" cdef MPI_Comm comm = self.ob_mpi return PyMPI_neighbor_allgather(sendobj, comm) def neighbor_alltoall(self, sendobj: list[Any]) -> list[Any]: """Neighbor All to All.""" cdef MPI_Comm comm = self.ob_mpi return PyMPI_neighbor_alltoall(sendobj, comm) cdef class Cartcomm(Topocomm): """ Cartesian topology intracommunicator. 
""" def __cinit__(self, Comm comm: Comm | None = None): comm # unused if self.ob_mpi == MPI_COMM_NULL: return cdef int topo = MPI_UNDEFINED CHKERR( MPI_Topo_test(self.ob_mpi, &topo) ) if topo != MPI_CART: raise TypeError("expecting a Cartesian communicator") # Cartesian Inquiry Functions # --------------------------- def Get_dim(self) -> int: """ Return number of dimensions. """ cdef int dim = 0 CHKERR( MPI_Cartdim_get(self.ob_mpi, &dim) ) return dim property dim: """Number of dimensions.""" def __get__(self) -> int: return self.Get_dim() property ndim: """Number of dimensions.""" def __get__(self) -> int: return self.Get_dim() def Get_topo(self) -> tuple[list[int], list[int], list[int]]: """ Return information on the cartesian topology. """ cdef int ndim = 0 CHKERR( MPI_Cartdim_get(self.ob_mpi, &ndim) ) cdef int *idims = NULL cdef unused1 = newarray(ndim, &idims) cdef int *iperiods = NULL cdef unused2 = newarray(ndim, &iperiods) cdef int *icoords = NULL cdef unused3 = newarray(ndim, &icoords) CHKERR( MPI_Cart_get(self.ob_mpi, ndim, idims, iperiods, icoords) ) cdef object dims = [idims[i] for i in range(ndim)] cdef object periods = [iperiods[i] for i in range(ndim)] cdef object coords = [icoords[i] for i in range(ndim)] return (dims, periods, coords) property topo: """Topology information.""" def __get__(self) -> tuple[list[int], list[int], list[int]]: return self.Get_topo() property dims: """Dimensions.""" def __get__(self) -> list[int]: return self.Get_topo()[0] property periods: """Periodicity.""" def __get__(self) -> list[int]: return self.Get_topo()[1] property coords: """Coordinates.""" def __get__(self) -> list[int]: return self.Get_topo()[2] # Cartesian Translator Functions # ------------------------------ def Get_cart_rank(self, coords: Sequence[int]) -> int: """ Translate logical coordinates to ranks. """ cdef int ndim = 0, *icoords = NULL CHKERR( MPI_Cartdim_get( self.ob_mpi, &ndim) ) coords = chkarray(coords, ndim, &icoords) cdef int rank = MPI_PROC_NULL CHKERR( MPI_Cart_rank(self.ob_mpi, icoords, &rank) ) return rank def Get_coords(self, int rank: int) -> list[int]: """ Translate ranks to logical coordinates. """ cdef int ndim = 0, *icoords = NULL CHKERR( MPI_Cartdim_get(self.ob_mpi, &ndim) ) cdef unused = newarray(ndim, &icoords) CHKERR( MPI_Cart_coords(self.ob_mpi, rank, ndim, icoords) ) cdef object coords = [icoords[i] for i in range(ndim)] return coords # Cartesian Shift Function # ------------------------ def Shift(self, int direction: int, int disp: int) -> tuple[int, int]: """ Return a process ranks for data shifting with `Sendrecv`. """ cdef int source = MPI_PROC_NULL, dest = MPI_PROC_NULL CHKERR( MPI_Cart_shift(self.ob_mpi, direction, disp, &source, &dest) ) return (source, dest) # Cartesian Partition Function # ---------------------------- def Sub(self, remain_dims: Sequence[bool]) -> Cartcomm: """ Return a lower-dimensional Cartesian topology. """ cdef int ndim = 0, *iremdims = NULL CHKERR( MPI_Cartdim_get(self.ob_mpi, &ndim) ) remain_dims = chkarray(remain_dims, ndim, &iremdims) cdef Cartcomm comm = New(Cartcomm) with nogil: CHKERR( MPI_Cart_sub(self.ob_mpi, iremdims, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm # Cartesian Convenience Function def Compute_dims(int nnodes: int, dims: int | Sequence[int]) -> list[int]: """ Return a balanced distribution of processes per coordinate direction. 
""" cdef int ndims = 0, *idims = NULL if is_integral(dims): ndims = PyNumber_Index(dims) dims = [0] * ndims else: ndims = len(dims) cdef unused = chkarray(dims, ndims, &idims) CHKERR( MPI_Dims_create(nnodes, ndims, idims) ) dims = [idims[i] for i in range(ndims)] return dims cdef class Graphcomm(Topocomm): """ General graph topology intracommunicator. """ def __cinit__(self, Comm comm: Comm | None = None): comm # unused if self.ob_mpi == MPI_COMM_NULL: return cdef int topo = MPI_UNDEFINED CHKERR( MPI_Topo_test(self.ob_mpi, &topo) ) if topo != MPI_GRAPH: raise TypeError("expecting a general graph communicator") # Graph Inquiry Functions # ----------------------- def Get_dims(self) -> tuple[int, int]: """ Return the number of nodes and edges. """ cdef int nnodes = 0, nedges = 0 CHKERR( MPI_Graphdims_get(self.ob_mpi, &nnodes, &nedges) ) return (nnodes, nedges) property dims: """Number of nodes and edges.""" def __get__(self) -> tuple[int, int]: return self.Get_dims() property nnodes: """Number of nodes.""" def __get__(self) -> int: return self.Get_dims()[0] property nedges: """Number of edges.""" def __get__(self) -> int: return self.Get_dims()[1] def Get_topo(self) -> tuple[list[int], list[int]]: """ Return index and edges. """ cdef int nindex = 0, nedges = 0 CHKERR( MPI_Graphdims_get( self.ob_mpi, &nindex, &nedges) ) cdef int *iindex = NULL cdef unused1 = newarray(nindex, &iindex) cdef int *iedges = NULL cdef unused2 = newarray(nedges, &iedges) CHKERR( MPI_Graph_get(self.ob_mpi, nindex, nedges, iindex, iedges) ) cdef object index = [iindex[i] for i in range(nindex)] cdef object edges = [iedges[i] for i in range(nedges)] return (index, edges) property topo: """Topology information.""" def __get__(self) -> tuple[list[int], list[int]]: return self.Get_topo() property index: """Index.""" def __get__(self) -> list[int]: return self.Get_topo()[0] property edges: """Edges.""" def __get__(self) -> list[int]: return self.Get_topo()[1] # Graph Information Functions # --------------------------- def Get_neighbors_count(self, int rank: int) -> int: """ Return number of neighbors of a process. """ cdef int nneighbors = 0 CHKERR( MPI_Graph_neighbors_count(self.ob_mpi, rank, &nneighbors) ) return nneighbors property nneighbors: """Number of neighbors.""" def __get__(self) -> int: cdef int rank = self.Get_rank() return self.Get_neighbors_count(rank) def Get_neighbors(self, int rank: int) -> list[int]: """ Return list of neighbors of a process. """ cdef int nneighbors = 0, *ineighbors = NULL CHKERR( MPI_Graph_neighbors_count( self.ob_mpi, rank, &nneighbors) ) cdef unused = newarray(nneighbors, &ineighbors) CHKERR( MPI_Graph_neighbors( self.ob_mpi, rank, nneighbors, ineighbors) ) cdef object neighbors = [ineighbors[i] for i in range(nneighbors)] return neighbors property neighbors: """Neighbors.""" def __get__(self) -> list[int]: cdef int rank = self.Get_rank() return self.Get_neighbors(rank) cdef class Distgraphcomm(Topocomm): """ Distributed graph topology intracommunicator. """ def __cinit__(self, Comm comm: Comm | None = None): comm # unused if self.ob_mpi == MPI_COMM_NULL: return cdef int topo = MPI_UNDEFINED CHKERR( MPI_Topo_test(self.ob_mpi, &topo) ) if topo != MPI_DIST_GRAPH: raise TypeError("expecting a distributed graph communicator") # Topology Inquiry Functions # -------------------------- def Get_dist_neighbors_count(self) -> int: """ Return adjacency information for a distributed graph topology. 
""" cdef int indegree = 0 cdef int outdegree = 0 cdef int weighted = 0 CHKERR( MPI_Dist_graph_neighbors_count( self.ob_mpi, &indegree, &outdegree, &weighted) ) return (indegree, outdegree, weighted) def Get_dist_neighbors(self) -> \ tuple[list[int], list[int], tuple[list[int], list[int]] | None]: """ Return adjacency information for a distributed graph topology. """ cdef int maxindegree = 0, maxoutdegree = 0, weighted = 0 CHKERR( MPI_Dist_graph_neighbors_count( self.ob_mpi, &maxindegree, &maxoutdegree, &weighted) ) # cdef int *sources = NULL, *destinations = NULL cdef int *sourceweights = MPI_UNWEIGHTED cdef int *destweights = MPI_UNWEIGHTED cdef object unused1, unused2, unused3, unused4 unused1 = newarray(maxindegree, &sources) unused2 = newarray(maxoutdegree, &destinations) if weighted: unused3 = newarray(maxindegree, &sourceweights) for i in range(maxindegree): sourceweights[i] = 1 unused4 = newarray(maxoutdegree, &destweights) for i in range(maxoutdegree): destweights[i] = 1 # CHKERR( MPI_Dist_graph_neighbors( self.ob_mpi, maxindegree, sources, sourceweights, maxoutdegree, destinations, destweights) ) # cdef object src = [sources[i] for i in range(maxindegree)] cdef object dst = [destinations[i] for i in range(maxoutdegree)] if not weighted: return (src, dst, None) # cdef object sw = [sourceweights[i] for i in range(maxindegree)] cdef object dw = [destweights[i] for i in range(maxoutdegree)] return (src, dst, (sw, dw)) cdef class Intercomm(Comm): """ Intercommunicator. """ def __cinit__(self, Comm comm: Comm | None = None): comm # unused if self.ob_mpi == MPI_COMM_NULL: return cdef int inter = 0 CHKERR( MPI_Comm_test_inter(self.ob_mpi, &inter) ) if not inter: raise TypeError("expecting an intercommunicator") # Intercommunicator Constructors # ------------------------------ @classmethod def Create_from_groups( cls, Group local_group: Group, int local_leader: int, Group remote_group: Group, int remote_leader: int, stringtag: str = "org.mpi4py", Info info: Info = INFO_NULL, Errhandler errhandler: Errhandler | None = None, ) -> Intracomm: """ Create communicator from group. """ cdef char *cstringtag = NULL stringtag = asmpistr(stringtag, &cstringtag) cdef MPI_Errhandler cerrhdl = arg_Errhandler(errhandler) cdef Intercomm comm = New(Intercomm) with nogil: CHKERR( MPI_Intercomm_create_from_groups( local_group.ob_mpi, local_leader, remote_group.ob_mpi, remote_leader, cstringtag, info.ob_mpi, cerrhdl, &comm.ob_mpi) ) return comm # Intercommunicator Accessors # --------------------------- def Get_remote_group(self) -> Group: """ Access the remote group associated with the inter-communicator. """ cdef Group group = New(Group) with nogil: CHKERR( MPI_Comm_remote_group( self.ob_mpi, &group.ob_mpi) ) return group property remote_group: """Remote group.""" def __get__(self) -> Group: return self.Get_remote_group() def Get_remote_size(self) -> int: """ Intercommunicator remote size. """ cdef int size = -1 CHKERR( MPI_Comm_remote_size(self.ob_mpi, &size) ) return size property remote_size: """Number of remote processes.""" def __get__(self) -> int: return self.Get_remote_size() # Communicator Constructors # ------------------------- def Merge(self, bint high: bool = False) -> Intracomm: """ Merge intercommunicator into an intracommunicator. 
""" cdef Intracomm comm = New(Intracomm) with nogil: CHKERR( MPI_Intercomm_merge( self.ob_mpi, high, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm cdef Comm __COMM_NULL__ = def_Comm ( MPI_COMM_NULL , "COMM_NULL" ) cdef Intracomm __COMM_SELF__ = def_Intracomm ( MPI_COMM_SELF , "COMM_SELF" ) cdef Intracomm __COMM_WORLD__ = def_Intracomm ( MPI_COMM_WORLD , "COMM_WORLD" ) cdef Intercomm __COMM_PARENT__ = def_Intercomm ( MPI_COMM_NULL ) # Predefined communicators # ------------------------ COMM_NULL = __COMM_NULL__ #: Null communicator handle COMM_SELF = __COMM_SELF__ #: Self communicator handle COMM_WORLD = __COMM_WORLD__ #: World communicator handle # Buffer Allocation and Usage # --------------------------- BSEND_OVERHEAD = MPI_BSEND_OVERHEAD #: Upper bound of memory overhead for sending in buffered mode BUFFER_AUTOMATIC = __BUFFER_AUTOMATIC__ #: Special address for automatic buffering def Attach_buffer(buf: Buffer | None) -> None: """ Attach a user-provided buffer for sending in buffered mode. """ cdef void *base = NULL cdef MPI_Count size = 0 buf = attach_buffer(buf, &base, &size) with nogil: CHKERR( MPI_Buffer_attach_c(base, size) ) detach_buffer_set(0, buf) def Detach_buffer() -> Buffer | None: """ Remove an existing attached buffer. """ cdef void *base = NULL cdef MPI_Count size = 0 with nogil: CHKERR( MPI_Buffer_detach_c(&base, &size) ) return detach_buffer_get(0, base, size) def Flush_buffer() -> None: """ Block until all buffered messages have been transmitted. """ with nogil: CHKERR( MPI_Buffer_flush() ) def Iflush_buffer() -> Request: """ Nonblocking flush for buffered messages. """ cdef Request request = New(Request) with nogil: CHKERR( MPI_Buffer_iflush(&request.ob_mpi) ) return request # ~> MPI-4.1 # Process Creation and Management # ------------------------------- # Server Routines # --------------- def Open_port(Info info: Info = INFO_NULL) -> str: """ Return an address used to connect group of processes. """ cdef char cportname[MPI_MAX_PORT_NAME+1] cportname[0] = 0 # just in case with nogil: CHKERR( MPI_Open_port(info.ob_mpi, cportname) ) cportname[MPI_MAX_PORT_NAME] = 0 # just in case return mpistr(cportname) def Close_port(port_name: str) -> None: """ Close a port. """ cdef char *cportname = NULL port_name = asmpistr(port_name, &cportname) with nogil: CHKERR( MPI_Close_port(cportname) ) # Name Publishing # --------------- def Publish_name( service_name: str, port_name: str, Info info: Info = INFO_NULL, ) -> None: """ Publish a service name. """ cdef char *csrvcname = NULL service_name = asmpistr(service_name, &csrvcname) cdef char *cportname = NULL port_name = asmpistr(port_name, &cportname) cdef MPI_Info cinfo = arg_Info(info) with nogil: CHKERR( MPI_Publish_name(csrvcname, cinfo, cportname) ) def Unpublish_name( service_name: str, port_name: str, Info info: Info = INFO_NULL, ) -> None: """ Unpublish a service name. """ cdef char *csrvcname = NULL service_name = asmpistr(service_name, &csrvcname) cdef char *cportname = NULL port_name = asmpistr(port_name, &cportname) cdef MPI_Info cinfo = arg_Info(info) with nogil: CHKERR( MPI_Unpublish_name(csrvcname, cinfo, cportname) ) def Lookup_name( service_name: str, info: Info = INFO_NULL, ) -> str: """ Lookup a port name given a service name. 
""" cdef char *csrvcname = NULL service_name = asmpistr(service_name, &csrvcname) cdef MPI_Info cinfo = arg_Info(info) cdef char cportname[MPI_MAX_PORT_NAME+1] cportname[0] = 0 # just in case with nogil: CHKERR( MPI_Lookup_name(csrvcname, cinfo, cportname) ) cportname[MPI_MAX_PORT_NAME] = 0 # just in case return mpistr(cportname) mpi4py-4.0.3/src/mpi4py/MPI.src/Datatype.pyx000066400000000000000000001070731475341043600205200ustar00rootroot00000000000000# Storage order for arrays # ------------------------ ORDER_C = MPI_ORDER_C #: C order (a.k.a. row major) ORDER_FORTRAN = MPI_ORDER_FORTRAN #: Fortran order (a.k.a. column major) ORDER_F = MPI_ORDER_FORTRAN #: Convenience alias for ORDER_FORTRAN # Type classes for Fortran datatype matching # ------------------------------------------ TYPECLASS_INTEGER = MPI_TYPECLASS_INTEGER TYPECLASS_REAL = MPI_TYPECLASS_REAL TYPECLASS_COMPLEX = MPI_TYPECLASS_COMPLEX # Type of distributions (HPF-like arrays) # --------------------------------------- DISTRIBUTE_NONE = MPI_DISTRIBUTE_NONE #: Dimension not distributed DISTRIBUTE_BLOCK = MPI_DISTRIBUTE_BLOCK #: Block distribution DISTRIBUTE_CYCLIC = MPI_DISTRIBUTE_CYCLIC #: Cyclic distribution DISTRIBUTE_DFLT_DARG = MPI_DISTRIBUTE_DFLT_DARG #: Default distribution # Combiner values for datatype decoding # ------------------------------------- COMBINER_NAMED = MPI_COMBINER_NAMED COMBINER_DUP = MPI_COMBINER_DUP COMBINER_CONTIGUOUS = MPI_COMBINER_CONTIGUOUS COMBINER_VECTOR = MPI_COMBINER_VECTOR COMBINER_HVECTOR = MPI_COMBINER_HVECTOR COMBINER_INDEXED = MPI_COMBINER_INDEXED COMBINER_HINDEXED = MPI_COMBINER_HINDEXED COMBINER_INDEXED_BLOCK = MPI_COMBINER_INDEXED_BLOCK COMBINER_HINDEXED_BLOCK = MPI_COMBINER_HINDEXED_BLOCK COMBINER_STRUCT = MPI_COMBINER_STRUCT COMBINER_SUBARRAY = MPI_COMBINER_SUBARRAY COMBINER_DARRAY = MPI_COMBINER_DARRAY COMBINER_RESIZED = MPI_COMBINER_RESIZED COMBINER_VALUE_INDEX = MPI_COMBINER_VALUE_INDEX COMBINER_F90_INTEGER = MPI_COMBINER_F90_INTEGER COMBINER_F90_REAL = MPI_COMBINER_F90_REAL COMBINER_F90_COMPLEX = MPI_COMBINER_F90_COMPLEX cdef class Datatype: """ Datatype object. """ def __cinit__(self, Datatype datatype: Datatype | None = None): cinit(self, datatype) def __dealloc__(self): dealloc(self) def __richcmp__(self, other, int op): if not isinstance(other, Datatype): return NotImplemented return richcmp(self, other, op) def __bool__(self) -> bool: return nonnull(self) def __reduce__(self) -> str | tuple[Any, ...]: return reduce_Datatype(self) property handle: """MPI handle.""" def __get__(self) -> int: return tohandle(self) @classmethod def fromhandle(cls, handle: int) -> Datatype: """ Create object from MPI handle. """ return fromhandle( handle) def free(self) -> None: """ Call `Free` if not null or predefined. """ safefree(self) # Datatype Accessors # ------------------ def Get_size(self) -> int: """ Return the number of bytes occupied by entries in the datatype. """ cdef MPI_Count size = 0 CHKERR( MPI_Type_size_c(self.ob_mpi, &size) ) return size property size: """Size (in bytes).""" def __get__(self) -> int: cdef MPI_Count size = 0 CHKERR( MPI_Type_size_c(self.ob_mpi, &size) ) return size def Get_extent(self) -> tuple[int, int]: """ Return lower bound and extent of datatype. 
""" cdef MPI_Count lb = 0, extent = 0 CHKERR( MPI_Type_get_extent_c(self.ob_mpi, &lb, &extent) ) return (lb, extent) property extent: """Extent.""" def __get__(self) -> int: cdef MPI_Count lb = 0, extent = 0 CHKERR( MPI_Type_get_extent_c(self.ob_mpi, &lb, &extent) ) return extent property lb: """Lower bound.""" def __get__(self) -> int: cdef MPI_Count lb = 0, extent = 0 CHKERR( MPI_Type_get_extent_c(self.ob_mpi, &lb, &extent) ) return lb property ub: """Upper bound.""" def __get__(self) -> int: cdef MPI_Count lb = 0, extent = 0 CHKERR( MPI_Type_get_extent_c(self.ob_mpi, &lb, &extent) ) return lb + extent # Datatype Constructors # --------------------- def Dup(self) -> Self: """ Duplicate a datatype. """ cdef Datatype datatype = New(type(self)) CHKERR( MPI_Type_dup(self.ob_mpi, &datatype.ob_mpi) ) return datatype Create_dup = Dup #: convenience alias def Create_contiguous(self, Count count: int) -> Self: """ Create a contiguous datatype. """ cdef Datatype datatype = New(type(self)) CHKERR( MPI_Type_contiguous_c( count, self.ob_mpi, &datatype.ob_mpi) ) return datatype def Create_vector( self, Count count: int, Count blocklength: int, Count stride: int, ) -> Self: """ Create a vector (strided) datatype. """ cdef Datatype datatype = New(type(self)) CHKERR( MPI_Type_vector_c( count, blocklength, stride, self.ob_mpi, &datatype.ob_mpi) ) return datatype def Create_hvector( self, Count count: int, Count blocklength: int, Count stride: int, ) -> Self: """ Create a vector (strided) datatype with stride in bytes. """ cdef Datatype datatype = New(type(self)) CHKERR( MPI_Type_create_hvector_c( count, blocklength, stride, self.ob_mpi, &datatype.ob_mpi) ) return datatype def Create_indexed( self, blocklengths: Sequence[int], displacements: Sequence[int], ) -> Self: """ Create an indexed datatype. """ cdef MPI_Count count = 0, *iblen = NULL, *idisp = NULL blocklengths = getarray(blocklengths, &count, &iblen) displacements = chkarray(displacements, count, &idisp) # cdef Datatype datatype = New(type(self)) CHKERR( MPI_Type_indexed_c( count, iblen, idisp, self.ob_mpi, &datatype.ob_mpi) ) return datatype def Create_hindexed( self, blocklengths: Sequence[int], displacements: Sequence[int], ) -> Self: """ Create an indexed datatype. .. note:: Displacements are measured in bytes. """ cdef MPI_Count count = 0, *iblen = NULL, *idisp = NULL blocklengths = getarray(blocklengths, &count, &iblen) displacements = chkarray(displacements, count, &idisp) # cdef Datatype datatype = New(type(self)) CHKERR( MPI_Type_create_hindexed_c( count, iblen, idisp, self.ob_mpi, &datatype.ob_mpi) ) return datatype def Create_indexed_block( self, Count blocklength: int, displacements: Sequence[int], ) -> Self: """ Create an indexed datatype with constant-sized blocks. """ cdef Count count = 0, *idisp = NULL displacements = getarray(displacements, &count, &idisp) # cdef Datatype datatype = New(type(self)) CHKERR( MPI_Type_create_indexed_block_c( count, blocklength, idisp, self.ob_mpi, &datatype.ob_mpi) ) return datatype def Create_hindexed_block( self, Count blocklength: int, displacements: Sequence[int], ) -> Self: """ Create an indexed datatype with constant-sized blocks. .. note:: Displacements are measured in bytes. 
""" cdef MPI_Count count = 0, *idisp = NULL displacements = getarray(displacements, &count, &idisp) # cdef Datatype datatype = New(type(self)) CHKERR( MPI_Type_create_hindexed_block_c( count, blocklength, idisp, self.ob_mpi, &datatype.ob_mpi) ) return datatype @classmethod def Create_struct( cls, blocklengths: Sequence[int], displacements: Sequence[int], datatypes: Sequence[Datatype], ) -> Self: """ Create a general composite (struct) datatype. .. note:: Displacements are measured in bytes. """ cdef MPI_Count count = 0, *iblen = NULL, *idisp = NULL cdef MPI_Datatype *ptype = NULL blocklengths = getarray(blocklengths, &count, &iblen) displacements = chkarray(displacements, count, &idisp) datatypes = asarray_Datatype(datatypes, count, &ptype) # cdef Datatype datatype = New(cls) CHKERR( MPI_Type_create_struct_c( count, iblen, idisp, ptype, &datatype.ob_mpi) ) return datatype # Subarray Datatype Constructor # ----------------------------- def Create_subarray( self, sizes: Sequence[int], subsizes: Sequence[int], starts: Sequence[int], int order: int = ORDER_C, ) -> Self: """ Create a datatype for a subarray of a multidimensional array. """ cdef int ndims = 0 cdef MPI_Count *isizes = NULL cdef MPI_Count *isubsizes = NULL cdef MPI_Count *istarts = NULL sizes = getarray(sizes, &ndims, &isizes ) subsizes = chkarray(subsizes, ndims, &isubsizes) starts = chkarray(starts, ndims, &istarts ) # cdef Datatype datatype = New(type(self)) CHKERR( MPI_Type_create_subarray_c( ndims, isizes, isubsizes, istarts, order, self.ob_mpi, &datatype.ob_mpi) ) return datatype # Distributed Array Datatype Constructor # -------------------------------------- def Create_darray( self, int size: int, int rank: int, gsizes: Sequence[int], distribs: Sequence[int], dargs: Sequence[int], psizes: Sequence[int], int order: int = ORDER_C, ) -> Self: """ Create a datatype for a distributed array on Cartesian process grids. """ cdef int ndims = 0 cdef MPI_Count *igsizes = NULL cdef int *idistribs = NULL, *idargs = NULL, *ipsizes = NULL gsizes = getarray(gsizes, &ndims, &igsizes ) distribs = chkarray(distribs, ndims, &idistribs ) dargs = chkarray(dargs, ndims, &idargs ) psizes = chkarray(psizes, ndims, &ipsizes ) # cdef Datatype datatype = New(type(self)) CHKERR( MPI_Type_create_darray_c( size, rank, ndims, igsizes, idistribs, idargs, ipsizes, order, self.ob_mpi, &datatype.ob_mpi) ) return datatype # Pair Datatype # ------------- @classmethod def Get_value_index( cls, Datatype value: Datatype, Datatype index: Datatype, ) -> Self: """ Return a predefined pair datatype. """ cdef Datatype datatype = New(cls) CHKERR( MPI_Type_get_value_index( value.ob_mpi, index.ob_mpi, &datatype.ob_mpi) ) return datatype # Parametrized and size-specific Fortran Datatypes # ------------------------------------------------ @classmethod def Create_f90_integer(cls, int r: int) -> Self: """ Return a bounded integer datatype. """ cdef Datatype datatype = New(cls) CHKERR( MPI_Type_create_f90_integer(r, &datatype.ob_mpi) ) return datatype @classmethod def Create_f90_real(cls, int p: int, int r: int) -> Self: """ Return a bounded real datatype. """ cdef Datatype datatype = New(cls) CHKERR( MPI_Type_create_f90_real(p, r, &datatype.ob_mpi) ) return datatype @classmethod def Create_f90_complex(cls, int p: int, int r: int) -> Self: """ Return a bounded complex datatype. 
""" cdef Datatype datatype = New(cls) CHKERR( MPI_Type_create_f90_complex(p, r, &datatype.ob_mpi) ) return datatype @classmethod def Match_size(cls, int typeclass: int, int size: int) -> Self: """ Find a datatype matching a specified size in bytes. """ cdef Datatype datatype = New(cls) CHKERR( MPI_Type_match_size(typeclass, size, &datatype.ob_mpi) ) return datatype # Use of Derived Datatypes # ------------------------ def Commit(self) -> Self: """ Commit the datatype. """ CHKERR( MPI_Type_commit(&self.ob_mpi) ) return self def Free(self) -> None: """ Free the datatype. """ cdef MPI_Datatype save = self.ob_mpi CHKERR( MPI_Type_free(&self.ob_mpi) ) if constobj(self): self.ob_mpi = save # Datatype Resizing # ----------------- def Create_resized(self, Count lb: int, Count extent: int) -> Self: """ Create a datatype with a new lower bound and extent. """ cdef Datatype datatype = New(type(self)) CHKERR( MPI_Type_create_resized_c( self.ob_mpi, lb, extent, &datatype.ob_mpi) ) return datatype Resized = Create_resized #: compatibility alias def Get_true_extent(self) -> tuple[int, int]: """ Return the true lower bound and extent of a datatype. """ cdef MPI_Count lb = 0, extent = 0 CHKERR( MPI_Type_get_true_extent_c( self.ob_mpi, &lb, &extent) ) return (lb, extent) property true_extent: """True extent.""" def __get__(self) -> int: cdef MPI_Count lb = 0, extent = 0 CHKERR( MPI_Type_get_true_extent_c( self.ob_mpi, &lb, &extent) ) return extent property true_lb: """True lower bound.""" def __get__(self) -> int: cdef MPI_Count lb = 0, extent = 0 CHKERR( MPI_Type_get_true_extent_c( self.ob_mpi, &lb, &extent) ) return lb property true_ub: """True upper bound.""" def __get__(self) -> int: cdef MPI_Count lb = 0, extent = 0 CHKERR( MPI_Type_get_true_extent_c( self.ob_mpi, &lb, &extent) ) return lb + extent # Decoding a Datatype # ------------------- def Get_envelope(self) -> tuple[int, int, int, int, int]: """ Return the number of input arguments used to create a datatype. """ cdef int combiner = MPI_UNDEFINED cdef MPI_Count ni = 0, na = 0, nc = 0, nd = 0 CHKERR( MPI_Type_get_envelope_c( self.ob_mpi, &ni, &na, &nc, &nd, &combiner) ) return (ni, na, nc, nd, combiner) property envelope: """Envelope.""" def __get__(self) -> tuple[int, int, int, int, int]: return self.Get_envelope() def Get_contents(self) -> \ tuple[list[int], list[int], list[int], list[Datatype]]: """ Return the input arguments used to create a datatype. """ cdef int combiner = MPI_UNDEFINED cdef MPI_Count ni = 0, na = 0, nc = 0, nd = 0 CHKERR( MPI_Type_get_envelope_c( self.ob_mpi, &ni, &na, &nc, &nd, &combiner) ) cdef int *i = NULL cdef MPI_Aint *a = NULL cdef MPI_Count *c = NULL cdef MPI_Datatype *d = NULL cdef unused1 = allocate(ni, sizeof(int), &i) cdef unused2 = allocate(na, sizeof(MPI_Aint), &a) cdef unused3 = allocate(nc, sizeof(MPI_Count), &c) cdef unused4 = allocate(nd, sizeof(MPI_Datatype), &d) CHKERR( MPI_Type_get_contents_c( self.ob_mpi, ni, na, nc, nd, i, a, c, d) ) cdef object integers = [i[k] for k in range(ni)] cdef object addresses = [a[k] for k in range(na)] cdef object counts = [c[k] for k in range(nc)] cdef object datatypes = [ref_Datatype(d[k]) for k in range(nd)] return (integers, addresses, counts, datatypes) property contents: """Contents.""" def __get__(self) -> \ tuple[list[int], list[int], list[int], list[Datatype]]: return self.Get_contents() def decode(self) -> tuple[Datatype, str, dict[str, Any]]: """ Convenience method for decoding a datatype. 
""" return datatype_decode(self, mark=False) property combiner: """Combiner.""" def __get__(self) -> int: cdef int combiner = self.Get_envelope()[-1] return combiner property is_named: """Is a named datatype.""" def __get__(self) -> bool: cdef int combiner = self.Get_envelope()[-1] return combiner == MPI_COMBINER_NAMED property is_predefined: """Is a predefined datatype.""" def __get__(self) -> bool: if self.ob_mpi == MPI_DATATYPE_NULL: return True cdef int combiner = self.Get_envelope()[-1] return (combiner == MPI_COMBINER_NAMED or combiner == MPI_COMBINER_VALUE_INDEX or combiner == MPI_COMBINER_F90_INTEGER or combiner == MPI_COMBINER_F90_REAL or combiner == MPI_COMBINER_F90_COMPLEX) # Pack and Unpack # --------------- def Pack( self, inbuf: BufSpec, outbuf: BufSpec, Count position: int, Comm comm: Comm, ) -> int: """ Pack into contiguous memory according to datatype. """ cdef MPI_Count lb = 0, extent = 0 CHKERR( MPI_Type_get_extent_c(self.ob_mpi, &lb, &extent) ) # cdef void *ibptr = NULL, *obptr = NULL cdef MPI_Aint iblen = 0, oblen = 0 cdef unused1 = asbuffer_r(inbuf, &ibptr, &iblen) cdef unused2 = asbuffer_w(outbuf, &obptr, &oblen) cdef MPI_Count icount = iblen // extent cdef MPI_Count ocount = oblen # CHKERR( MPI_Pack_c( ibptr, icount, self.ob_mpi, obptr, ocount, &position, comm.ob_mpi) ) return position def Unpack( self, inbuf: BufSpec, Count position: int, outbuf: BufSpec, Comm comm: Comm, ) -> int: """ Unpack from contiguous memory according to datatype. """ cdef MPI_Count lb = 0, extent = 0 CHKERR( MPI_Type_get_extent_c(self.ob_mpi, &lb, &extent) ) # cdef void *ibptr = NULL, *obptr = NULL cdef MPI_Aint iblen = 0, oblen = 0 cdef unused1 = asbuffer_r(inbuf, &ibptr, &iblen) cdef unused2 = asbuffer_w(outbuf, &obptr, &oblen) cdef MPI_Count icount = iblen cdef MPI_Count ocount = oblen // extent # CHKERR( MPI_Unpack_c( ibptr, icount, &position, obptr, ocount, self.ob_mpi, comm.ob_mpi) ) return position def Pack_size( self, Count count: int, Comm comm: Comm, ) -> int: """ Determine the amount of space needed to pack a message. .. note:: Returns an upper bound measured in bytes. """ cdef MPI_Count size = 0 CHKERR( MPI_Pack_size_c( count, self.ob_mpi, comm.ob_mpi, &size) ) return size # Canonical Pack and Unpack # ------------------------- def Pack_external( self, datarep: str, inbuf: BufSpec, outbuf: BufSpec, Count position: int, ) -> int: """ Pack into contiguous memory according to datatype. Uses the portable data representation **external32**. """ cdef char *cdatarep = NULL datarep = asmpistr(datarep, &cdatarep) cdef MPI_Count lb = 0, extent = 0 CHKERR( MPI_Type_get_extent_c(self.ob_mpi, &lb, &extent) ) # cdef void *ibptr = NULL, *obptr = NULL cdef MPI_Aint iblen = 0, oblen = 0 cdef unused1 = asbuffer_r(inbuf, &ibptr, &iblen) cdef unused2 = asbuffer_w(outbuf, &obptr, &oblen) cdef MPI_Count icount = iblen // extent cdef MPI_Count ocount = oblen # CHKERR( MPI_Pack_external_c( cdatarep, ibptr, icount, self.ob_mpi, obptr, ocount, &position) ) return position def Unpack_external( self, datarep: str, inbuf: BufSpec, Count position: int, outbuf: BufSpec, ) -> int: """ Unpack from contiguous memory according to datatype. Uses the portable data representation **external32**. 
""" cdef char *cdatarep = NULL datarep = asmpistr(datarep, &cdatarep) cdef MPI_Count lb = 0, extent = 0 CHKERR( MPI_Type_get_extent_c(self.ob_mpi, &lb, &extent) ) # cdef void *ibptr = NULL, *obptr = NULL cdef MPI_Aint iblen = 0, oblen = 0 cdef unused1 = asbuffer_r(inbuf, &ibptr, &iblen) cdef unused2 = asbuffer_w(outbuf, &obptr, &oblen) cdef MPI_Count icount = iblen cdef MPI_Count ocount = oblen // extent # CHKERR( MPI_Unpack_external_c( cdatarep, ibptr, icount, &position, obptr, ocount, self.ob_mpi) ) return position def Pack_external_size( self, datarep: str, Count count: int, ) -> int: """ Determine the amount of space needed to pack a message. Uses the portable data representation **external32**. .. note:: Returns an upper bound measured in bytes. """ cdef char *cdatarep = NULL cdef MPI_Count size = 0 datarep = asmpistr(datarep, &cdatarep) CHKERR( MPI_Pack_external_size_c( cdatarep, count, self.ob_mpi, &size) ) return size # Attributes # ---------- def Get_attr(self, int keyval: int) -> int | Any | None: """ Retrieve attribute value by key. """ cdef void *attrval = NULL cdef int flag = 0 CHKERR( MPI_Type_get_attr(self.ob_mpi, keyval, &attrval, &flag) ) if not flag: return None if attrval == NULL: return 0 # user-defined attribute keyval return PyMPI_attr_get(self.ob_mpi, keyval, attrval) def Set_attr(self, int keyval: int, attrval: Any) -> None: """ Store attribute value associated with a key. """ PyMPI_attr_set(self.ob_mpi, keyval, attrval) def Delete_attr(self, int keyval: int) -> None: """ Delete attribute value associated with a key. """ PyMPI_attr_del(self.ob_mpi, keyval) @classmethod def Create_keyval( cls, copy_fn: Callable[[Datatype, int, Any], Any] | None = None, delete_fn: Callable[[Datatype, int, Any], None] | None = None, nopython: bool = False, ) -> int: """ Create a new attribute key for datatypes. """ cdef int keyval = MPI_KEYVAL_INVALID cdef MPI_Type_copy_attr_function *_copy = PyMPI_attr_copy_fn cdef MPI_Type_delete_attr_function *_del = PyMPI_attr_delete_fn cdef _p_keyval state = _p_keyval(copy_fn, delete_fn, nopython) CHKERR( MPI_Type_create_keyval(_copy, _del, &keyval, state) ) PyMPI_attr_state_set(MPI_DATATYPE_NULL, keyval, state) return keyval @classmethod def Free_keyval(cls, int keyval: int) -> int: """ Free an attribute key for datatypes. """ cdef int keyval_save = keyval CHKERR( MPI_Type_free_keyval(&keyval) ) PyMPI_attr_state_del(MPI_DATATYPE_NULL, keyval_save) return keyval # Naming Objects # -------------- def Get_name(self) -> str: """ Get the print name for this datatype. """ cdef char name[MPI_MAX_OBJECT_NAME+1] cdef int nlen = 0 CHKERR( MPI_Type_get_name(self.ob_mpi, name, &nlen) ) return tompistr(name, nlen) def Set_name(self, name: str) -> None: """ Set the print name for this datatype. """ cdef char *cname = NULL name = asmpistr(name, &cname) CHKERR( MPI_Type_set_name(self.ob_mpi, cname) ) property name: """Print name.""" def __get__(self) -> str: return self.Get_name() def __set__(self, value: str): self.Set_name(value) # Fortran Handle # -------------- def py2f(self) -> int: """ """ return MPI_Type_c2f(self.ob_mpi) @classmethod def f2py(cls, arg: int) -> Datatype: """ """ return fromhandle(MPI_Type_f2c(arg)) # Python/NumPy interoperability # ----------------------------- def tocode(self) -> str: """ Get character code or type string from predefined MPI datatype. 
""" cdef const char *s = DatatypeCode(self.ob_mpi) if s != NULL: return pystr(s) raise ValueError("cannot map to character code or type string") @classmethod def fromcode(cls, code: str) -> Datatype: """ Get predefined MPI datatype from character code or type string. """ try: return TypeDict[code] except KeyError: raise ValueError(f"cannot map code {code!r} to MPI datatype") property typestr: """Type string.""" def __get__(self) -> str: if self.ob_mpi == MPI_DATATYPE_NULL: return "" cdef const char *s = DatatypeStr(self.ob_mpi) if s != NULL: return pystr(s) return f"V{mpiextent(self.ob_mpi)}" property typechar: """Character code.""" def __get__(self) -> str: if self.ob_mpi == MPI_DATATYPE_NULL: return "" cdef const char *s = DatatypeChar(self.ob_mpi) if s != NULL: return pystr(s) return "V" # Address Functions # ----------------- def Get_address(location: Buffer | Bottom) -> int: """ Get the address of a location in memory. """ cdef void *baseptr = MPI_BOTTOM if not is_BOTTOM(location): asbuffer_r(location, &baseptr, NULL) cdef MPI_Aint address = 0 CHKERR( MPI_Get_address(baseptr, &address) ) return address def Aint_add(Aint base: int, Aint disp: int) -> int: """ Return the sum of base address and displacement. """ return MPI_Aint_add(base, disp) def Aint_diff(Aint addr1: int, Aint addr2: int) -> int: """ Return the difference between absolute addresses. """ return MPI_Aint_diff(addr1, addr2) cdef Datatype __DATATYPE_NULL__ = def_Datatype( MPI_DATATYPE_NULL , "DATATYPE_NULL" ) # noqa cdef Datatype __PACKED__ = def_Datatype( MPI_PACKED , "PACKED" ) cdef Datatype __BYTE__ = def_Datatype( MPI_BYTE , "BYTE" ) cdef Datatype __AINT__ = def_Datatype( MPI_AINT , "AINT" ) cdef Datatype __OFFSET__ = def_Datatype( MPI_OFFSET , "OFFSET" ) cdef Datatype __COUNT__ = def_Datatype( MPI_COUNT , "COUNT" ) cdef Datatype __CHAR__ = def_Datatype( MPI_CHAR , "CHAR" ) # noqa cdef Datatype __WCHAR__ = def_Datatype( MPI_WCHAR , "WCHAR" ) # noqa cdef Datatype __SIGNED_CHAR__ = def_Datatype( MPI_SIGNED_CHAR , "SIGNED_CHAR" ) # noqa cdef Datatype __SHORT__ = def_Datatype( MPI_SHORT , "SHORT" ) # noqa cdef Datatype __INT__ = def_Datatype( MPI_INT , "INT" ) # noqa cdef Datatype __LONG__ = def_Datatype( MPI_LONG , "LONG" ) # noqa cdef Datatype __LONG_LONG__ = def_Datatype( MPI_LONG_LONG , "LONG_LONG" ) # noqa cdef Datatype __UNSIGNED_CHAR__ = def_Datatype( MPI_UNSIGNED_CHAR , "UNSIGNED_CHAR" ) # noqa cdef Datatype __UNSIGNED_SHORT__ = def_Datatype( MPI_UNSIGNED_SHORT , "UNSIGNED_SHORT" ) # noqa cdef Datatype __UNSIGNED__ = def_Datatype( MPI_UNSIGNED , "UNSIGNED" ) # noqa cdef Datatype __UNSIGNED_LONG__ = def_Datatype( MPI_UNSIGNED_LONG , "UNSIGNED_LONG" ) # noqa cdef Datatype __UNSIGNED_LONG_LONG__ = def_Datatype( MPI_UNSIGNED_LONG_LONG , "UNSIGNED_LONG_LONG" ) # noqa cdef Datatype __FLOAT__ = def_Datatype( MPI_FLOAT , "FLOAT" ) # noqa cdef Datatype __DOUBLE__ = def_Datatype( MPI_DOUBLE , "DOUBLE" ) # noqa cdef Datatype __LONG_DOUBLE__ = def_Datatype( MPI_LONG_DOUBLE , "LONG_DOUBLE" ) # noqa cdef Datatype __C_BOOL__ = def_Datatype( MPI_C_BOOL , "C_BOOL" ) # noqa cdef Datatype __INT8_T__ = def_Datatype( MPI_INT8_T , "INT8_T" ) # noqa cdef Datatype __INT16_T__ = def_Datatype( MPI_INT16_T , "INT16_T" ) # noqa cdef Datatype __INT32_T__ = def_Datatype( MPI_INT32_T , "INT32_T" ) # noqa cdef Datatype __INT64_T__ = def_Datatype( MPI_INT64_T , "INT64_T" ) # noqa cdef Datatype __UINT8_T__ = def_Datatype( MPI_UINT8_T , "UINT8_T" ) # noqa cdef Datatype __UINT16_T__ = def_Datatype( MPI_UINT16_T , "UINT16_T" ) # noqa cdef Datatype 
__UINT32_T__ = def_Datatype( MPI_UINT32_T , "UINT32_T" ) # noqa cdef Datatype __UINT64_T__ = def_Datatype( MPI_UINT64_T , "UINT64_T" ) # noqa cdef Datatype __C_COMPLEX__ = def_Datatype( MPI_C_COMPLEX , "C_COMPLEX" ) # noqa cdef Datatype __C_FLOAT_COMPLEX__ = __C_COMPLEX__ cdef Datatype __C_DOUBLE_COMPLEX__ = def_Datatype( MPI_C_DOUBLE_COMPLEX , "C_DOUBLE_COMPLEX" ) # noqa cdef Datatype __C_LONG_DOUBLE_COMPLEX__ = def_Datatype( MPI_C_LONG_DOUBLE_COMPLEX , "C_LONG_DOUBLE_COMPLEX" ) # noqa cdef Datatype __CXX_BOOL__ = def_Datatype( MPI_CXX_BOOL , "CXX_BOOL" ) # noqa cdef Datatype __CXX_FLOAT_COMPLEX__ = def_Datatype( MPI_CXX_FLOAT_COMPLEX , "CXX_FLOAT_COMPLEX" ) # noqa cdef Datatype __CXX_DOUBLE_COMPLEX__ = def_Datatype( MPI_CXX_DOUBLE_COMPLEX , "CXX_DOUBLE_COMPLEX" ) # noqa cdef Datatype __CXX_LONG_DOUBLE_COMPLEX__ = def_Datatype( MPI_CXX_LONG_DOUBLE_COMPLEX , "CXX_LONG_DOUBLE_COMPLEX" ) # noqa cdef Datatype __SHORT_INT__ = def_Datatype( MPI_SHORT_INT , "SHORT_INT" ) # noqa cdef Datatype __INT_INT__ = def_Datatype( MPI_2INT , "INT_INT" ) # noqa cdef Datatype __LONG_INT__ = def_Datatype( MPI_LONG_INT , "LONG_INT" ) # noqa cdef Datatype __FLOAT_INT__ = def_Datatype( MPI_FLOAT_INT , "FLOAT_INT" ) # noqa cdef Datatype __DOUBLE_INT__ = def_Datatype( MPI_DOUBLE_INT , "DOUBLE_INT" ) # noqa cdef Datatype __LONG_DOUBLE_INT__ = def_Datatype( MPI_LONG_DOUBLE_INT , "LONG_DOUBLE_INT" ) # noqa cdef Datatype __CHARACTER__ = def_Datatype( MPI_CHARACTER , "CHARACTER" ) # noqa cdef Datatype __LOGICAL__ = def_Datatype( MPI_LOGICAL , "LOGICAL" ) # noqa cdef Datatype __INTEGER__ = def_Datatype( MPI_INTEGER , "INTEGER" ) # noqa cdef Datatype __REAL__ = def_Datatype( MPI_REAL , "REAL" ) # noqa cdef Datatype __DOUBLE_PRECISION__ = def_Datatype( MPI_DOUBLE_PRECISION , "DOUBLE_PRECISION" ) # noqa cdef Datatype __COMPLEX__ = def_Datatype( MPI_COMPLEX , "COMPLEX" ) # noqa cdef Datatype __DOUBLE_COMPLEX__ = def_Datatype( MPI_DOUBLE_COMPLEX , "DOUBLE_COMPLEX" ) # noqa cdef Datatype __LOGICAL1__ = def_Datatype( MPI_LOGICAL1 , "LOGICAL1" ) cdef Datatype __LOGICAL2__ = def_Datatype( MPI_LOGICAL2 , "LOGICAL2" ) cdef Datatype __LOGICAL4__ = def_Datatype( MPI_LOGICAL4 , "LOGICAL4" ) cdef Datatype __LOGICAL8__ = def_Datatype( MPI_LOGICAL8 , "LOGICAL8" ) cdef Datatype __INTEGER1__ = def_Datatype( MPI_INTEGER1 , "INTEGER1" ) cdef Datatype __INTEGER2__ = def_Datatype( MPI_INTEGER2 , "INTEGER2" ) cdef Datatype __INTEGER4__ = def_Datatype( MPI_INTEGER4 , "INTEGER4" ) cdef Datatype __INTEGER8__ = def_Datatype( MPI_INTEGER8 , "INTEGER8" ) cdef Datatype __INTEGER16__ = def_Datatype( MPI_INTEGER16 , "INTEGER16" ) cdef Datatype __REAL2__ = def_Datatype( MPI_REAL2 , "REAL2" ) cdef Datatype __REAL4__ = def_Datatype( MPI_REAL4 , "REAL4" ) cdef Datatype __REAL8__ = def_Datatype( MPI_REAL8 , "REAL8" ) cdef Datatype __REAL16__ = def_Datatype( MPI_REAL16 , "REAL16" ) cdef Datatype __COMPLEX4__ = def_Datatype( MPI_COMPLEX4 , "COMPLEX4" ) cdef Datatype __COMPLEX8__ = def_Datatype( MPI_COMPLEX8 , "COMPLEX8" ) cdef Datatype __COMPLEX16__ = def_Datatype( MPI_COMPLEX16 , "COMPLEX16" ) cdef Datatype __COMPLEX32__ = def_Datatype( MPI_COMPLEX32 , "COMPLEX32" ) # Predefined datatype handles # --------------------------- DATATYPE_NULL = __DATATYPE_NULL__ #: Null datatype handle # MPI-specific datatypes PACKED = __PACKED__ BYTE = __BYTE__ AINT = __AINT__ OFFSET = __OFFSET__ COUNT = __COUNT__ # Elementary C datatypes CHAR = __CHAR__ WCHAR = __WCHAR__ SIGNED_CHAR = __SIGNED_CHAR__ SHORT = __SHORT__ INT = __INT__ LONG = __LONG__ LONG_LONG = __LONG_LONG__ 
UNSIGNED_CHAR = __UNSIGNED_CHAR__ UNSIGNED_SHORT = __UNSIGNED_SHORT__ UNSIGNED = __UNSIGNED__ UNSIGNED_LONG = __UNSIGNED_LONG__ UNSIGNED_LONG_LONG = __UNSIGNED_LONG_LONG__ FLOAT = __FLOAT__ DOUBLE = __DOUBLE__ LONG_DOUBLE = __LONG_DOUBLE__ # C99 datatypes C_BOOL = __C_BOOL__ INT8_T = __INT8_T__ INT16_T = __INT16_T__ INT32_T = __INT32_T__ INT64_T = __INT64_T__ UINT8_T = __UINT8_T__ UINT16_T = __UINT16_T__ UINT32_T = __UINT32_T__ UINT64_T = __UINT64_T__ C_COMPLEX = __C_COMPLEX__ C_FLOAT_COMPLEX = __C_FLOAT_COMPLEX__ C_DOUBLE_COMPLEX = __C_DOUBLE_COMPLEX__ C_LONG_DOUBLE_COMPLEX = __C_LONG_DOUBLE_COMPLEX__ # C++ datatypes CXX_BOOL = __CXX_BOOL__ CXX_FLOAT_COMPLEX = __CXX_FLOAT_COMPLEX__ CXX_DOUBLE_COMPLEX = __CXX_DOUBLE_COMPLEX__ CXX_LONG_DOUBLE_COMPLEX = __CXX_LONG_DOUBLE_COMPLEX__ # C Datatypes for reduction operations SHORT_INT = __SHORT_INT__ INT_INT = TWOINT = __INT_INT__ LONG_INT = __LONG_INT__ FLOAT_INT = __FLOAT_INT__ DOUBLE_INT = __DOUBLE_INT__ LONG_DOUBLE_INT = __LONG_DOUBLE_INT__ # Elementary Fortran datatypes CHARACTER = __CHARACTER__ LOGICAL = __LOGICAL__ INTEGER = __INTEGER__ REAL = __REAL__ DOUBLE_PRECISION = __DOUBLE_PRECISION__ COMPLEX = __COMPLEX__ DOUBLE_COMPLEX = __DOUBLE_COMPLEX__ # Size-specific Fortran datatypes LOGICAL1 = __LOGICAL1__ LOGICAL2 = __LOGICAL2__ LOGICAL4 = __LOGICAL4__ LOGICAL8 = __LOGICAL8__ INTEGER1 = __INTEGER1__ INTEGER2 = __INTEGER2__ INTEGER4 = __INTEGER4__ INTEGER8 = __INTEGER8__ INTEGER16 = __INTEGER16__ REAL2 = __REAL2__ REAL4 = __REAL4__ REAL8 = __REAL8__ REAL16 = __REAL16__ COMPLEX4 = __COMPLEX4__ COMPLEX8 = __COMPLEX8__ COMPLEX16 = __COMPLEX16__ COMPLEX32 = __COMPLEX32__ # Convenience aliases UNSIGNED_INT = __UNSIGNED__ SIGNED_SHORT = __SHORT__ SIGNED_INT = __INT__ SIGNED_LONG = __LONG__ SIGNED_LONG_LONG = __LONG_LONG__ BOOL = __C_BOOL__ SINT8_T = __INT8_T__ SINT16_T = __INT16_T__ SINT32_T = __INT32_T__ SINT64_T = __INT64_T__ F_BOOL = __LOGICAL__ F_INT = __INTEGER__ F_FLOAT = __REAL__ F_DOUBLE = __DOUBLE_PRECISION__ F_COMPLEX = __COMPLEX__ F_FLOAT_COMPLEX = __COMPLEX__ F_DOUBLE_COMPLEX = __DOUBLE_COMPLEX__ mpi4py-4.0.3/src/mpi4py/MPI.src/Errhandler.pyx000066400000000000000000000043521475341043600210270ustar00rootroot00000000000000cdef class Errhandler: """ Error handler. """ def __cinit__(self, Errhandler errhandler: Errhandler | None = None): cinit(self, errhandler) def __dealloc__(self): dealloc(self) def __richcmp__(self, other, int op): if not isinstance(other, Errhandler): return NotImplemented return richcmp(self, other, op) def __bool__(self) -> bool: return nonnull(self) def __reduce__(self) -> str | tuple[Any, ...]: return reduce_default(self) property handle: """MPI handle.""" def __get__(self) -> int: return tohandle(self) @classmethod def fromhandle(cls, handle: int) -> Errhandler: """ Create object from MPI handle. """ return fromhandle( handle) def free(self) -> None: """ Call `Free` if not null. """ safefree(self) # Freeing Errorhandlers # --------------------- def Free(self) -> None: """ Free an error handler. 
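# Illustrative usage sketch (assumes an MPI-launched process): install the
# predefined ERRORS_RETURN handler so MPI failures surface as MPI.Exception,
# then restore and free the previously installed handler.
from mpi4py import MPI

comm = MPI.COMM_WORLD
previous = comm.Get_errhandler()        # reference owned by the caller
comm.Set_errhandler(MPI.ERRORS_RETURN)
# ... MPI calls in this region raise MPI.Exception instead of aborting ...
comm.Set_errhandler(previous)
previous.Free()                         # release the reference from Get_errhandler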
""" cdef MPI_Errhandler save = self.ob_mpi CHKERR( MPI_Errhandler_free(&self.ob_mpi) ) if constobj(self): self.ob_mpi = save # Fortran Handle # -------------- def py2f(self) -> int: """ """ return MPI_Errhandler_c2f(self.ob_mpi) @classmethod def f2py(cls, arg: int) -> Errhandler: """ """ return fromhandle(MPI_Errhandler_f2c(arg)) cdef Errhandler __ERRHANDLER_NULL__ = def_Errhandler( MPI_ERRHANDLER_NULL , "ERRHANDLER_NULL" ) # noqa cdef Errhandler __ERRORS_RETURN__ = def_Errhandler( MPI_ERRORS_RETURN , "ERRORS_RETURN" ) # noqa cdef Errhandler __ERRORS_ABORT__ = def_Errhandler( MPI_ERRORS_ABORT , "ERRORS_ABORT" ) # noqa cdef Errhandler __ERRORS_ARE_FATAL__ = def_Errhandler( MPI_ERRORS_ARE_FATAL , "ERRORS_ARE_FATAL" ) # noqa # Predefined errhandler handles # ----------------------------- ERRHANDLER_NULL = __ERRHANDLER_NULL__ #: Null error handler ERRORS_RETURN = __ERRORS_RETURN__ #: Errors return error handler ERRORS_ABORT = __ERRORS_ABORT__ #: Errors abort error handler ERRORS_ARE_FATAL = __ERRORS_ARE_FATAL__ #: Errors are fatal error handler mpi4py-4.0.3/src/mpi4py/MPI.src/ErrorCode.pyx000066400000000000000000000106501475341043600206230ustar00rootroot00000000000000# Error Classes # ------------- # No Errors SUCCESS = MPI_SUCCESS ERR_LASTCODE = MPI_ERR_LASTCODE # Object Handles ERR_TYPE = MPI_ERR_TYPE ERR_REQUEST = MPI_ERR_REQUEST ERR_OP = MPI_ERR_OP ERR_GROUP = MPI_ERR_GROUP ERR_INFO = MPI_ERR_INFO ERR_ERRHANDLER = MPI_ERR_ERRHANDLER ERR_SESSION = MPI_ERR_SESSION ERR_COMM = MPI_ERR_COMM ERR_WIN = MPI_ERR_WIN ERR_FILE = MPI_ERR_FILE # Communication Arguments ERR_BUFFER = MPI_ERR_BUFFER ERR_COUNT = MPI_ERR_COUNT ERR_TAG = MPI_ERR_TAG ERR_RANK = MPI_ERR_RANK ERR_ROOT = MPI_ERR_ROOT ERR_TRUNCATE = MPI_ERR_TRUNCATE # Multiple Completion ERR_IN_STATUS = MPI_ERR_IN_STATUS ERR_PENDING = MPI_ERR_PENDING # Topology Arguments ERR_TOPOLOGY = MPI_ERR_TOPOLOGY ERR_DIMS = MPI_ERR_DIMS # Other Arguments ERR_ARG = MPI_ERR_ARG # Other Errors ERR_OTHER = MPI_ERR_OTHER ERR_UNKNOWN = MPI_ERR_UNKNOWN ERR_INTERN = MPI_ERR_INTERN # Attributes ERR_KEYVAL = MPI_ERR_KEYVAL # Memory Allocation ERR_NO_MEM = MPI_ERR_NO_MEM # Info Object ERR_INFO_KEY = MPI_ERR_INFO_KEY ERR_INFO_VALUE = MPI_ERR_INFO_VALUE ERR_INFO_NOKEY = MPI_ERR_INFO_NOKEY # Dynamic Process Management ERR_SPAWN = MPI_ERR_SPAWN ERR_PORT = MPI_ERR_PORT ERR_SERVICE = MPI_ERR_SERVICE ERR_NAME = MPI_ERR_NAME ERR_PROC_ABORTED = MPI_ERR_PROC_ABORTED # One-Sided Communications ERR_BASE = MPI_ERR_BASE ERR_SIZE = MPI_ERR_SIZE ERR_DISP = MPI_ERR_DISP ERR_ASSERT = MPI_ERR_ASSERT ERR_LOCKTYPE = MPI_ERR_LOCKTYPE ERR_RMA_CONFLICT = MPI_ERR_RMA_CONFLICT ERR_RMA_SYNC = MPI_ERR_RMA_SYNC ERR_RMA_RANGE = MPI_ERR_RMA_RANGE ERR_RMA_ATTACH = MPI_ERR_RMA_ATTACH ERR_RMA_SHARED = MPI_ERR_RMA_SHARED ERR_RMA_FLAVOR = MPI_ERR_RMA_FLAVOR # Input/Output ERR_BAD_FILE = MPI_ERR_BAD_FILE ERR_NO_SUCH_FILE = MPI_ERR_NO_SUCH_FILE ERR_FILE_EXISTS = MPI_ERR_FILE_EXISTS ERR_FILE_IN_USE = MPI_ERR_FILE_IN_USE ERR_AMODE = MPI_ERR_AMODE ERR_ACCESS = MPI_ERR_ACCESS ERR_READ_ONLY = MPI_ERR_READ_ONLY ERR_NO_SPACE = MPI_ERR_NO_SPACE ERR_QUOTA = MPI_ERR_QUOTA ERR_NOT_SAME = MPI_ERR_NOT_SAME ERR_IO = MPI_ERR_IO ERR_UNSUPPORTED_OPERATION = MPI_ERR_UNSUPPORTED_OPERATION ERR_UNSUPPORTED_DATAREP = MPI_ERR_UNSUPPORTED_DATAREP ERR_CONVERSION = MPI_ERR_CONVERSION ERR_DUP_DATAREP = MPI_ERR_DUP_DATAREP ERR_VALUE_TOO_LARGE = MPI_ERR_VALUE_TOO_LARGE # Process Fault Tolerance ERR_REVOKED = MPI_ERR_REVOKED ERR_PROC_FAILED = MPI_ERR_PROC_FAILED ERR_PROC_FAILED_PENDING = MPI_ERR_PROC_FAILED_PENDING def 
Get_error_class(int errorcode: int) -> int: """ Convert an *error code* into an *error class*. """ cdef int errorclass = MPI_SUCCESS CHKERR( MPI_Error_class(errorcode, &errorclass) ) return errorclass def Get_error_string(int errorcode: int) -> str: """ Return the *error string* for a given *error class* or *error code*. """ cdef char string[MPI_MAX_ERROR_STRING+1] cdef int resultlen = 0 CHKERR( MPI_Error_string(errorcode, string, &resultlen) ) return tompistr(string, resultlen) def Add_error_class() -> int: """ Add an *error class* to the known error classes. """ cdef int errorclass = MPI_SUCCESS CHKERR( MPI_Add_error_class(&errorclass) ) return errorclass def Remove_error_class(int errorclass: int) -> None: """ Remove an *error class* from the known error classes. """ CHKERR( MPI_Remove_error_class(errorclass) ) def Add_error_code(int errorclass: int) -> int: """ Add an *error code* to an *error class*. """ cdef int errorcode = MPI_SUCCESS CHKERR( MPI_Add_error_code(errorclass, &errorcode) ) return errorcode def Remove_error_code(int errorcode: int) -> None: """ Remove an *error code* from the known error codes. """ CHKERR( MPI_Remove_error_code(errorcode) ) def Add_error_string(int errorcode: int, string: str) -> None: """ Associate an *error string* with an *error class* or *error code*. """ cdef char *cstring = NULL string = asmpistr(string, &cstring) CHKERR( MPI_Add_error_string(errorcode, cstring) ) def Remove_error_string(int errorcode: int) -> None: """ Remove *error string* association from *error class* or *error code*. """ CHKERR( MPI_Remove_error_string(errorcode) ) mpi4py-4.0.3/src/mpi4py/MPI.src/Exception.pyx000066400000000000000000000044301475341043600206740ustar00rootroot00000000000000# Exception Class # --------------- cdef extern from "Python.h": ctypedef class __builtin__.RuntimeError [object PyBaseExceptionObject]: pass cdef class Exception(RuntimeError): """ Exception class. """ cdef int ob_mpi def __cinit__(self, int ierr: int = 0): if ierr < MPI_SUCCESS: ierr = MPI_ERR_UNKNOWN self.ob_mpi = ierr RuntimeError.__init__(self, ierr) def __richcmp__(Exception self, object error, int op): cdef int ierr = self.ob_mpi if op == Py_LT: return ierr < error if op == Py_LE: return ierr <= error if op == Py_EQ: return ierr == error if op == Py_NE: return ierr != error if op == Py_GT: return ierr > error if op == Py_GE: return ierr >= error def __hash__(self) -> int: return hash(self.ob_mpi) def __bool__(self) -> bool: return self.ob_mpi != MPI_SUCCESS def __int__(self) -> int: return self.ob_mpi def __repr__(self) -> str: return f"MPI.Exception({self.ob_mpi})" def __str__(self) -> str: if MPI_VERSION < 4 and not mpi_active(): return f"error code: {self.ob_mpi}" # ~> legacy return self.Get_error_string() def Get_error_code(self) -> int: """ Error code. """ cdef int errorcode = MPI_SUCCESS errorcode = self.ob_mpi return errorcode property error_code: """Error code.""" def __get__(self) -> int: return self.Get_error_code() def Get_error_class(self) -> int: """ Error class. """ cdef int errorclass = MPI_SUCCESS CHKERR( MPI_Error_class(self.ob_mpi, &errorclass) ) return errorclass property error_class: """Error class.""" def __get__(self) -> int: return self.Get_error_class() def Get_error_string(self) -> str: """ Error string. 
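# Illustrative usage sketch (assumes MPI is initialized, which happens on
# import by default): error codes, error classes and error strings are related
# through the functions above, and MPI.Exception wraps a plain error code.
from mpi4py import MPI

code = MPI.ERR_TRUNCATE                    # any predefined error code
print(MPI.Get_error_class(code))           # its error class
print(MPI.Get_error_string(code))          # implementation-defined message
exc = MPI.Exception(code)                  # exception type raised by mpi4py calls
print(exc.error_class, exc.error_string)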
""" cdef char string[MPI_MAX_ERROR_STRING+1] cdef int resultlen = 0 CHKERR( MPI_Error_string(self.ob_mpi, string, &resultlen) ) return tompistr(string, resultlen) property error_string: """Error string.""" def __get__(self) -> str: return self.Get_error_string() MPIException = Exception mpi4py-4.0.3/src/mpi4py/MPI.src/File.pyx000066400000000000000000000656121475341043600176260ustar00rootroot00000000000000# Opening modes # ------------- MODE_RDONLY = MPI_MODE_RDONLY #: Read only MODE_WRONLY = MPI_MODE_WRONLY #: Write only MODE_RDWR = MPI_MODE_RDWR #: Reading and writing MODE_CREATE = MPI_MODE_CREATE #: Create the file if it does not exist MODE_EXCL = MPI_MODE_EXCL #: Error if creating file that already exists MODE_DELETE_ON_CLOSE = MPI_MODE_DELETE_ON_CLOSE #: Delete file on close MODE_UNIQUE_OPEN = MPI_MODE_UNIQUE_OPEN #: File will not be concurrently opened elsewhere MODE_SEQUENTIAL = MPI_MODE_SEQUENTIAL #: File will only be accessed sequentially MODE_APPEND = MPI_MODE_APPEND #: Set initial position of all file pointers to end of file # Positioning # ----------- SEEK_SET = MPI_SEEK_SET #: File pointer is set to offset SEEK_CUR = MPI_SEEK_CUR #: File pointer is set to the current position plus offset SEEK_END = MPI_SEEK_END #: File pointer is set to the end plus offset DISPLACEMENT_CURRENT = MPI_DISPLACEMENT_CURRENT #: Special displacement value for files opened in sequential mode DISP_CUR = MPI_DISPLACEMENT_CURRENT #: Convenience alias for `DISPLACEMENT_CURRENT` cdef class File: """ File I/O context. """ def __cinit__(self, File file: File | None = None): cinit(self, file) def __dealloc__(self): dealloc(self) def __richcmp__(self, other, int op): if not isinstance(other, File): return NotImplemented return richcmp(self, other, op) def __bool__(self) -> bool: return nonnull(self) def __reduce__(self) -> str | tuple[Any, ...]: return reduce_default(self) property handle: """MPI handle.""" def __get__(self) -> int: return tohandle(self) @classmethod def fromhandle(cls, handle: int) -> File: """ Create object from MPI handle. """ return fromhandle( handle) def free(self) -> None: """ Call `Close` if not null. """ safefree(self) # File Manipulation # ----------------- @classmethod def Open( cls, Intracomm comm: Intracomm, filename: PathLike[AnyStr] | str | bytes, int amode: int = MODE_RDONLY, Info info: Info = INFO_NULL, ) -> Self: """ Open a file. """ cdef char *cfilename = NULL filename = asmpifspath(filename, &cfilename) cdef File file = New(cls) with nogil: CHKERR( MPI_File_open( comm.ob_mpi, cfilename, amode, info.ob_mpi, &file.ob_mpi) ) file_set_eh(file.ob_mpi) return file def Close(self) -> None: """ Close a file. """ cdef MPI_File save = self.ob_mpi with nogil: CHKERR( MPI_File_close(&self.ob_mpi) ) if constobj(self): self.ob_mpi = save @classmethod def Delete( cls, filename: PathLike[AnyStr] | str | bytes, Info info: Info = INFO_NULL, ) -> None: """ Delete a file. """ cdef char *cfilename = NULL filename = asmpifspath(filename, &cfilename) with nogil: CHKERR( MPI_File_delete(cfilename, info.ob_mpi) ) def Set_size(self, Offset size: int) -> None: """ Set the file size. """ with nogil: CHKERR( MPI_File_set_size(self.ob_mpi, size) ) def Preallocate(self, Offset size: int) -> None: """ Preallocate storage space for a file. """ with nogil: CHKERR( MPI_File_preallocate(self.ob_mpi, size) ) def Get_size(self) -> int: """ Return the file size. 
""" cdef MPI_Offset size = 0 with nogil: CHKERR( MPI_File_get_size(self.ob_mpi, &size) ) return size property size: """Size (in bytes).""" def __get__(self) -> int: return self.Get_size() def Get_amode(self) -> int: """ Return the file access mode. """ cdef int amode = 0 with nogil: CHKERR( MPI_File_get_amode(self.ob_mpi, &amode) ) return amode property amode: """Access mode.""" def __get__(self) -> int: return self.Get_amode() # File Group # ---------- def Get_group(self) -> Group: """ Access the group of processes that opened the file. """ cdef Group group = New(Group) with nogil: CHKERR( MPI_File_get_group(self.ob_mpi, &group.ob_mpi) ) return group property group: """Group.""" def __get__(self) -> Group: return self.Get_group() property group_size: """Group size.""" def __get__(self) -> int: cdef MPI_Group group = MPI_GROUP_NULL cdef int group_size = -1 CHKERR( MPI_File_get_group(self.ob_mpi, &group) ) try: CHKERR( MPI_Group_size(group, &group_size) ) finally: CHKERR( MPI_Group_free(&group) ) return group_size property group_rank: """Group rank.""" def __get__(self) -> int: cdef MPI_Group group = MPI_GROUP_NULL cdef int group_rank = MPI_PROC_NULL CHKERR( MPI_File_get_group(self.ob_mpi, &group) ) try: CHKERR( MPI_Group_rank(group, &group_rank) ) finally: CHKERR( MPI_Group_free(&group) ) return group_rank # File Info # --------- def Set_info(self, Info info: Info) -> None: """ Set new values for the hints associated with a file. """ with nogil: CHKERR( MPI_File_set_info(self.ob_mpi, info.ob_mpi) ) def Get_info(self) -> Info: """ Return the current hints for a file. """ cdef Info info = New(Info) with nogil: CHKERR( MPI_File_get_info(self.ob_mpi, &info.ob_mpi) ) return info property info: """Info hints.""" def __get__(self) -> Info: return self.Get_info() def __set__(self, value: Info): self.Set_info(value) # File Views # ---------- def Set_view( self, Offset disp: int = 0, Datatype etype: Datatype = BYTE, Datatype filetype: Datatype | None = None, datarep: str = "native", Info info: Info = INFO_NULL, ) -> None: """ Set the file view. """ cdef char *cdatarep = b"native" if datarep is not None: datarep = asmpistr(datarep, &cdatarep) cdef MPI_Datatype cetype = etype.ob_mpi cdef MPI_Datatype cftype = cetype if filetype is not None: cftype = filetype.ob_mpi with nogil: CHKERR( MPI_File_set_view( self.ob_mpi, disp, cetype, cftype, cdatarep, info.ob_mpi) ) def Get_view(self) -> tuple[int, Datatype, Datatype, str]: """ Return the file view. """ cdef MPI_Offset disp = 0 cdef MPI_Datatype cetype = MPI_DATATYPE_NULL cdef MPI_Datatype cftype = MPI_DATATYPE_NULL cdef char cdatarep[MPI_MAX_DATAREP_STRING+1] cdatarep[0] = 0 # just in case with nogil: CHKERR( MPI_File_get_view( self.ob_mpi, &disp, &cetype, &cftype, cdatarep) ) cdatarep[MPI_MAX_DATAREP_STRING] = 0 # just in case cdef Datatype etype = ref_Datatype(cetype) cdef Datatype ftype = ref_Datatype(cftype) cdef object datarep = mpistr(cdatarep) return (disp, etype, ftype, datarep) # Data Access # ----------- # Data Access with Explicit Offsets # --------------------------------- def Read_at( self, Offset offset: int, buf: BufSpec, Status status: Status | None = None, ) -> None: """ Read using explicit offset. """ cdef _p_msg_io m = message_io_read(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_read_at_c( self.ob_mpi, offset, m.buf, m.count, m.dtype, statusp) ) def Read_at_all( self, Offset offset: int, buf: BufSpec, Status status: Status | None = None, ) -> None: """ Collective read using explicit offset. 
""" cdef _p_msg_io m = message_io_read(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_read_at_all_c( self.ob_mpi, offset, m.buf, m.count, m.dtype, statusp) ) def Write_at( self, Offset offset: int, buf: BufSpec, Status status: Status | None = None, ) -> None: """ Write using explicit offset. """ cdef _p_msg_io m = message_io_write(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_write_at_c( self.ob_mpi, offset, m.buf, m.count, m.dtype, statusp) ) def Write_at_all( self, Offset offset: int, buf: BufSpec, Status status: Status | None = None, ) -> None: """ Collective write using explicit offset. """ cdef _p_msg_io m = message_io_write(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_write_at_all_c( self.ob_mpi, offset, m.buf, m.count, m.dtype, statusp) ) def Iread_at( self, Offset offset: int, buf: BufSpec, ) -> Request: """ Nonblocking read using explicit offset. """ cdef _p_msg_io m = message_io_read(buf) cdef Request request = New(Request) with nogil: CHKERR( MPI_File_iread_at_c( self.ob_mpi, offset, m.buf, m.count, m.dtype, &request.ob_mpi) ) request.ob_buf = m return request def Iread_at_all( self, Offset offset: int, buf: BufSpec, ) -> Request: """ Nonblocking collective read using explicit offset. """ cdef _p_msg_io m = message_io_read(buf) cdef Request request = New(Request) with nogil: CHKERR( MPI_File_iread_at_all_c( self.ob_mpi, offset, m.buf, m.count, m.dtype, &request.ob_mpi) ) request.ob_buf = m return request def Iwrite_at( self, Offset offset: int, buf: BufSpec, ) -> Request: """ Nonblocking write using explicit offset. """ cdef _p_msg_io m = message_io_write(buf) cdef Request request = New(Request) with nogil: CHKERR( MPI_File_iwrite_at_c( self.ob_mpi, offset, m.buf, m.count, m.dtype, &request.ob_mpi) ) request.ob_buf = m return request def Iwrite_at_all( self, Offset offset: int, buf: BufSpec, ) -> Request: """ Nonblocking collective write using explicit offset. """ cdef _p_msg_io m = message_io_write(buf) cdef Request request = New(Request) with nogil: CHKERR( MPI_File_iwrite_at_all_c( self.ob_mpi, offset, m.buf, m.count, m.dtype, &request.ob_mpi) ) request.ob_buf = m return request # Data Access with Individual File Pointers # ----------------------------------------- def Read( self, buf: BufSpec, Status status: Status | None = None, ) -> None: """ Read using individual file pointer. """ cdef _p_msg_io m = message_io_read(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_read_c( self.ob_mpi, m.buf, m.count, m.dtype, statusp) ) def Read_all( self, buf: BufSpec, Status status: Status | None = None, ) -> None: """ Collective read using individual file pointer. """ cdef _p_msg_io m = message_io_read(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_read_all_c( self.ob_mpi, m.buf, m.count, m.dtype, statusp) ) def Write( self, buf: BufSpec, Status status: Status | None = None, ) -> None: """ Write using individual file pointer. """ cdef _p_msg_io m = message_io_write(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_write_c( self.ob_mpi, m.buf, m.count, m.dtype, statusp) ) def Write_all( self, buf: BufSpec, Status status: Status | None = None, ) -> None: """ Collective write using individual file pointer. 
""" cdef _p_msg_io m = message_io_write(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_write_all_c( self.ob_mpi, m.buf, m.count, m.dtype, statusp) ) def Iread( self, buf: BufSpec, ) -> Request: """ Nonblocking read using individual file pointer. """ cdef _p_msg_io m = message_io_read(buf) cdef Request request = New(Request) with nogil: CHKERR( MPI_File_iread_c( self.ob_mpi, m.buf, m.count, m.dtype, &request.ob_mpi) ) request.ob_buf = m return request def Iread_all( self, buf: BufSpec, ) -> Request: """ Nonblocking collective read using individual file pointer. """ cdef _p_msg_io m = message_io_read(buf) cdef Request request = New(Request) with nogil: CHKERR( MPI_File_iread_all_c( self.ob_mpi, m.buf, m.count, m.dtype, &request.ob_mpi) ) request.ob_buf = m return request def Iwrite( self, buf: BufSpec, ) -> Request: """ Nonblocking write using individual file pointer. """ cdef _p_msg_io m = message_io_write(buf) cdef Request request = New(Request) with nogil: CHKERR( MPI_File_iwrite_c( self.ob_mpi, m.buf, m.count, m.dtype, &request.ob_mpi) ) request.ob_buf = m return request def Iwrite_all( self, buf: BufSpec, ) -> Request: """ Nonblocking collective write using individual file pointer. """ cdef _p_msg_io m = message_io_write(buf) cdef Request request = New(Request) with nogil: CHKERR( MPI_File_iwrite_all_c( self.ob_mpi, m.buf, m.count, m.dtype, &request.ob_mpi) ) request.ob_buf = m return request def Seek(self, Offset offset: int, int whence: int = SEEK_SET) -> None: """ Update the individual file pointer. """ with nogil: CHKERR( MPI_File_seek(self.ob_mpi, offset, whence) ) def Get_position(self) -> int: """ Return the current position of the individual file pointer. .. note:: Position is measured in etype units relative to the current file view. """ cdef MPI_Offset offset = 0 with nogil: CHKERR( MPI_File_get_position(self.ob_mpi, &offset) ) return offset def Get_byte_offset(self, Offset offset: int) -> int: """ Return the absolute byte position in the file. .. note:: Input *offset* is measured in etype units relative to the current file view. """ cdef MPI_Offset disp = 0 with nogil: CHKERR( MPI_File_get_byte_offset( self.ob_mpi, offset, &disp) ) return disp # Data Access with Shared File Pointers # ------------------------------------- def Read_shared( self, buf: BufSpec, Status status: Status | None = None, ) -> None: """ Read using shared file pointer. """ cdef _p_msg_io m = message_io_read(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_read_shared_c( self.ob_mpi, m.buf, m.count, m.dtype, statusp) ) def Write_shared( self, buf: BufSpec, Status status: Status | None = None, ) -> None: """ Write using shared file pointer. """ cdef _p_msg_io m = message_io_write(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_write_shared_c( self.ob_mpi, m.buf, m.count, m.dtype, statusp) ) def Iread_shared( self, buf: BufSpec, ) -> Request: """ Nonblocking read using shared file pointer. """ cdef _p_msg_io m = message_io_read(buf) cdef Request request = New(Request) with nogil: CHKERR( MPI_File_iread_shared_c( self.ob_mpi, m.buf, m.count, m.dtype, &request.ob_mpi) ) request.ob_buf = m return request def Iwrite_shared( self, buf: BufSpec, ) -> Request: """ Nonblocking write using shared file pointer. 
""" cdef _p_msg_io m = message_io_write(buf) cdef Request request = New(Request) with nogil: CHKERR( MPI_File_iwrite_shared_c( self.ob_mpi, m.buf, m.count, m.dtype, &request.ob_mpi) ) request.ob_buf = m return request def Read_ordered( self, buf: BufSpec, Status status: Status | None = None, ) -> None: """ Collective read using shared file pointer. """ cdef _p_msg_io m = message_io_read(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_read_ordered_c( self.ob_mpi, m.buf, m.count, m.dtype, statusp) ) def Write_ordered( self, buf: BufSpec, Status status: Status | None = None, ) -> None: """ Collective write using shared file pointer. """ cdef _p_msg_io m = message_io_write(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_write_ordered_c( self.ob_mpi, m.buf, m.count, m.dtype, statusp) ) def Seek_shared( self, Offset offset: int, int whence: int = SEEK_SET, ) -> None: """ Update the shared file pointer. """ with nogil: CHKERR( MPI_File_seek_shared( self.ob_mpi, offset, whence) ) def Get_position_shared(self) -> int: """ Return the current position of the shared file pointer. .. note:: Position is measured in etype units relative to the current view. """ cdef MPI_Offset offset = 0 with nogil: CHKERR( MPI_File_get_position_shared( self.ob_mpi, &offset) ) return offset # Split Collective Data Access Routines # ------------------------------------- # explicit offset def Read_at_all_begin( self, Offset offset: int, buf: BufSpec, ) -> None: """ Start a split collective read using explicit offset. """ cdef _p_msg_io m = message_io_read(buf) with nogil: CHKERR( MPI_File_read_at_all_begin_c( self.ob_mpi, offset, m.buf, m.count, m.dtype) ) def Read_at_all_end( self, buf: BufSpec, Status status: Status | None = None, ) -> None: """ Complete a split collective read using explicit offset. """ cdef _p_msg_io m = message_io_read(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_read_at_all_end( self.ob_mpi, m.buf, statusp) ) def Write_at_all_begin( self, Offset offset: int, buf: BufSpec, ) -> None: """ Start a split collective write using explicit offset. """ cdef _p_msg_io m = message_io_write(buf) with nogil: CHKERR( MPI_File_write_at_all_begin_c( self.ob_mpi, offset, m.buf, m.count, m.dtype) ) def Write_at_all_end( self, buf: BufSpec, Status status: Status | None = None, ) -> None: """ Complete a split collective write using explicit offset. """ cdef _p_msg_io m = message_io_write(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_write_at_all_end( self.ob_mpi, m.buf, statusp) ) # individual file pointer def Read_all_begin( self, buf: BufSpec, ) -> None: """ Start a split collective read using individual file pointer. """ cdef _p_msg_io m = message_io_read(buf) with nogil: CHKERR( MPI_File_read_all_begin_c( self.ob_mpi, m.buf, m.count, m.dtype) ) def Read_all_end( self, buf: BufSpec, Status status: Status | None = None, ) -> None: """ Complete a split collective read using individual file pointer. """ cdef _p_msg_io m = message_io_read(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_read_all_end( self.ob_mpi, m.buf, statusp) ) def Write_all_begin( self, buf: BufSpec, ) -> None: """ Start a split collective write using individual file pointer. 
""" cdef _p_msg_io m = message_io_write(buf) with nogil: CHKERR( MPI_File_write_all_begin_c( self.ob_mpi, m.buf, m.count, m.dtype) ) def Write_all_end( self, buf: BufSpec, Status status: Status | None = None, ) -> None: """ Complete a split collective write using individual file pointer. """ cdef _p_msg_io m = message_io_write(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_write_all_end( self.ob_mpi, m.buf, statusp) ) # shared file pointer def Read_ordered_begin( self, buf: BufSpec, ) -> None: """ Start a split collective read using shared file pointer. """ cdef _p_msg_io m = message_io_read(buf) with nogil: CHKERR( MPI_File_read_ordered_begin_c( self.ob_mpi, m.buf, m.count, m.dtype) ) def Read_ordered_end( self, buf: BufSpec, Status status: Status | None = None, ) -> None: """ Complete a split collective read using shared file pointer. """ cdef _p_msg_io m = message_io_read(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_read_ordered_end( self.ob_mpi, m.buf, statusp) ) def Write_ordered_begin( self, buf: BufSpec, ) -> None: """ Start a split collective write using shared file pointer. """ cdef _p_msg_io m = message_io_write(buf) with nogil: CHKERR( MPI_File_write_ordered_begin_c( self.ob_mpi, m.buf, m.count, m.dtype) ) def Write_ordered_end( self, buf: BufSpec, Status status: Status | None = None, ) -> None: """ Complete a split collective write using shared file pointer. """ cdef _p_msg_io m = message_io_write(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_write_ordered_end( self.ob_mpi, m.buf, statusp) ) # File Interoperability # --------------------- def Get_type_extent(self, Datatype datatype: Datatype) -> int: """ Return the extent of datatype in the file. """ cdef MPI_Count extent = 0 with nogil: CHKERR( MPI_File_get_type_extent_c( self.ob_mpi, datatype.ob_mpi, &extent) ) return extent # Consistency and Semantics # ------------------------- def Set_atomicity(self, bint flag: bool) -> None: """ Set the atomicity mode. """ with nogil: CHKERR( MPI_File_set_atomicity(self.ob_mpi, flag) ) def Get_atomicity(self) -> bool: """ Return the atomicity mode. """ cdef int flag = 0 with nogil: CHKERR( MPI_File_get_atomicity(self.ob_mpi, &flag) ) return flag property atomicity: """Atomicity mode.""" def __get__(self) -> bool: return self.Get_atomicity() def __set__(self, value: bool): self.Set_atomicity(value) def Sync(self) -> None: """ Causes all previous writes to be transferred to the storage device. """ with nogil: CHKERR( MPI_File_sync(self.ob_mpi) ) # Error Handling # -------------- @classmethod def Create_errhandler( cls, errhandler_fn: Callable[[File, int], None], ) -> Errhandler: """ Create a new error handler for files. """ cdef Errhandler errhandler = New(Errhandler) cdef MPI_File_errhandler_function *fn = NULL cdef int index = errhdl_new(errhandler_fn, &fn) try: CHKERR( MPI_File_create_errhandler(fn, &errhandler.ob_mpi) ) except: # ~> uncovered # noqa errhdl_del(&index, fn) # ~> uncovered raise # ~> uncovered return errhandler def Get_errhandler(self) -> Errhandler: """ Get the error handler for a file. """ cdef Errhandler errhandler = New(Errhandler) CHKERR( MPI_File_get_errhandler(self.ob_mpi, &errhandler.ob_mpi) ) return errhandler def Set_errhandler(self, Errhandler errhandler: Errhandler) -> None: """ Set the error handler for a file. 
""" CHKERR( MPI_File_set_errhandler(self.ob_mpi, errhandler.ob_mpi) ) def Call_errhandler(self, int errorcode: int) -> None: """ Call the error handler installed on a file. """ CHKERR( MPI_File_call_errhandler(self.ob_mpi, errorcode) ) # Fortran Handle # -------------- def py2f(self) -> int: """ """ return MPI_File_c2f(self.ob_mpi) @classmethod def f2py(cls, arg: int) -> File: """ """ return fromhandle(MPI_File_f2c(arg)) cdef File __FILE_NULL__ = def_File( MPI_FILE_NULL , "FILE_NULL" ) # Predefined file handles # ----------------------- FILE_NULL = __FILE_NULL__ #: Null file handle # User-defined data representations # --------------------------------- def Register_datarep( datarep: str, read_fn: Callable[[Buffer, Datatype, int, Buffer, int], None], write_fn: Callable[[Buffer, Datatype, int, Buffer, int], None], extent_fn: Callable[[Datatype], int], ) -> None: """ Register user-defined data representations. """ cdef char *cdatarep = NULL datarep = asmpistr(datarep, &cdatarep) cdef object state = _p_datarep(read_fn, write_fn, extent_fn) cdef MPI_Datarep_conversion_function_c *rd = MPI_CONVERSION_FN_NULL_C cdef MPI_Datarep_conversion_function_c *wr = MPI_CONVERSION_FN_NULL_C cdef MPI_Datarep_extent_function *ex = datarep_extent_fn cdef void *xs = state if read_fn is not None: rd = datarep_read_fn if write_fn is not None: wr = datarep_write_fn CHKERR ( MPI_Register_datarep_c(cdatarep, rd, wr, ex, xs) ) with datarep_lock: datarep_registry[datarep] = state mpi4py-4.0.3/src/mpi4py/MPI.src/Group.pyx000066400000000000000000000167201475341043600200370ustar00rootroot00000000000000cdef class Group: """ Group of processes. """ def __cinit__(self, Group group: Group | None = None): cinit(self, group) def __dealloc__(self): dealloc(self) def __richcmp__(self, other, int op): if not isinstance(other, Group): return NotImplemented return richcmp(self, other, op) def __bool__(self) -> bool: return nonnull(self) def __reduce__(self) -> str | tuple[Any, ...]: return reduce_default(self) property handle: """MPI handle.""" def __get__(self) -> int: return tohandle(self) @classmethod def fromhandle(cls, handle: int) -> Group: """ Create object from MPI handle. """ return fromhandle( handle) def free(self) -> None: """ Call `Free` if not null or predefined. """ safefree(self) # Group Accessors # --------------- def Get_size(self) -> int: """ Return the number of processes in a group. """ cdef int size = -1 CHKERR( MPI_Group_size(self.ob_mpi, &size) ) return size property size: """Number of processes.""" def __get__(self) -> int: return self.Get_size() def Get_rank(self) -> int: """ Return the rank of this process in a group. """ cdef int rank = MPI_PROC_NULL CHKERR( MPI_Group_rank(self.ob_mpi, &rank) ) return rank property rank: """Rank of this process.""" def __get__(self) -> int: return self.Get_rank() def Translate_ranks( self, ranks: Sequence[int] | None = None, Group group: Group | None = None, ) -> list[int]: """ Translate ranks in a group to those in another group. 
""" cdef MPI_Group group1 = MPI_GROUP_NULL cdef MPI_Group group2 = MPI_GROUP_NULL cdef int n = 0, *iranks1 = NULL, *iranks2 = NULL # cdef unused1 = None if ranks is not None: unused1 = getarray(ranks, &n, &iranks1) else: CHKERR( MPI_Group_size(self.ob_mpi, &n) ) unused1 = newarray(n, &iranks1) for i in range(n): iranks1[i] = i cdef unused2 = newarray(n, &iranks2) # group1 = self.ob_mpi if group is not None: group2 = group.ob_mpi else: CHKERR( MPI_Comm_group(MPI_COMM_WORLD, &group2) ) try: CHKERR( MPI_Group_translate_ranks( group1, n, iranks1, group2, iranks2) ) finally: if group is None: CHKERR( MPI_Group_free(&group2) ) # cdef object ranks2 = [iranks2[i] for i in range(n)] return ranks2 def Compare(self, Group group: Group) -> int: """ Compare two groups. """ cdef int flag = MPI_UNEQUAL CHKERR( MPI_Group_compare( self.ob_mpi, group.ob_mpi, &flag) ) return flag # Group Constructors # ------------------ def Dup(self) -> Self: """ Duplicate a group. """ cdef Group group = New(type(self)) CHKERR( MPI_Group_union(self.ob_mpi, MPI_GROUP_EMPTY, &group.ob_mpi) ) return group @classmethod def Union(cls, Group group1: Group, Group group2: Group) -> Self: """ Create a new group from the union of two existing groups. """ cdef Group group = New(cls) CHKERR( MPI_Group_union( group1.ob_mpi, group2.ob_mpi, &group.ob_mpi) ) return group @classmethod def Intersection(cls, Group group1: Group, Group group2: Group) -> Self: """ Create a new group from the intersection of two existing groups. """ cdef Group group = New(cls) CHKERR( MPI_Group_intersection( group1.ob_mpi, group2.ob_mpi, &group.ob_mpi) ) return group Intersect = Intersection @classmethod def Difference(cls, Group group1: Group, Group group2: Group) -> Self: """ Create a new group from the difference of two existing groups. """ cdef Group group = New(cls) CHKERR( MPI_Group_difference( group1.ob_mpi, group2.ob_mpi, &group.ob_mpi) ) return group def Incl(self, ranks: Sequence[int]) -> Self: """ Create a new group by including listed members. """ cdef int n = 0, *iranks = NULL ranks = getarray(ranks, &n, &iranks) cdef Group group = New(type(self)) CHKERR( MPI_Group_incl(self.ob_mpi, n, iranks, &group.ob_mpi) ) return group def Excl(self, ranks: Sequence[int]) -> Self: """ Create a new group by excluding listed members. """ cdef int n = 0, *iranks = NULL ranks = getarray(ranks, &n, &iranks) cdef Group group = New(type(self)) CHKERR( MPI_Group_excl(self.ob_mpi, n, iranks, &group.ob_mpi) ) return group def Range_incl(self, ranks: Sequence[tuple[int, int, int]]) -> Self: """ Create a new group by including ranges of members. """ cdef int *p = NULL, (*ranges)[3] # = NULL ## XXX cython fails ranges = NULL cdef int n = len(ranks) cdef unused1 = allocate(n, sizeof(int[3]), &ranges) for i in range(n): p = ranges[i] p[0], p[1], p[2] = ranks[i] cdef Group group = New(type(self)) CHKERR( MPI_Group_range_incl(self.ob_mpi, n, ranges, &group.ob_mpi) ) return group def Range_excl(self, ranks: Sequence[tuple[int, int, int]]) -> Self: """ Create a new group by excluding ranges of members. 
""" cdef int *p = NULL, (*ranges)[3] # = NULL ## XXX cython fails ranges = NULL cdef int n = len(ranks) cdef unused1 = allocate(n, sizeof(int[3]), &ranges) for i in range(n): p = ranges[i] p[0], p[1], p[2] = ranks[i] cdef Group group = New(type(self)) CHKERR( MPI_Group_range_excl(self.ob_mpi, n, ranges, &group.ob_mpi) ) return group @classmethod def Create_from_session_pset( cls, Session session: Session, pset_name: str, ) -> Self: """ Create a new group from session and process set. """ cdef char *cname = NULL pset_name = asmpistr(pset_name, &cname) cdef Group group = New(cls) CHKERR( MPI_Group_from_session_pset( session.ob_mpi, cname, &group.ob_mpi) ) return group # Group Destructor # ---------------- def Free(self) -> None: """ Free a group. """ cdef MPI_Group save = self.ob_mpi CHKERR( MPI_Group_free(&self.ob_mpi) ) if constobj(self): self.ob_mpi = save # Fortran Handle # -------------- def py2f(self) -> int: """ """ return MPI_Group_c2f(self.ob_mpi) @classmethod def f2py(cls, arg: int) -> Group: """ """ return fromhandle(MPI_Group_f2c(arg)) cdef Group __GROUP_NULL__ = def_Group ( MPI_GROUP_NULL , "GROUP_NULL" ) cdef Group __GROUP_EMPTY__ = def_Group ( MPI_GROUP_EMPTY , "GROUP_EMPTY" ) # Predefined group handles # ------------------------ GROUP_NULL = __GROUP_NULL__ #: Null group handle GROUP_EMPTY = __GROUP_EMPTY__ #: Empty group handle mpi4py-4.0.3/src/mpi4py/MPI.src/Info.pyx000066400000000000000000000207771475341043600176450ustar00rootroot00000000000000cdef class Info: """ Info object. """ def __cinit__(self, Info info: Info | None = None): cinit(self, info) def __dealloc__(self): dealloc(self) def __richcmp__(self, other, int op): if not isinstance(other, Info): return NotImplemented return richcmp(self, other, op) def __bool__(self) -> bool: return nonnull(self) def __reduce__(self) -> str | tuple[Any, ...]: return reduce_Info(self) property handle: """MPI handle.""" def __get__(self) -> int: return tohandle(self) @classmethod def fromhandle(cls, handle: int) -> Info: """ Create object from MPI handle. """ return fromhandle( handle) def free(self) -> None: """ Call `Free` if not null or predefined. """ safefree(self) # The Info Object # --------------- @classmethod def Create( cls, items: ( Info | Mapping[str, str] | Iterable[tuple[str, str]] | None ) = None, ) -> Self: """ Create a new info object. """ cdef Info info = New(cls) CHKERR( MPI_Info_create(&info.ob_mpi) ) if items is None: return info cdef object key, value try: if hasattr(items, 'keys'): for key in items.keys(): info.Set(key, items[key]) else: for key, value in items: info.Set(key, value) except: # noqa CHKERR( MPI_Info_free(&info.ob_mpi) ) raise return info @classmethod def Create_env(cls, args: Sequence[str] | None = None) -> Self: """ Create a new environment info object. """ cdef int argc = 0 cdef char **argv = MPI_ARGV_NULL cdef Info info = New(cls) args = asarray_argv(args, &argv) while argv and argv[argc]: argc += 1 CHKERR( MPI_Info_create_env(argc, argv, &info.ob_mpi) ) return info def Free(self) -> None: """ Free an info object. """ cdef MPI_Info save = self.ob_mpi CHKERR( MPI_Info_free(&self.ob_mpi) ) if constobj(self): self.ob_mpi = save def Dup(self) -> Self: """ Duplicate an existing info object. """ cdef Info info = New(type(self)) CHKERR( MPI_Info_dup(self.ob_mpi, &info.ob_mpi) ) return info def Get(self, key: str) -> str | None: """ Retrieve the value associated with a key. 
""" cdef char *ckey = NULL cdef char *cvalue = NULL cdef int buflen = MPI_MAX_INFO_VAL cdef int flag = 0 key = asmpistr(key, &ckey) cdef unused = allocate(buflen+1, sizeof(char), &cvalue) CHKERR( MPI_Info_get_string(self.ob_mpi, ckey, &buflen, cvalue, &flag) ) if not flag: return None return mpistr(cvalue) def Set(self, key: str, value: str) -> None: """ Store a value associated with a key. """ cdef char *ckey = NULL cdef char *cvalue = NULL key = asmpistr(key, &ckey) value = asmpistr(value, &cvalue) CHKERR( MPI_Info_set(self.ob_mpi, ckey, cvalue) ) def Delete(self, key: str) -> None: """ Remove a (key, value) pair from info. """ cdef char *ckey = NULL key = asmpistr(key, &ckey) CHKERR( MPI_Info_delete(self.ob_mpi, ckey) ) def Get_nkeys(self) -> int: """ Return the number of currently defined keys in info. """ cdef int nkeys = 0 CHKERR( MPI_Info_get_nkeys(self.ob_mpi, &nkeys) ) return nkeys def Get_nthkey(self, int n: int) -> str: """ Return the *n*-th defined key in info. """ cdef char ckey[MPI_MAX_INFO_KEY+1] ckey[0] = 0 # just in case CHKERR( MPI_Info_get_nthkey(self.ob_mpi, n, ckey) ) ckey[MPI_MAX_INFO_KEY] = 0 # just in case return mpistr(ckey) # Fortran Handle # -------------- def py2f(self) -> int: """ """ return MPI_Info_c2f(self.ob_mpi) @classmethod def f2py(cls, arg: int) -> Info: """ """ return fromhandle(MPI_Info_f2c(arg)) # Python mapping emulation # ------------------------ def __len__(self) -> int: if not self: return 0 return self.Get_nkeys() def __contains__(self, key: str) -> bool: if not self: return False cdef char *ckey = NULL cdef char cvalue[1] cdef int buflen = 0 cdef int flag = 0 key = asmpistr(key, &ckey) CHKERR( MPI_Info_get_string(self.ob_mpi, ckey, &buflen, cvalue, &flag) ) return flag def __iter__(self) -> Iterator[str]: return iter(self.keys()) def __getitem__(self, key: str) -> str: if not self: raise KeyError(key) cdef object value = self.Get(key) if value is None: raise KeyError(key) return value def __setitem__(self, key: str, value: str) -> None: if not self: raise KeyError(key) self.Set(key, value) def __delitem__(self, key: str) -> None: if not self: raise KeyError(key) if key not in self: raise KeyError(key) self.Delete(key) def get(self, key: str, default: str | None = None) -> str | None: """Retrieve value by key.""" if not self: return default cdef object value = self.Get(key) if value is None: return default return value def keys(self) -> list[str]: """Return list of keys.""" if not self: return [] cdef list keys = [] cdef int nkeys = self.Get_nkeys() cdef object key for k in range(nkeys): key = self.Get_nthkey(k) keys.append(key) return keys def values(self) -> list[str]: """Return list of values.""" if not self: return [] cdef list values = [] cdef int nkeys = self.Get_nkeys() cdef object key, val for k in range(nkeys): key = self.Get_nthkey(k) val = self.Get(key) values.append(val) return values def items(self) -> list[tuple[str, str]]: """Return list of items.""" if not self: return [] cdef list items = [] cdef int nkeys = self.Get_nkeys() cdef object key, value for k in range(nkeys): key = self.Get_nthkey(k) value = self.Get(key) items.append((key, value)) return items def update( self, items: Info | Mapping[str, str] | Iterable[tuple[str, str]] = (), **kwds: str, ) -> None: """Update contents.""" if not self: raise KeyError cdef object key, value if hasattr(items, 'keys'): for key in items.keys(): self.Set(key, items[key]) else: for key, value in items: self.Set(key, value) for key, value in kwds.items(): self.Set(key, value) def pop(self, 
key: str, *default: str) -> str: """Pop value by key.""" cdef object value = None if self: value = self.Get(key) if value is not None: self.Delete(key) return value if default: value, = default return value raise KeyError(key) def popitem(self) -> tuple[str, str]: """Pop first item.""" if not self: raise KeyError cdef object key, value cdef int nkeys = self.Get_nkeys() if nkeys == 0: raise KeyError key = self.Get_nthkey(nkeys - 1) value = self.Get(key) self.Delete(key) return (key, value) def copy(self) -> Self: """Copy contents.""" if not self: return New(type(self)) return self.Dup() def clear(self) -> None: """Clear contents.""" if not self: return None cdef object key cdef int k = 0, nkeys = self.Get_nkeys() while k < nkeys: key = self.Get_nthkey(0) self.Delete(key) k += 1 cdef Info __INFO_NULL__ = def_Info( MPI_INFO_NULL , "INFO_NULL" ) cdef Info __INFO_ENV__ = def_Info( MPI_INFO_ENV , "INFO_ENV" ) # Predefined info handles # ----------------------- INFO_NULL = __INFO_NULL__ #: Null info handle INFO_ENV = __INFO_ENV__ #: Environment info handle mpi4py-4.0.3/src/mpi4py/MPI.src/MPI.pyx000066400000000000000000000250651475341043600173720ustar00rootroot00000000000000# ----------------------------------------------------------------------------- __doc__ = """Message Passing Interface.""" from mpi4py.libmpi cimport * include "stdlib.pxi" include "atimport.pxi" bootstrap() initialize() include "allocate.pxi" include "asstring.pxi" include "asfspath.pxi" include "asbuffer.pxi" include "asarray.pxi" include "objmodel.pxi" include "attrimpl.pxi" include "errhimpl.pxi" include "reqimpl.pxi" include "opimpl.pxi" include "bufaimpl.pxi" include "commimpl.pxi" include "winimpl.pxi" include "drepimpl.pxi" include "msgbuffer.pxi" include "msgpickle.pxi" include "CAPI.pxi" # ----------------------------------------------------------------------------- # Assorted constants # ------------------ UNDEFINED = MPI_UNDEFINED #: Undefined integer value ANY_SOURCE = MPI_ANY_SOURCE #: Wildcard source value for receives ANY_TAG = MPI_ANY_TAG #: Wildcard tag value for receives PROC_NULL = MPI_PROC_NULL #: Special process rank for send/receive ROOT = MPI_ROOT #: Root process for collective inter-communications BOTTOM = __BOTTOM__ #: Special address for buffers IN_PLACE = __IN_PLACE__ #: *In-place* option for collective communications # Predefined Attribute Keyvals # ---------------------------- KEYVAL_INVALID = MPI_KEYVAL_INVALID TAG_UB = MPI_TAG_UB IO = MPI_IO WTIME_IS_GLOBAL = MPI_WTIME_IS_GLOBAL UNIVERSE_SIZE = MPI_UNIVERSE_SIZE APPNUM = MPI_APPNUM LASTUSEDCODE = MPI_LASTUSEDCODE WIN_BASE = MPI_WIN_BASE WIN_SIZE = MPI_WIN_SIZE WIN_DISP_UNIT = MPI_WIN_DISP_UNIT WIN_CREATE_FLAVOR = MPI_WIN_CREATE_FLAVOR WIN_FLAVOR = MPI_WIN_CREATE_FLAVOR WIN_MODEL = MPI_WIN_MODEL include "ErrorCode.pyx" include "Exception.pyx" include "Datatype.pyx" include "Status.pyx" include "Request.pyx" include "Message.pyx" include "Op.pyx" include "Group.pyx" include "Info.pyx" include "Errhandler.pyx" include "Session.pyx" include "Comm.pyx" include "Win.pyx" include "File.pyx" # Memory Allocation # ----------------- def Alloc_mem(Aint size: int, Info info: Info = INFO_NULL) -> buffer: """ Allocate memory for message passing and remote memory access. """ cdef void *base = NULL CHKERR( MPI_Alloc_mem(size, info.ob_mpi, &base) ) return mpibuf(base, size) def Free_mem(mem: buffer) -> None: """ Free memory allocated with `Alloc_mem`. 
""" cdef void *base = NULL cdef buffer buf = asbuffer(mem, &base, NULL, 1) CHKERR( MPI_Free_mem(base) ) buf.release() # Initialization and Exit # ----------------------- def Init() -> None: """ Initialize the MPI execution environment. """ CHKERR( MPI_Init(NULL, NULL) ) initialize() def Finalize() -> None: """ Terminate the MPI execution environment. """ finalize() CHKERR( MPI_Finalize() ) # Levels of MPI threading support # ------------------------------- THREAD_SINGLE = MPI_THREAD_SINGLE #: Only one thread will execute THREAD_FUNNELED = MPI_THREAD_FUNNELED #: MPI calls are *funneled* to the main thread THREAD_SERIALIZED = MPI_THREAD_SERIALIZED #: MPI calls are *serialized* THREAD_MULTIPLE = MPI_THREAD_MULTIPLE #: Multiple threads may call MPI def Init_thread(int required: int = THREAD_MULTIPLE) -> int: """ Initialize the MPI execution environment. """ cdef int provided = MPI_THREAD_SINGLE CHKERR( MPI_Init_thread(NULL, NULL, required, &provided) ) initialize() return provided def Query_thread() -> int: """ Return the level of thread support provided by the MPI library. """ cdef int provided = MPI_THREAD_SINGLE CHKERR( MPI_Query_thread(&provided) ) return provided def Is_thread_main() -> bool: """ Indicate whether this thread called `Init` or `Init_thread`. """ cdef int flag = 1 CHKERR( MPI_Is_thread_main(&flag) ) return flag def Is_initialized() -> bool: """ Indicate whether `Init` has been called. """ cdef int flag = 0 CHKERR( MPI_Initialized(&flag) ) return flag def Is_finalized() -> bool: """ Indicate whether `Finalize` has completed. """ cdef int flag = 0 CHKERR( MPI_Finalized(&flag) ) return flag # Implementation Information # -------------------------- # MPI Version Number # ----------------- VERSION = MPI_VERSION SUBVERSION = MPI_SUBVERSION def Get_version() -> tuple[int, int]: """ Obtain the version number of the MPI standard. """ cdef int version = 1 cdef int subversion = 0 CHKERR( MPI_Get_version(&version, &subversion) ) return (version, subversion) def Get_library_version() -> str: """ Obtain the version string of the MPI library. """ cdef char name[MPI_MAX_LIBRARY_VERSION_STRING+1] cdef int nlen = 0 CHKERR( MPI_Get_library_version(name, &nlen) ) return tompistr(name, nlen) # Environmental Inquires # ---------------------- def Get_processor_name() -> str: """ Obtain the name of the calling processor. """ cdef char name[MPI_MAX_PROCESSOR_NAME+1] cdef int nlen = 0 CHKERR( MPI_Get_processor_name(name, &nlen) ) return tompistr(name, nlen) def Get_hw_resource_info() -> Info: """ Obtain information about the hardware platform of the calling processor. """ cdef Info info = New(Info) CHKERR( MPI_Get_hw_resource_info(&info.ob_mpi) ) return info # ~> MPI-4.1 # Timers and Synchronization # -------------------------- def Wtime() -> float: """ Return an elapsed time on the calling processor. """ return MPI_Wtime() def Wtick() -> float: """ Return the resolution of `Wtime`. """ return MPI_Wtick() # Control of Profiling # -------------------- def Pcontrol(int level: int) -> None: """ Control profiling. 
""" if level < 0 or level > 2: CHKERR( MPI_ERR_ARG ) CHKERR( MPI_Pcontrol(level) ) # Maximum string sizes # -------------------- # MPI-1 MAX_PROCESSOR_NAME = MPI_MAX_PROCESSOR_NAME MAX_ERROR_STRING = MPI_MAX_ERROR_STRING # MPI-2 MAX_PORT_NAME = MPI_MAX_PORT_NAME MAX_INFO_KEY = MPI_MAX_INFO_KEY MAX_INFO_VAL = MPI_MAX_INFO_VAL MAX_OBJECT_NAME = MPI_MAX_OBJECT_NAME MAX_DATAREP_STRING = MPI_MAX_DATAREP_STRING # MPI-3 MAX_LIBRARY_VERSION_STRING = MPI_MAX_LIBRARY_VERSION_STRING # MPI-4 MAX_PSET_NAME_LEN = MPI_MAX_PSET_NAME_LEN MAX_STRINGTAG_LEN = MPI_MAX_STRINGTAG_LEN # ----------------------------------------------------------------------------- include "typemap.pxi" include "typestr.pxi" include "typedec.pxi" # ----------------------------------------------------------------------------- cdef extern from * nogil: int PyMPI_Get_vendor(const char**, int*, int*, int*) def get_vendor() -> tuple[str, tuple[int, int, int]]: """ Information about the underlying MPI implementation. Returns: - string with the name of the MPI implementation. - integer 3-tuple version number ``(major, minor, micro)``. """ cdef const char *name=NULL cdef int major=0, minor=0, micro=0 CHKERR( PyMPI_Get_vendor(&name, &major, &minor, µ) ) return (mpistr(name), (major, minor, micro)) # ----------------------------------------------------------------------------- cdef inline int _mpi_type(object arg, type cls) except -1: if isinstance(arg, type): if issubclass(arg, cls): return 1 else: if isinstance(arg, cls): return 1 return 0 def _sizeof(arg: Any) -> int: """ Size in bytes of the underlying MPI handle. """ if _mpi_type(arg, Status): return sizeof(MPI_Status) if _mpi_type(arg, Datatype): return sizeof(MPI_Datatype) if _mpi_type(arg, Request): return sizeof(MPI_Request) if _mpi_type(arg, Message): return sizeof(MPI_Message) if _mpi_type(arg, Op): return sizeof(MPI_Op) if _mpi_type(arg, Group): return sizeof(MPI_Group) if _mpi_type(arg, Info): return sizeof(MPI_Info) if _mpi_type(arg, Errhandler): return sizeof(MPI_Errhandler) if _mpi_type(arg, Session): return sizeof(MPI_Session) if _mpi_type(arg, Comm): return sizeof(MPI_Comm) if _mpi_type(arg, Win): return sizeof(MPI_Win) if _mpi_type(arg, File): return sizeof(MPI_File) raise TypeError("expecting an MPI type or instance") def _addressof(arg: Any) -> int: """ Memory address of the underlying MPI handle. """ cdef void *ptr = NULL if isinstance(arg, Status): ptr = &(arg).ob_mpi elif isinstance(arg, Datatype): ptr = &(arg).ob_mpi elif isinstance(arg, Request): ptr = &(arg).ob_mpi elif isinstance(arg, Message): ptr = &(arg).ob_mpi elif isinstance(arg, Op): ptr = &(arg).ob_mpi elif isinstance(arg, Group): ptr = &(arg).ob_mpi elif isinstance(arg, Info): ptr = &(arg).ob_mpi elif isinstance(arg, Errhandler): ptr = &(arg).ob_mpi elif isinstance(arg, Session): ptr = &(arg).ob_mpi elif isinstance(arg, Comm): ptr = &(arg).ob_mpi elif isinstance(arg, Win): ptr = &(arg).ob_mpi elif isinstance(arg, File): ptr = &(arg).ob_mpi else: raise TypeError("expecting an MPI instance") return PyLong_FromVoidPtr(ptr) def _handleof(arg: Any) -> int: """ Unsigned integer value with the underlying MPI handle. 
""" if isinstance(arg, Status): return &((arg).ob_mpi) elif isinstance(arg, Datatype): return ((arg).ob_mpi) elif isinstance(arg, Request): return ((arg).ob_mpi) elif isinstance(arg, Message): return ((arg).ob_mpi) elif isinstance(arg, Op): return ((arg).ob_mpi) elif isinstance(arg, Group): return ((arg).ob_mpi) elif isinstance(arg, Info): return ((arg).ob_mpi) elif isinstance(arg, Errhandler): return ((arg).ob_mpi) elif isinstance(arg, Session): return ((arg).ob_mpi) elif isinstance(arg, Comm): return ((arg).ob_mpi) elif isinstance(arg, Win): return ((arg).ob_mpi) elif isinstance(arg, File): return ((arg).ob_mpi) else: raise TypeError("expecting an MPI instance") # ----------------------------------------------------------------------------- mpi4py-4.0.3/src/mpi4py/MPI.src/Message.pyx000066400000000000000000000143121475341043600203220ustar00rootroot00000000000000cdef class Message: """ Matched message. """ def __cinit__(self, Message message: Message | None = None): cinit(self, message) def __dealloc__(self): dealloc(self) def __richcmp__(self, other, int op): if not isinstance(other, Message): return NotImplemented return richcmp(self, other, op) def __bool__(self) -> bool: return nonnull(self) def __reduce__(self) -> str | tuple[Any, ...]: return reduce_default(self) property handle: """MPI handle.""" def __get__(self) -> int: return tohandle(self) @classmethod def fromhandle(cls, handle: int) -> Message: """ Create object from MPI handle. """ return fromhandle( handle) def free(self) -> None: """ Do nothing. """ safefree(self) # Matching Probe # -------------- @classmethod def Probe( cls, Comm comm: Comm, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, Status status: Status | None = None, ) -> Self: """ Blocking test for a matched message. """ cdef MPI_Message cmessage = MPI_MESSAGE_NULL cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_Mprobe( source, tag, comm.ob_mpi, &cmessage, statusp) ) cdef Message message = New(cls) message.ob_mpi = cmessage return message @classmethod def Iprobe( cls, Comm comm: Comm, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, Status status: Status | None = None, ) -> Self | None: """ Nonblocking test for a matched message. """ cdef int flag = 0 cdef MPI_Message cmessage = MPI_MESSAGE_NULL cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_Improbe( source, tag, comm.ob_mpi, &flag, &cmessage, statusp) ) if flag == 0: return None cdef Message message = New(cls) message.ob_mpi = cmessage return message # Matched receives # ---------------- def Recv( self, buf: BufSpec, Status status: Status | None = None, ) -> None: """ Blocking receive of matched message. """ cdef MPI_Message message = self.ob_mpi cdef int source = MPI_ANY_SOURCE if message == MPI_MESSAGE_NO_PROC: source = MPI_PROC_NULL cdef _p_msg_p2p rmsg = message_p2p_recv(buf, source) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_Mrecv_c( rmsg.buf, rmsg.count, rmsg.dtype, &message, statusp) ) if self is not __MESSAGE_NO_PROC__: self.ob_mpi = message def Irecv(self, buf: BufSpec) -> Request: """ Nonblocking receive of matched message. 
""" cdef MPI_Message message = self.ob_mpi cdef int source = MPI_ANY_SOURCE if message == MPI_MESSAGE_NO_PROC: source = MPI_PROC_NULL cdef _p_msg_p2p rmsg = message_p2p_recv(buf, source) cdef Request request = New(Request) with nogil: CHKERR( MPI_Imrecv_c( rmsg.buf, rmsg.count, rmsg.dtype, &message, &request.ob_mpi) ) if self is not __MESSAGE_NO_PROC__: self.ob_mpi = message request.ob_buf = rmsg return request # Python Communication # -------------------- # @classmethod def probe( cls, Comm comm: Comm, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, Status status: Status | None = None, ) -> Self: """ Blocking test for a matched message. """ cdef Message message = New(cls) cdef MPI_Status *statusp = arg_Status(status) message.ob_buf = PyMPI_mprobe(source, tag, comm.ob_mpi, &message.ob_mpi, statusp) return message @classmethod def iprobe( cls, Comm comm: Comm, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, Status status: Status | None = None, ) -> Self | None: """ Nonblocking test for a matched message. """ cdef int flag = 0 cdef Message message = New(cls) cdef MPI_Status *statusp = arg_Status(status) message.ob_buf = PyMPI_improbe(source, tag, comm.ob_mpi, &flag, &message.ob_mpi, statusp) if flag == 0: return None return message def recv(self, Status status: Status | None = None) -> Any: """ Blocking receive of matched message. """ cdef object rmsg = self.ob_buf cdef MPI_Message message = self.ob_mpi cdef MPI_Status *statusp = arg_Status(status) rmsg = PyMPI_mrecv(rmsg, &message, statusp) if self is not __MESSAGE_NO_PROC__: self.ob_mpi = message if self.ob_mpi == MPI_MESSAGE_NULL: self.ob_buf = None return rmsg def irecv(self) -> Request: """ Nonblocking receive of matched message. """ cdef object rmsg = self.ob_buf cdef MPI_Message message = self.ob_mpi cdef Request request = New(Request) request.ob_buf = PyMPI_imrecv(rmsg, &message, &request.ob_mpi) if self is not __MESSAGE_NO_PROC__: self.ob_mpi = message if self.ob_mpi == MPI_MESSAGE_NULL: self.ob_buf = None return request # Fortran Handle # -------------- def py2f(self) -> int: """ """ return MPI_Message_c2f(self.ob_mpi) @classmethod def f2py(cls, arg: int) -> Message: """ """ return fromhandle(MPI_Message_f2c(arg)) cdef Message __MESSAGE_NULL__ = def_Message ( MPI_MESSAGE_NULL , "MESSAGE_NULL" ) # noqa cdef Message __MESSAGE_NO_PROC__ = def_Message ( MPI_MESSAGE_NO_PROC , "MESSAGE_NO_PROC" ) # noqa # Predefined message handles # -------------------------- MESSAGE_NULL = __MESSAGE_NULL__ #: Null message handle MESSAGE_NO_PROC = __MESSAGE_NO_PROC__ #: No-proc message handle mpi4py-4.0.3/src/mpi4py/MPI.src/Op.pyx000066400000000000000000000126531475341043600173220ustar00rootroot00000000000000cdef class Op: """ Reduction operation. """ def __cinit__(self, Op op: Op | None = None): cinit(self, op) def __dealloc__(self): dealloc(self) def __richcmp__(self, other, int op): if not isinstance(other, Op): return NotImplemented return richcmp(self, other, op) def __bool__(self) -> bool: return nonnull(self) def __reduce__(self) -> str | tuple[Any, ...]: return reduce_Op(self) def __call__(self, x: Any, y: Any) -> Any: cdef int index = op_user_id_get(self) return op_call(self.ob_mpi, index, x, y) property handle: """MPI handle.""" def __get__(self) -> int: return tohandle(self) @classmethod def fromhandle(cls, handle: int) -> Op: """ Create object from MPI handle. """ return fromhandle( handle) def free(self) -> None: """ Call `Free` if not null or predefined. 
""" safefree(self) # User-Defined Reduction Operations # --------------------------------- @classmethod def Create( cls, function: Callable[[Buffer, Buffer, Datatype], None], bint commute: bool = False, ) -> Self: """ Create a user-defined reduction operation. """ cdef Op self = New(cls) cdef MPI_User_function *fn_i = NULL cdef MPI_User_function_c *fn_c = NULL op_user_new(self, function, &fn_i, &fn_c) try: try: CHKERR( MPI_Op_create_c(fn_c, commute, &self.ob_mpi) ) except NotImplementedError: # ~> legacy CHKERR( MPI_Op_create(fn_i, commute, &self.ob_mpi) ) # ~> legacy except: # ~> uncovered # noqa op_user_del(self) # ~> uncovered raise # ~> uncovered return self def Free(self) -> None: """ Free a user-defined reduction operation. """ cdef MPI_Op save = self.ob_mpi CHKERR( MPI_Op_free(&self.ob_mpi) ) if constobj(self): self.ob_mpi = save op_user_del(self) # Process-local reduction # ----------------------- def Is_commutative(self) -> bool: """ Query reduction operations for their commutativity. """ cdef int flag = 0 CHKERR( MPI_Op_commutative(self.ob_mpi, &flag) ) return flag property is_commutative: """Is a commutative operation.""" def __get__(self) -> bool: return self.Is_commutative() def Reduce_local(self, inbuf: BufSpec, inoutbuf: BufSpec) -> None: """ Apply a reduction operation to local data. """ # get *in* and *inout* buffers cdef _p_msg_cco m = message_cco() m.for_cro_recv(inoutbuf, 0) m.for_cro_send(inbuf, 0) m.chk_cro_args() # do local reduction with nogil: CHKERR( MPI_Reduce_local_c( m.sbuf, m.rbuf, m.rcount, m.rtype, self.ob_mpi) ) property is_predefined: """Is a predefined operation.""" def __get__(self) -> bool: cdef MPI_Op op = self.ob_mpi return ( op == MPI_OP_NULL or op == MPI_MAX or op == MPI_MIN or op == MPI_SUM or op == MPI_PROD or op == MPI_LAND or op == MPI_BAND or op == MPI_LOR or op == MPI_BOR or op == MPI_LXOR or op == MPI_BXOR or op == MPI_MAXLOC or op == MPI_MINLOC or op == MPI_REPLACE or op == MPI_NO_OP ) # Fortran Handle # -------------- def py2f(self) -> int: """ """ return MPI_Op_c2f(self.ob_mpi) @classmethod def f2py(cls, arg: int) -> Op: """ """ return fromhandle(MPI_Op_f2c(arg)) cdef Op __OP_NULL__ = def_Op( MPI_OP_NULL , "OP_NULL" ) cdef Op __MAX__ = def_Op( MPI_MAX , "MAX" ) cdef Op __MIN__ = def_Op( MPI_MIN , "MIN" ) cdef Op __SUM__ = def_Op( MPI_SUM , "SUM" ) cdef Op __PROD__ = def_Op( MPI_PROD , "PROD" ) cdef Op __LAND__ = def_Op( MPI_LAND , "LAND" ) cdef Op __BAND__ = def_Op( MPI_BAND , "BAND" ) cdef Op __LOR__ = def_Op( MPI_LOR , "LOR" ) cdef Op __BOR__ = def_Op( MPI_BOR , "BOR" ) cdef Op __LXOR__ = def_Op( MPI_LXOR , "LXOR" ) cdef Op __BXOR__ = def_Op( MPI_BXOR , "BXOR" ) cdef Op __MAXLOC__ = def_Op( MPI_MAXLOC , "MAXLOC" ) cdef Op __MINLOC__ = def_Op( MPI_MINLOC , "MINLOC" ) cdef Op __REPLACE__ = def_Op( MPI_REPLACE , "REPLACE" ) cdef Op __NO_OP__ = def_Op( MPI_NO_OP , "NO_OP" ) # Predefined operation handles # ---------------------------- OP_NULL = __OP_NULL__ #: Null MAX = __MAX__ #: Maximum MIN = __MIN__ #: Minimum SUM = __SUM__ #: Sum PROD = __PROD__ #: Product LAND = __LAND__ #: Logical and BAND = __BAND__ #: Bit-wise and LOR = __LOR__ #: Logical or BOR = __BOR__ #: Bit-wise or LXOR = __LXOR__ #: Logical xor BXOR = __BXOR__ #: Bit-wise xor MAXLOC = __MAXLOC__ #: Maximum and location MINLOC = __MINLOC__ #: Minimum and location REPLACE = __REPLACE__ #: Replace (for RMA) NO_OP = __NO_OP__ #: No-op (for RMA) mpi4py-4.0.3/src/mpi4py/MPI.src/Request.pyx000066400000000000000000000410411475341043600203650ustar00rootroot00000000000000cdef class 
Request: """ Request handler. """ def __cinit__(self, Request request: Request | None = None): cinit(self, request) def __dealloc__(self): dealloc(self) def __richcmp__(self, other, int op): if not isinstance(other, Request): return NotImplemented return richcmp(self, other, op) def __bool__(self) -> bool: return nonnull(self) def __reduce__(self) -> str | tuple[Any, ...]: return reduce_default(self) property handle: """MPI handle.""" def __get__(self) -> int: return tohandle(self) @classmethod def fromhandle(cls, handle: int) -> Request: """ Create object from MPI handle. """ if issubclass(cls, Prequest): return PyMPIPrequest_New( handle) if issubclass(cls, Grequest): return PyMPIGrequest_New( handle) return fromhandle( handle) def free(self) -> None: """ Call `Free` if not null. """ safefree(self) # Completion Operations # --------------------- def Wait(self, Status status: Status | None = None) -> Literal[True]: """ Wait for a non-blocking operation to complete. """ cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_Wait( &self.ob_mpi, statusp) ) if self.ob_mpi == MPI_REQUEST_NULL: self.ob_buf = None return True def Test(self, Status status: Status | None = None) -> bool: """ Test for the completion of a non-blocking operation. """ cdef int flag = 0 cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_Test( &self.ob_mpi, &flag, statusp) ) if self.ob_mpi == MPI_REQUEST_NULL: self.ob_buf = None return flag def Get_status(self, Status status: Status | None = None) -> bool: """ Non-destructive test for the completion of a request. """ cdef int flag = 0 cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_Request_get_status( self.ob_mpi, &flag, statusp) ) return flag # Multiple Completions # -------------------- @classmethod def Waitany( cls, requests: Sequence[Request], Status status: Status | None = None, ) -> int: """ Wait for any previously initiated request to complete. """ cdef int index = MPI_UNDEFINED cdef MPI_Status *statusp = arg_Status(status) cdef _p_rs rs = _p_rs.__new__(_p_rs) rs.acquire(requests) try: with nogil: CHKERR( MPI_Waitany( rs.count, rs.requests, &index, statusp) ) finally: rs.release() return index @classmethod def Testany( cls, requests: Sequence[Request], Status status: Status | None = None, ) -> tuple[int, bool]: """ Test for completion of any previously initiated request. """ cdef int index = MPI_UNDEFINED cdef int flag = 0 cdef MPI_Status *statusp = arg_Status(status) cdef _p_rs rs = _p_rs.__new__(_p_rs) rs.acquire(requests) try: with nogil: CHKERR( MPI_Testany( rs.count, rs.requests, &index, &flag, statusp) ) finally: rs.release() return (index, flag) @classmethod def Get_status_any( cls, requests: Sequence[Request], Status status: Status | None = None, ) -> tuple[int, bool]: """ Non-destructive test for the completion of any requests. """ cdef int index = MPI_UNDEFINED cdef int flag = 0 cdef MPI_Status *statusp = arg_Status(status) cdef _p_rs rs = _p_rs.__new__(_p_rs) rs.acquire(requests) try: with nogil: CHKERR( MPI_Request_get_status_any( rs.count, rs.requests, &index, &flag, statusp) ) finally: rs.release() # ~> MPI-4.1 return (index, flag) # ~> MPI-4.1 @classmethod def Waitall( cls, requests: Sequence[Request], statuses: list[Status] | None = None, ) -> Literal[True]: """ Wait for all previously initiated requests to complete. 
""" cdef _p_rs rs = _p_rs.__new__(_p_rs) rs.acquire(requests, statuses) try: with nogil: CHKERR( MPI_Waitall( rs.count, rs.requests, rs.statuses) ) finally: rs.release(statuses) return True @classmethod def Testall( cls, requests: Sequence[Request], statuses: list[Status] | None = None, ) -> bool: """ Test for completion of all previously initiated requests. """ cdef int flag = 0 cdef _p_rs rs = _p_rs.__new__(_p_rs) rs.acquire(requests, statuses) try: with nogil: CHKERR( MPI_Testall( rs.count, rs.requests, &flag, rs.statuses) ) finally: rs.release(statuses) return flag @classmethod def Get_status_all( cls, requests: Sequence[Request], statuses: list[Status] | None = None, ) -> bool: """ Non-destructive test for the completion of all requests. """ cdef int flag = 0 cdef _p_rs rs = _p_rs.__new__(_p_rs) rs.acquire(requests, statuses) try: with nogil: CHKERR( MPI_Request_get_status_all( rs.count, rs.requests, &flag, rs.statuses) ) finally: rs.release(statuses) # ~> MPI-4.1 return flag # ~> MPI-4.1 @classmethod def Waitsome( cls, requests: Sequence[Request], statuses: list[Status] | None = None, ) -> list[int] | None: """ Wait for some previously initiated requests to complete. """ cdef object indices = None cdef _p_rs rs = _p_rs.__new__(_p_rs) rs.acquire(requests, statuses) rs.add_indices() try: with nogil: CHKERR( MPI_Waitsome( rs.count, rs.requests, &rs.outcount, rs.indices, rs.statuses) ) indices = rs.get_indices() finally: rs.release(statuses) return indices @classmethod def Testsome( cls, requests: Sequence[Request], statuses: list[Status] | None = None, ) -> list[int] | None: """ Test for completion of some previously initiated requests. """ cdef object indices = None cdef _p_rs rs = _p_rs.__new__(_p_rs) rs.acquire(requests, statuses) rs.add_indices() try: with nogil: CHKERR( MPI_Testsome( rs.count, rs.requests, &rs.outcount, rs.indices, rs.statuses) ) indices = rs.get_indices() finally: rs.release(statuses) return indices @classmethod def Get_status_some( cls, requests: Sequence[Request], statuses: list[Status] | None = None, ) -> list[int] | None: """ Non-destructive test for completion of some requests. """ cdef object indices = None cdef _p_rs rs = _p_rs.__new__(_p_rs) rs.acquire(requests, statuses) rs.add_indices() try: with nogil: CHKERR( MPI_Request_get_status_some( rs.count, rs.requests, &rs.outcount, rs.indices, rs.statuses) ) indices = rs.get_indices() # ~> MPI-4.1 finally: rs.release(statuses) # ~> MPI-4.1 return indices # ~> MPI-4.1 # Cancel # ------ def Cancel(self) -> None: """ Cancel a request. """ with nogil: CHKERR( MPI_Cancel(&self.ob_mpi) ) # Deallocation # ------------ def Free(self) -> None: """ Free a communication request. """ cdef MPI_Request save = self.ob_mpi with nogil: CHKERR( MPI_Request_free(&self.ob_mpi) ) if constobj(self): self.ob_mpi = save # Fortran Handle # -------------- def py2f(self) -> int: """ """ return MPI_Request_c2f(self.ob_mpi) @classmethod def f2py(cls, arg: int) -> Request: """ """ if issubclass(cls, Prequest): return PyMPIPrequest_New(MPI_Request_f2c(arg)) if issubclass(cls, Grequest): return PyMPIGrequest_New(MPI_Request_f2c(arg)) return fromhandle(MPI_Request_f2c(arg)) # Python Communication # -------------------- def wait( self, Status status: Status | None = None, ) -> Any: """ Wait for a non-blocking operation to complete. """ cdef msg = PyMPI_wait(self, status) return msg def test( self, Status status: Status | None = None, ) -> tuple[bool, Any | None]: """ Test for the completion of a non-blocking operation. 
""" cdef int flag = 0 cdef msg = PyMPI_test(self, &flag, status) return (flag, msg) def get_status( self, Status status: Status | None = None, ) -> bool: """ Non-destructive test for the completion of a request. """ cdef int flag = 0 cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_Request_get_status( self.ob_mpi, &flag, statusp) ) return flag @classmethod def waitany( cls, requests: Sequence[Request], Status status: Status | None = None ) -> tuple[int, Any]: """ Wait for any previously initiated request to complete. """ cdef int index = MPI_UNDEFINED cdef msg = PyMPI_waitany(requests, &index, status) return (index, msg) @classmethod def testany( cls, requests: Sequence[Request], Status status: Status | None = None, ) -> tuple[int, bool, Any | None]: """ Test for completion of any previously initiated request. """ cdef int index = MPI_UNDEFINED cdef int flag = 0 cdef msg = PyMPI_testany(requests, &index, &flag, status) return (index, flag, msg) @classmethod def get_status_any( cls, requests: Sequence[Request], Status status: Status | None = None, ) -> tuple[int, bool]: """ Non-destructive test for the completion of any requests. """ return Request.Get_status_any(requests, status) @classmethod def waitall( cls, requests: Sequence[Request], statuses: list[Status] | None = None, ) -> list[Any]: """ Wait for all previously initiated requests to complete. """ cdef msg = PyMPI_waitall(requests, statuses) return msg @classmethod def testall( cls, requests: Sequence[Request], statuses: list[Status] | None = None ) -> tuple[bool, list[Any] | None]: """ Test for completion of all previously initiated requests. """ cdef int flag = 0 cdef msg = PyMPI_testall(requests, &flag, statuses) return (flag, msg) @classmethod def get_status_all( cls, requests: Sequence[Request], statuses: list[Status] | None = None, ) -> bool: """ Non-destructive test for the completion of all requests. """ return Request.Get_status_all(requests, statuses) @classmethod def waitsome( cls, requests: Sequence[Request], statuses: list[Status] | None = None, ) -> tuple[list[int] | None, list[Any] | None]: """ Wait for some previously initiated requests to complete. """ return PyMPI_waitsome(requests, statuses) @classmethod def testsome( cls, requests: Sequence[Request], statuses: list[Status] | None = None, ) -> tuple[list[int] | None, list[Any] | None]: """ Test for completion of some previously initiated requests. """ return PyMPI_testsome(requests, statuses) @classmethod def get_status_some( cls, requests: Sequence[Request], statuses: list[Status] | None = None, ) -> list[int] | None: """ Non-destructive test for completion of some requests. """ return Request.Get_status_some(requests, statuses) def cancel(self) -> None: """ Cancel a request. """ with nogil: CHKERR( MPI_Cancel(&self.ob_mpi) ) cdef class Prequest(Request): """ Persistent request handler. """ def __cinit__(self, Request request: Request | None = None): if self.ob_mpi == MPI_REQUEST_NULL: return (request) def Start(self) -> None: """ Initiate a communication with a persistent request. """ with nogil: CHKERR( MPI_Start(&self.ob_mpi) ) @classmethod def Startall(cls, requests: list[Prequest]) -> None: """ Start a collection of persistent requests. """ cdef _p_rs rs = _p_rs.__new__(_p_rs) rs.acquire(requests) try: with nogil: CHKERR( MPI_Startall(rs.count, rs.requests) ) finally: rs.release() # Partitioned completion # ---------------------- def Pready( self, int partition: int, ) -> None: """ Mark a given partition as ready. 
""" CHKERR( MPI_Pready(partition, self.ob_mpi) ) def Pready_range( self, int partition_low: int, int partition_high: int, ) -> None: """ Mark a range of partitions as ready. """ CHKERR( MPI_Pready_range(partition_low, partition_high, self.ob_mpi) ) def Pready_list( self, partitions: Sequence[int], ) -> None: """ Mark a sequence of partitions as ready. """ cdef int length = 0, *array_of_partitions = NULL partitions = getarray(partitions, &length, &array_of_partitions) CHKERR( MPI_Pready_list(length, array_of_partitions, self.ob_mpi) ) def Parrived( self, int partition: int, ) -> bool: """ Test partial completion of a partitioned receive operation. """ cdef int flag = 0 CHKERR( MPI_Parrived(self.ob_mpi, partition, &flag) ) return flag cdef class Grequest(Request): """ Generalized request handler. """ def __cinit__(self, Request request: Request | None = None): self.ob_grequest = self.ob_mpi if self.ob_mpi == MPI_REQUEST_NULL: return (request) @classmethod def Start( cls, query_fn: Callable[..., None] | None = None, free_fn: Callable[..., None] | None = None, cancel_fn: Callable[..., None] | None = None, args: tuple[Any] | None = None, kwargs: dict[str, Any] | None = None, ) -> Grequest: """ Create and return a user-defined request. """ cdef Grequest request = New(Grequest) cdef _p_greq state = _p_greq( query_fn, free_fn, cancel_fn, args, kwargs) with nogil: CHKERR( MPI_Grequest_start( greq_query_fn, greq_free_fn, greq_cancel_fn, state, &request.ob_mpi) ) Py_INCREF(state) request.ob_grequest = request.ob_mpi return request def Complete(self) -> None: """ Notify that a user-defined request is complete. """ if self.ob_mpi != MPI_REQUEST_NULL: if self.ob_mpi != self.ob_grequest: raise MPIException(MPI_ERR_REQUEST) # ~> unreachable cdef MPI_Request grequest = self.ob_grequest self.ob_grequest = self.ob_mpi # or MPI_REQUEST_NULL ?? with nogil: CHKERR( MPI_Grequest_complete(grequest) ) self.ob_grequest = self.ob_mpi # or MPI_REQUEST_NULL ?? def complete(self, obj: Any = None) -> None: """ Notify that a user-defined request is complete. """ self.ob_buf = PyMPI_wrap_object(obj) Grequest.Complete(self) cdef Request __REQUEST_NULL__ = def_Request( MPI_REQUEST_NULL , "REQUEST_NULL" ) # Predefined request handles # -------------------------- REQUEST_NULL = __REQUEST_NULL__ #: Null request handle mpi4py-4.0.3/src/mpi4py/MPI.src/Session.pyx000066400000000000000000000146001475341043600203610ustar00rootroot00000000000000cdef class Session: """ Session context. """ def __cinit__(self, Session session: Session | None = None): cinit(self, session) def __dealloc__(self): dealloc(self) def __richcmp__(self, other, int op): if not isinstance(other, Session): return NotImplemented return richcmp(self, other, op) def __bool__(self) -> bool: return nonnull(self) def __reduce__(self) -> str | tuple[Any, ...]: return reduce_default(self) property handle: """MPI handle.""" def __get__(self) -> int: return tohandle(self) @classmethod def fromhandle(cls, handle: int) -> Session: """ Create object from MPI handle. """ return fromhandle( handle) def free(self) -> None: """ Call `Finalize` if not null. """ safefree(self) # @classmethod def Init( cls, Info info: Info = INFO_NULL, Errhandler errhandler: Errhandler | None = None, ) -> Self: """ Create a new session. """ cdef MPI_Errhandler cerrhdl = arg_Errhandler(errhandler) cdef Session session = New(cls) CHKERR( MPI_Session_init( info.ob_mpi, cerrhdl, &session.ob_mpi) ) session_set_eh(session.ob_mpi) return session def Finalize(self) -> None: """ Finalize a session. 
""" cdef MPI_Session save = self.ob_mpi CHKERR( MPI_Session_finalize(&self.ob_mpi) ) if constobj(self): self.ob_mpi = save def Get_num_psets(self, Info info: Info = INFO_NULL) -> int: """ Number of available process sets. """ cdef int num_psets = -1 CHKERR( MPI_Session_get_num_psets( self.ob_mpi, info.ob_mpi, &num_psets) ) return num_psets def Get_nth_pset(self, int n: int, Info info: Info = INFO_NULL) -> str: """ Name of the *n*-th process set. """ cdef int nlen = MPI_MAX_PSET_NAME_LEN cdef char *pset_name = NULL cdef unused = allocate(nlen+1, sizeof(char), &pset_name) CHKERR( MPI_Session_get_nth_pset( self.ob_mpi, info.ob_mpi, n, &nlen, pset_name) ) return mpistr(pset_name) def Get_info(self) -> Info: """ Return the current hints for a session. """ cdef Info info = New(Info) CHKERR( MPI_Session_get_info( self.ob_mpi, &info.ob_mpi) ) return info def Get_pset_info(self, pset_name: str) -> Info: """ Return the current hints for a session and process set. """ cdef char *cname = NULL pset_name = asmpistr(pset_name, &cname) cdef Info info = New(Info) CHKERR( MPI_Session_get_pset_info( self.ob_mpi, cname, &info.ob_mpi) ) return info def Create_group(self, pset_name: str) -> Group: """ Create a new group from session and process set. """ cdef char *cname = NULL pset_name = asmpistr(pset_name, &cname) cdef Group group = New(Group) CHKERR( MPI_Group_from_session_pset( self.ob_mpi, cname, &group.ob_mpi) ) return group # Buffer Allocation and Usage # --------------------------- def Attach_buffer(self, buf: Buffer | None) -> None: """ Attach a user-provided buffer for sending in buffered mode. """ cdef void *base = NULL cdef MPI_Count size = 0 buf = attach_buffer(buf, &base, &size) with nogil: CHKERR( MPI_Session_attach_buffer_c( self.ob_mpi, base, size) ) detach_buffer_set(self, buf) # ~> MPI-4.1 def Detach_buffer(self) -> Buffer | None: """ Remove an existing attached buffer. """ cdef void *base = NULL cdef MPI_Count size = 0 with nogil: CHKERR( MPI_Session_detach_buffer_c( self.ob_mpi, &base, &size) ) return detach_buffer_get(self, base, size) # ~> MPI-4.1 def Flush_buffer(self) -> None: """ Block until all buffered messages have been transmitted. """ with nogil: CHKERR( MPI_Session_flush_buffer(self.ob_mpi) ) def Iflush_buffer(self) -> Request: """ Nonblocking flush for buffered messages. """ cdef Request request = New(Request) with nogil: CHKERR( MPI_Session_iflush_buffer( self.ob_mpi, &request.ob_mpi) ) return request # ~> MPI-4.1 # Error handling # -------------- @classmethod def Create_errhandler( cls, errhandler_fn: Callable[[Session, int], None], ) -> Errhandler: """ Create a new error handler for sessions. """ cdef Errhandler errhandler = New(Errhandler) cdef MPI_Session_errhandler_function *fn = NULL cdef int index = errhdl_new(errhandler_fn, &fn) try: CHKERR( MPI_Session_create_errhandler(fn, &errhandler.ob_mpi) ) except: # ~> uncovered # noqa errhdl_del(&index, fn) # ~> uncovered raise # ~> uncovered return errhandler def Get_errhandler(self) -> Errhandler: """ Get the error handler for a session. """ cdef Errhandler errhandler = New(Errhandler) CHKERR( MPI_Session_get_errhandler(self.ob_mpi, &errhandler.ob_mpi) ) return errhandler def Set_errhandler(self, Errhandler errhandler: Errhandler) -> None: """ Set the error handler for a session. """ CHKERR( MPI_Session_set_errhandler(self.ob_mpi, errhandler.ob_mpi) ) def Call_errhandler(self, int errorcode: int) -> None: """ Call the error handler installed on a session. 
""" CHKERR( MPI_Session_call_errhandler(self.ob_mpi, errorcode) ) # Fortran Handle # -------------- def py2f(self) -> int: """ """ return MPI_Session_c2f(self.ob_mpi) @classmethod def f2py(cls, arg: int) -> Session: """ """ return fromhandle(MPI_Session_f2c(arg)) cdef Session __SESSION_NULL__ = def_Session( MPI_SESSION_NULL , "SESSION_NULL" ) # Predefined session handle # ------------------------- SESSION_NULL = __SESSION_NULL__ #: Null session handler mpi4py-4.0.3/src/mpi4py/MPI.src/Status.pyx000066400000000000000000000150551475341043600202260ustar00rootroot00000000000000cdef class Status: """ Status object. """ def __cinit__(self, Status status: Status | None = None): cdef MPI_Status *s = &self.ob_mpi CHKERR( MPI_Status_set_source (s, MPI_ANY_SOURCE ) ) CHKERR( MPI_Status_set_tag (s, MPI_ANY_TAG ) ) CHKERR( MPI_Status_set_error (s, MPI_SUCCESS ) ) if status is None: return self.ob_mpi = status.ob_mpi def __richcmp__(self, other, int op): if not isinstance(other, Status): return NotImplemented cdef Status s = self, o = other cdef int ne = memcmp(&s.ob_mpi, &o.ob_mpi, sizeof(MPI_Status)) if op == Py_EQ: return (ne == 0) elif op == Py_NE: return (ne != 0) cdef str mod = type(self).__module__ cdef str cls = type(self).__name__ raise TypeError(f"unorderable type '{mod}.{cls}'") def __reduce__(self) -> tuple[Any, tuple[Any, ...], dict[str, Any]]: return (__newobj__, (type(self),), self.__getstate__()) def __getstate__(self) -> dict[str, int]: cdef dict state = { 'source': self.Get_source(), 'tag': self.Get_tag(), 'error': self.Get_error(), } try: state['count'] = self.Get_elements(__BYTE__) except NotImplementedError: # ~> legacy pass # ~> legacy try: state['cancelled'] = self.Is_cancelled() except NotImplementedError: # ~> legacy pass # ~> legacy return state def __setstate__(self, state: dict[str, int]) -> None: self.Set_source(state['source']) self.Set_tag(state['tag']) self.Set_error(state['error']) if 'count' in state: self.Set_elements(__BYTE__, state['count']) if 'cancelled' in state: self.Set_cancelled(state['cancelled']) def Get_source(self) -> int: """ Get message source. """ cdef int source = MPI_ANY_SOURCE CHKERR( MPI_Status_get_source(&self.ob_mpi, &source) ) return source def Set_source(self, int source: int) -> None: """ Set message source. """ CHKERR( MPI_Status_set_source(&self.ob_mpi, source) ) property source: """Message source.""" def __get__(self) -> int: return self.Get_source() def __set__(self, value: int): self.Set_source(value) def Get_tag(self) -> int: """ Get message tag. """ cdef int tag = MPI_ANY_TAG CHKERR( MPI_Status_get_tag(&self.ob_mpi, &tag) ) return tag def Set_tag(self, int tag: int) -> None: """ Set message tag. """ CHKERR( MPI_Status_set_tag(&self.ob_mpi, tag) ) property tag: """Message tag.""" def __get__(self) -> int: return self.Get_tag() def __set__(self, value: int): self.Set_tag(value) def Get_error(self) -> int: """ Get message error. """ cdef int error = MPI_SUCCESS CHKERR( MPI_Status_get_error(&self.ob_mpi, &error) ) return error def Set_error(self, int error: int) -> None: """ Set message error. """ CHKERR( MPI_Status_set_error(&self.ob_mpi, error) ) property error: """Message error.""" def __get__(self) -> int: return self.Get_error() def __set__(self, value: int): self.Set_error(value) def Get_count(self, Datatype datatype: Datatype = BYTE) -> int: """ Get the number of *top level* elements. 
""" cdef MPI_Datatype dtype = datatype.ob_mpi cdef MPI_Count count = MPI_UNDEFINED CHKERR( MPI_Get_count_c(&self.ob_mpi, dtype, &count) ) return count property count: """Byte count.""" def __get__(self) -> int: return self.Get_count(__BYTE__) def __set__(self, value: int): self.Set_elements(__BYTE__, value) def Get_elements(self, Datatype datatype: Datatype) -> int: """ Get the number of basic elements in a datatype. """ cdef MPI_Datatype dtype = datatype.ob_mpi cdef MPI_Count elements = MPI_UNDEFINED CHKERR( MPI_Get_elements_c(&self.ob_mpi, dtype, &elements) ) return elements def Set_elements( self, Datatype datatype: Datatype, Count count: int, ) -> None: """ Set the number of elements in a status. .. note:: This method should be only used when implementing query callback functions for generalized requests. """ cdef MPI_Datatype dtype = datatype.ob_mpi CHKERR( MPI_Status_set_elements_c(&self.ob_mpi, dtype, count) ) def Is_cancelled(self) -> bool: """ Test to see if a request was cancelled. """ cdef int flag = 0 CHKERR( MPI_Test_cancelled(&self.ob_mpi, &flag) ) return flag def Set_cancelled(self, bint flag: bool) -> None: """ Set the cancelled state associated with a status. .. note:: This method should be used only when implementing query callback functions for generalized requests. """ CHKERR( MPI_Status_set_cancelled(&self.ob_mpi, flag) ) property cancelled: """Cancelled state.""" def __get__(self) -> bool: return self.Is_cancelled() def __set__(self, value: bool): self.Set_cancelled(value) # Fortran Handle # -------------- def py2f(self) -> list[int]: """ """ cdef Status status = self cdef Py_ssize_t n = (sizeof(MPI_Status)//sizeof(int)) cdef MPI_Status *c_status = &status.ob_mpi cdef MPI_Fint *f_status = NULL cdef unused = allocate(n+1, sizeof(MPI_Fint), &f_status) CHKERR( MPI_Status_c2f(c_status, f_status) ) return [f_status[i] for i in range(n)] @classmethod def f2py(cls, arg: list[int]) -> Self: """ """ cdef MPI_Status status cdef MPI_Status *c_status = &status cdef Py_ssize_t n = (sizeof(MPI_Status)//sizeof(int)) cdef MPI_Fint *f_status = NULL cdef unused = allocate(n+1, sizeof(MPI_Fint), &f_status) for i in range(n): f_status[i] = arg[i] CHKERR( MPI_Status_f2c(f_status, c_status) ) return PyMPIStatus_New(c_status) F_SOURCE = MPI_F_SOURCE F_TAG = MPI_F_TAG F_ERROR = MPI_F_ERROR F_STATUS_SIZE = MPI_F_STATUS_SIZE mpi4py-4.0.3/src/mpi4py/MPI.src/Win.pyx000066400000000000000000000561531475341043600175040ustar00rootroot00000000000000# Create flavors # -------------- WIN_FLAVOR_CREATE = MPI_WIN_FLAVOR_CREATE WIN_FLAVOR_ALLOCATE = MPI_WIN_FLAVOR_ALLOCATE WIN_FLAVOR_DYNAMIC = MPI_WIN_FLAVOR_DYNAMIC WIN_FLAVOR_SHARED = MPI_WIN_FLAVOR_SHARED # Memory model # ------------ WIN_SEPARATE = MPI_WIN_SEPARATE WIN_UNIFIED = MPI_WIN_UNIFIED # Assertion modes # --------------- MODE_NOCHECK = MPI_MODE_NOCHECK MODE_NOSTORE = MPI_MODE_NOSTORE MODE_NOPUT = MPI_MODE_NOPUT MODE_NOPRECEDE = MPI_MODE_NOPRECEDE MODE_NOSUCCEED = MPI_MODE_NOSUCCEED # Lock types # ---------- LOCK_EXCLUSIVE = MPI_LOCK_EXCLUSIVE LOCK_SHARED = MPI_LOCK_SHARED cdef class Win: """ Remote memory access context. 
""" def __cinit__(self, Win win: Win | None = None): cinit(self, win) def __dealloc__(self): dealloc(self) def __richcmp__(self, other, int op): if not isinstance(other, Win): return NotImplemented return richcmp(self, other, op) def __bool__(self) -> bool: return nonnull(self) def __reduce__(self) -> str | tuple[Any, ...]: return reduce_default(self) property handle: """MPI handle.""" def __get__(self) -> int: return tohandle(self) @classmethod def fromhandle(cls, handle: int) -> Win: """ Create object from MPI handle. """ return fromhandle( handle) def free(self) -> None: """ Call `Free` if not null. """ safefree(self) # Window Creation # --------------- @classmethod def Create( cls, memory: Buffer | Bottom, Aint disp_unit: int = 1, Info info: Info = INFO_NULL, Intracomm comm: Intracomm = COMM_SELF, ) -> Self: """ Create an window object for one-sided communication. """ cdef void *base = MPI_BOTTOM cdef MPI_Aint size = 0 if is_BOTTOM(memory): memory = None if memory is not None: memory = asbuffer_w(memory, &base, &size) cdef Win win = New(cls) with nogil: CHKERR( MPI_Win_create_c( base, size, disp_unit, info.ob_mpi, comm.ob_mpi, &win.ob_mpi) ) win_set_eh(win.ob_mpi) win.ob_mem = memory return win @classmethod def Allocate( cls, Aint size: int, Aint disp_unit: int = 1, Info info: Info = INFO_NULL, Intracomm comm: Intracomm = COMM_SELF, ) -> Self: """ Create an window object for one-sided communication. """ cdef void *base = NULL cdef Win win = New(cls) with nogil: CHKERR( MPI_Win_allocate_c( size, disp_unit, info.ob_mpi, comm.ob_mpi, &base, &win.ob_mpi) ) win_set_eh(win.ob_mpi) return win @classmethod def Allocate_shared( cls, Aint size: int, Aint disp_unit: int = 1, Info info: Info = INFO_NULL, Intracomm comm: Intracomm = COMM_SELF, ) -> Self: """ Create an window object for one-sided communication. """ cdef void *base = NULL cdef Win win = New(cls) with nogil: CHKERR( MPI_Win_allocate_shared_c( size, disp_unit, info.ob_mpi, comm.ob_mpi, &base, &win.ob_mpi) ) win_set_eh(win.ob_mpi) return win def Shared_query(self, int rank: int) -> tuple[buffer, int]: """ Query the process-local address for remote memory segments. """ cdef void *base = NULL cdef MPI_Aint size = 0 cdef MPI_Aint disp_unit = 1 with nogil: CHKERR( MPI_Win_shared_query_c( self.ob_mpi, rank, &size, &disp_unit, &base) ) return (tobuffer(self, base, size, 0), disp_unit) @classmethod def Create_dynamic( cls, Info info: Info = INFO_NULL, Intracomm comm: Intracomm = COMM_SELF, ) -> Self: """ Create an window object for one-sided communication. """ cdef Win win = New(cls) with nogil: CHKERR( MPI_Win_create_dynamic( info.ob_mpi, comm.ob_mpi, &win.ob_mpi) ) win_set_eh(win.ob_mpi) win.ob_mem = {} return win def Attach(self, memory: Buffer) -> None: """ Attach a local memory region. """ cdef void *base = NULL cdef MPI_Aint size = 0 memory = asbuffer_w(memory, &base, &size) with nogil: CHKERR( MPI_Win_attach(self.ob_mpi, base, size) ) try: if self.ob_mem is None: self.ob_mem = {} (self.ob_mem)[base] = memory except: # ~> uncovered # noqa pass # ~> uncovered def Detach(self, memory: Buffer) -> None: """ Detach a local memory region. """ cdef void *base = NULL memory = asbuffer_w(memory, &base, NULL) with nogil: CHKERR( MPI_Win_detach(self.ob_mpi, base) ) try: if self.ob_mem is None: return del (self.ob_mem)[base] except: # ~> uncovered # noqa pass # ~> uncovered def Free(self) -> None: """ Free a window. 
""" cdef MPI_Win save = self.ob_mpi with nogil: CHKERR( MPI_Win_free(&self.ob_mpi) ) if constobj(self): self.ob_mpi = save self.ob_mem = None # Window Info # ----------- def Set_info(self, Info info: Info) -> None: """ Set new values for the hints associated with a window. """ with nogil: CHKERR( MPI_Win_set_info(self.ob_mpi, info.ob_mpi) ) def Get_info(self) -> Info: """ Return the current hints for a window. """ cdef Info info = New(Info) with nogil: CHKERR( MPI_Win_get_info( self.ob_mpi, &info.ob_mpi) ) return info property info: """Info hints.""" def __get__(self) -> Info: return self.Get_info() def __set__(self, value: Info): self.Set_info(value) # Window Group # ------------- def Get_group(self) -> Group: """ Access the group of processes that created the window. """ cdef Group group = Group() with nogil: CHKERR( MPI_Win_get_group(self.ob_mpi, &group.ob_mpi) ) return group property group: """Group.""" def __get__(self) -> Group: return self.Get_group() property group_size: """Group size.""" def __get__(self) -> int: cdef MPI_Group group = MPI_GROUP_NULL cdef int group_size = -1 CHKERR( MPI_Win_get_group(self.ob_mpi, &group) ) try: CHKERR( MPI_Group_size(group, &group_size) ) finally: CHKERR( MPI_Group_free(&group) ) return group_size property group_rank: """Group rank.""" def __get__(self) -> int: cdef MPI_Group group = MPI_GROUP_NULL cdef int group_rank = MPI_PROC_NULL CHKERR( MPI_Win_get_group(self.ob_mpi, &group) ) try: CHKERR( MPI_Group_rank(group, &group_rank) ) finally: CHKERR( MPI_Group_free(&group) ) return group_rank # Window Attributes # ----------------- def Get_attr(self, int keyval: int) -> int | Any | None: """ Retrieve attribute value by key. """ cdef void *attrval = NULL cdef int flag = 0 CHKERR( MPI_Win_get_attr(self.ob_mpi, keyval, &attrval, &flag) ) if flag == 0: return None if attrval == NULL: return 0 # MPI-2 predefined attribute keyvals if keyval == MPI_WIN_BASE: return attrval elif keyval == MPI_WIN_SIZE: return (attrval)[0] elif keyval == MPI_WIN_DISP_UNIT: return (attrval)[0] # MPI-3 predefined attribute keyvals elif keyval == MPI_WIN_CREATE_FLAVOR: return (attrval)[0] elif keyval == MPI_WIN_MODEL: return (attrval)[0] # user-defined attribute keyval return PyMPI_attr_get(self.ob_mpi, keyval, attrval) def Set_attr(self, int keyval: int, attrval: Any) -> None: """ Store attribute value associated with a key. """ PyMPI_attr_set(self.ob_mpi, keyval, attrval) def Delete_attr(self, int keyval: int) -> None: """ Delete attribute value associated with a key. """ PyMPI_attr_del(self.ob_mpi, keyval) @classmethod def Create_keyval( cls, copy_fn: Callable[[Win, int, Any], Any] | None = None, delete_fn: Callable[[Win, int, Any], None] | None = None, nopython: bool = False, ) -> int: """ Create a new attribute key for windows. """ cdef int keyval = MPI_KEYVAL_INVALID cdef MPI_Win_copy_attr_function *_copy = PyMPI_attr_copy_fn cdef MPI_Win_delete_attr_function *_del = PyMPI_attr_delete_fn cdef _p_keyval state = _p_keyval(copy_fn, delete_fn, nopython) CHKERR( MPI_Win_create_keyval(_copy, _del, &keyval, state) ) PyMPI_attr_state_set(MPI_WIN_NULL, keyval, state) return keyval @classmethod def Free_keyval(cls, int keyval: int) -> int: """ Free an attribute key for windows. """ cdef int keyval_save = keyval CHKERR( MPI_Win_free_keyval(&keyval) ) PyMPI_attr_state_del(MPI_WIN_NULL, keyval_save) return keyval property attrs: "Attributes." 
def __get__(self) -> tuple[int, int, int]: cdef void *base = NULL cdef MPI_Aint size = 0 cdef int disp_unit = 1 win_get_base(self.ob_mpi, &base) win_get_size(self.ob_mpi, &size) win_get_unit(self.ob_mpi, &disp_unit) return (base, size, disp_unit) property flavor: """Create flavor.""" def __get__(self) -> int: cdef int keyval = MPI_WIN_CREATE_FLAVOR cdef int *attrval = NULL, flag = 0 cdef int flavor = MPI_WIN_FLAVOR_CREATE if keyval != MPI_KEYVAL_INVALID: CHKERR( MPI_Win_get_attr(self.ob_mpi, keyval, &attrval, &flag) ) if flag and attrval != NULL: flavor = attrval[0] return flavor property model: """Memory model.""" def __get__(self) -> int: cdef int keyval = MPI_WIN_MODEL cdef int *attrval = NULL, flag = 0 cdef int model = MPI_WIN_SEPARATE if keyval != MPI_KEYVAL_INVALID: CHKERR( MPI_Win_get_attr(self.ob_mpi, keyval, &attrval, &flag) ) if flag and attrval != NULL: model = attrval[0] return model def tomemory(self) -> buffer: """ Return window memory buffer. """ return getbuffer(self, 0, 1) # buffer interface (PEP 3118) def __getbuffer__(self, Py_buffer *view, int flags): cdef void *base = NULL cdef MPI_Aint size = 0 win_get_base(self.ob_mpi, &base) win_get_size(self.ob_mpi, &size) PyBuffer_FillInfo(view, self, base, size, 0, flags) # Communication Operations # ------------------------ def Put( self, origin: BufSpec, int target_rank: int, target: TargetSpec | None = None, ) -> None: """ Put data into a memory window on a remote process. """ cdef _p_msg_rma msg = message_rma() msg.for_put(origin, target_rank, target) with nogil: CHKERR( MPI_Put_c( msg.oaddr, msg.ocount, msg.otype, target_rank, msg.tdisp, msg.tcount, msg.ttype, self.ob_mpi) ) def Get( self, origin: BufSpec, int target_rank: int, target: TargetSpec | None = None, ) -> None: """ Get data from a memory window on a remote process. """ cdef _p_msg_rma msg = message_rma() msg.for_get(origin, target_rank, target) with nogil: CHKERR( MPI_Get_c( msg.oaddr, msg.ocount, msg.otype, target_rank, msg.tdisp, msg.tcount, msg.ttype, self.ob_mpi) ) def Accumulate( self, origin: BufSpec, int target_rank: int, target: TargetSpec | None = None, Op op: Op = SUM, ) -> None: """ Accumulate data into the target process. """ cdef _p_msg_rma msg = message_rma() msg.for_acc(origin, target_rank, target) with nogil: CHKERR( MPI_Accumulate_c( msg.oaddr, msg.ocount, msg.otype, target_rank, msg.tdisp, msg.tcount, msg.ttype, op.ob_mpi, self.ob_mpi) ) def Get_accumulate( self, origin: BufSpec, result: BufSpec, int target_rank: int, target: TargetSpec | None = None, Op op: Op = SUM, ) -> None: """ Fetch-and-accumulate data into the target process. """ cdef _p_msg_rma msg = message_rma() msg.for_get_acc(origin, result, target_rank, target) with nogil: CHKERR( MPI_Get_accumulate_c( msg.oaddr, msg.ocount, msg.otype, msg.raddr, msg.rcount, msg.rtype, target_rank, msg.tdisp, msg.tcount, msg.ttype, op.ob_mpi, self.ob_mpi) ) def Fetch_and_op( self, origin: BufSpec, result: BufSpec, int target_rank: int, Aint target_disp: int = 0, Op op: Op = SUM, ) -> None: """ Perform one-sided read-modify-write. """ cdef _p_msg_rma msg = message_rma() msg.for_fetch_op(origin, result, target_rank, target_disp) with nogil: CHKERR( MPI_Fetch_and_op( msg.oaddr, msg.raddr, msg.ttype, target_rank, target_disp, op.ob_mpi, self.ob_mpi) ) def Compare_and_swap( self, origin: BufSpec, compare: BufSpec, result: BufSpec, int target_rank: int, Aint target_disp: int = 0, ) -> None: """ Perform one-sided atomic compare-and-swap. 
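# Illustrative sketch of passive-target RMA: lock the target, Put local data
# into its window memory, then unlock; assumes the window below was allocated
# with a displacement unit matching the element size.
from array import array
from mpi4py import MPI

comm = MPI.COMM_WORLD
itemsize = MPI.DOUBLE.Get_size()
win = MPI.Win.Allocate(4 * itemsize, disp_unit=itemsize, comm=comm)
local = array('d', [1.0, 2.0, 3.0, 4.0])
target = (comm.Get_rank() + 1) % comm.Get_size()
win.Lock(target)
win.Put(local, target_rank=target)
win.Unlock(target)
win.Free()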
""" cdef _p_msg_rma msg = message_rma() msg.for_cmp_swap(origin, compare, result, target_rank, target_disp) with nogil: CHKERR( MPI_Compare_and_swap( msg.oaddr, msg.caddr, msg.raddr, msg.ttype, target_rank, target_disp, self.ob_mpi) ) # Request-based RMA Communication Operations # ------------------------------------------ def Rput( self, origin: BufSpec, int target_rank: int, target: TargetSpec | None = None, ) -> Request: """ Put data into a memory window on a remote process. """ cdef _p_msg_rma msg = message_rma() msg.for_put(origin, target_rank, target) cdef Request request = New(Request) with nogil: CHKERR( MPI_Rput_c( msg.oaddr, msg.ocount, msg.otype, target_rank, msg.tdisp, msg.tcount, msg.ttype, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = msg return request def Rget( self, origin: BufSpec, int target_rank: int, target: TargetSpec | None = None, ) -> Request: """ Get data from a memory window on a remote process. """ cdef _p_msg_rma msg = message_rma() msg.for_get(origin, target_rank, target) cdef Request request = New(Request) with nogil: CHKERR( MPI_Rget_c( msg.oaddr, msg.ocount, msg.otype, target_rank, msg.tdisp, msg.tcount, msg.ttype, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = msg return request def Raccumulate( self, origin: BufSpec, int target_rank: int, target: TargetSpec | None = None, Op op: Op = SUM, ) -> Request: """ Fetch-and-accumulate data into the target process. """ cdef _p_msg_rma msg = message_rma() msg.for_acc(origin, target_rank, target) cdef Request request = New(Request) with nogil: CHKERR( MPI_Raccumulate_c( msg.oaddr, msg.ocount, msg.otype, target_rank, msg.tdisp, msg.tcount, msg.ttype, op.ob_mpi, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = msg return request def Rget_accumulate( self, origin: BufSpec, result: BufSpec, int target_rank: int, target: TargetSpec | None = None, Op op: Op = SUM, ) -> Request: """ Accumulate data into the target process using remote memory access. """ cdef _p_msg_rma msg = message_rma() msg.for_get_acc(origin, result, target_rank, target) cdef Request request = New(Request) with nogil: CHKERR( MPI_Rget_accumulate_c( msg.oaddr, msg.ocount, msg.otype, msg.raddr, msg.rcount, msg.rtype, target_rank, msg.tdisp, msg.tcount, msg.ttype, op.ob_mpi, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = msg return request # Synchronization Calls # --------------------- # Fence # ----- def Fence(self, int assertion: int = 0) -> None: """ Perform an MPI fence synchronization on a window. """ with nogil: CHKERR( MPI_Win_fence(assertion, self.ob_mpi) ) # General Active Target Synchronization # ------------------------------------- def Start(self, Group group: Group, int assertion: int = 0) -> None: """ Start an RMA access epoch for MPI. """ with nogil: CHKERR( MPI_Win_start( group.ob_mpi, assertion, self.ob_mpi) ) def Complete(self) -> None: """ Complete an RMA operation begun after an `Start`. """ with nogil: CHKERR( MPI_Win_complete(self.ob_mpi) ) def Post(self, Group group: Group, int assertion: int = 0) -> None: """ Start an RMA exposure epoch. """ with nogil: CHKERR( MPI_Win_post( group.ob_mpi, assertion, self.ob_mpi) ) def Wait(self) -> Literal[True]: """ Complete an RMA exposure epoch begun with `Post`. """ with nogil: CHKERR( MPI_Win_wait(self.ob_mpi) ) return True def Test(self) -> bool: """ Test whether an RMA exposure epoch has completed. 
""" cdef int flag = 0 with nogil: CHKERR( MPI_Win_test(self.ob_mpi, &flag) ) return flag # Lock # ---- def Lock( self, int rank: int, int lock_type: int = LOCK_EXCLUSIVE, int assertion: int = 0, ) -> None: """ Begin an RMA access epoch at the target process. """ with nogil: CHKERR( MPI_Win_lock( lock_type, rank, assertion, self.ob_mpi) ) def Unlock(self, int rank: int) -> None: """ Complete an RMA access epoch at the target process. """ with nogil: CHKERR( MPI_Win_unlock(rank, self.ob_mpi) ) def Lock_all(self, int assertion: int = 0) -> None: """ Begin an RMA access epoch at all processes. """ with nogil: CHKERR( MPI_Win_lock_all(assertion, self.ob_mpi) ) def Unlock_all(self) -> None: """ Complete an RMA access epoch at all processes. """ with nogil: CHKERR( MPI_Win_unlock_all(self.ob_mpi) ) # Flush and Sync # -------------- def Flush(self, int rank: int) -> None: """ Complete all outstanding RMA operations at a target. """ with nogil: CHKERR( MPI_Win_flush(rank, self.ob_mpi) ) def Flush_all(self) -> None: """ Complete all outstanding RMA operations at all targets. """ with nogil: CHKERR( MPI_Win_flush_all(self.ob_mpi) ) def Flush_local(self, int rank: int) -> None: """ Complete locally all outstanding RMA operations at a target. """ with nogil: CHKERR( MPI_Win_flush_local(rank, self.ob_mpi) ) def Flush_local_all(self) -> None: """ Complete locally all outstanding RMA operations at all targets. """ with nogil: CHKERR( MPI_Win_flush_local_all(self.ob_mpi) ) def Sync(self) -> None: """ Synchronize public and private copies of the window. """ with nogil: CHKERR( MPI_Win_sync(self.ob_mpi) ) # Error Handling # -------------- @classmethod def Create_errhandler( cls, errhandler_fn: Callable[[Win, int], None], ) -> Errhandler: """ Create a new error handler for windows. """ cdef Errhandler errhandler = New(Errhandler) cdef MPI_Win_errhandler_function *fn = NULL cdef int index = errhdl_new(errhandler_fn, &fn) try: CHKERR( MPI_Win_create_errhandler(fn, &errhandler.ob_mpi) ) except: # ~> uncovered # noqa errhdl_del(&index, fn) # ~> uncovered raise # ~> uncovered return errhandler def Get_errhandler(self) -> Errhandler: """ Get the error handler for a window. """ cdef Errhandler errhandler = New(Errhandler) CHKERR( MPI_Win_get_errhandler(self.ob_mpi, &errhandler.ob_mpi) ) return errhandler def Set_errhandler(self, Errhandler errhandler: Errhandler) -> None: """ Set the error handler for a window. """ CHKERR( MPI_Win_set_errhandler(self.ob_mpi, errhandler.ob_mpi) ) def Call_errhandler(self, int errorcode: int) -> None: """ Call the error handler installed on a window. """ CHKERR( MPI_Win_call_errhandler(self.ob_mpi, errorcode) ) # Naming Objects # -------------- def Get_name(self) -> str: """ Get the print name for this window. """ cdef char name[MPI_MAX_OBJECT_NAME+1] cdef int nlen = 0 CHKERR( MPI_Win_get_name(self.ob_mpi, name, &nlen) ) return tompistr(name, nlen) def Set_name(self, name: str) -> None: """ Set the print name for this window. 
""" cdef char *cname = NULL name = asmpistr(name, &cname) CHKERR( MPI_Win_set_name(self.ob_mpi, cname) ) property name: """Print name.""" def __get__(self) -> str: return self.Get_name() def __set__(self, value: str): self.Set_name(value) # Fortran Handle # -------------- def py2f(self) -> int: """ """ return MPI_Win_c2f(self.ob_mpi) @classmethod def f2py(cls, arg: int) -> Win: """ """ return fromhandle(MPI_Win_f2c(arg)) cdef Win __WIN_NULL__ = def_Win( MPI_WIN_NULL , "WIN_NULL" ) # Predefined window handles # ------------------------- WIN_NULL = __WIN_NULL__ #: Null window handle mpi4py-4.0.3/src/mpi4py/MPI.src/allocate.pxi000066400000000000000000000042051475341043600205020ustar00rootroot00000000000000# ----------------------------------------------------------------------------- cdef extern from "Python.h": enum: PY_SSIZE_T_MAX void *PyMem_Malloc(size_t) noexcept void *PyMem_Calloc(size_t, size_t) noexcept void *PyMem_Realloc(void*, size_t) noexcept void PyMem_Free(void*) noexcept void *PyMem_RawMalloc(size_t) noexcept nogil void *PyMem_RawCalloc(size_t, size_t) noexcept nogil void *PyMem_RawRealloc(void*, size_t) noexcept nogil void PyMem_RawFree(void*) noexcept nogil @cython.final @cython.internal cdef class _PyMem: cdef void *buf cdef Py_ssize_t len cdef void (*free)(void*) noexcept def __cinit__(self): self.buf = NULL self.len = 0 self.free = NULL def __dealloc__(self): if self.free: self.free(self.buf) def __getbuffer__(self, Py_buffer *view, int flags): PyBuffer_FillInfo(view, self, self.buf, self.len, 0, flags) cdef inline _PyMem allocate(Py_ssize_t m, size_t b, void *buf): if m > PY_SSIZE_T_MAX // b: raise MemoryError("memory allocation size too large") # ~> uncovered if m < 0: raise RuntimeError("memory allocation with negative size") # ~> uncovered cdef _PyMem ob = <_PyMem>New(_PyMem) ob.len = m * b ob.free = PyMem_Free ob.buf = PyMem_Malloc(m * b) if ob.buf == NULL: raise MemoryError if buf != NULL: (buf)[0] = ob.buf return ob cdef inline _PyMem rawalloc(Py_ssize_t m, size_t b, bint clear, void *buf): if m > PY_SSIZE_T_MAX // b: raise MemoryError("memory allocation size too large") # ~> uncovered if m < 0: raise RuntimeError("memory allocation with negative size") # ~> uncovered cdef _PyMem ob = <_PyMem>New(_PyMem) ob.len = m * b ob.free = PyMem_RawFree if clear: ob.buf = PyMem_RawCalloc(m, b) else: ob.buf = PyMem_RawMalloc(m * b) if ob.buf == NULL: raise MemoryError if buf != NULL: (buf)[0] = ob.buf return ob # ----------------------------------------------------------------------------- mpi4py-4.0.3/src/mpi4py/MPI.src/asarray.pxi000066400000000000000000000124651475341043600203670ustar00rootroot00000000000000# ----------------------------------------------------------------------------- cdef extern from "Python.h": int PyIndex_Check(object) int PySequence_Check(object) object PyNumber_Index(object) Py_ssize_t PySequence_Size(object) except -1 cdef inline int is_integral(object ob) noexcept: if not PyIndex_Check(ob): return 0 if not PySequence_Check(ob): return 1 try: PySequence_Size(ob) except: pass # noqa else: return 0 try: PyNumber_Index(ob) except: return 0 # noqa else: return 1 # ----------------------------------------------------------------------------- cdef extern from * nogil: const int INT_MAX ctypedef fused count_t: int MPI_Count ctypedef fused integral_t: int MPI_Aint MPI_Count cdef inline int chklength(Py_ssize_t size) except -1: cdef int overflow = (size > (INT_MAX)) if overflow: raise OverflowError("length {size} larger than {INT_MAX}") return 0 cdef inline 
object newarray(Py_ssize_t n, integral_t **p): return allocate(n, sizeof(integral_t), p) cdef inline object getarray(object ob, count_t *n, integral_t **p): cdef Py_ssize_t size = len(ob) if count_t is int: chklength(size) cdef integral_t *base = NULL cdef object mem = newarray(size, &base) for i in range(size): base[i] = PyNumber_Index(ob[i]) n[0] = size p[0] = base return mem cdef inline object chkarray(object ob, count_t n, integral_t **p): cdef count_t size = 0 cdef object mem = getarray(ob, &size, p) if n != size: raise ValueError(f"expecting {n} items, got {size}") return mem # ----------------------------------------------------------------------------- cdef inline object asarray_Datatype( object sequence, MPI_Count size, MPI_Datatype **p, ): cdef MPI_Datatype *array = NULL if size != len(sequence): raise ValueError(f"expecting {size} items, got {len(sequence)}") cdef object ob = allocate(size, sizeof(MPI_Datatype), &array) for i in range(size): array[i] = (sequence[i]).ob_mpi p[0] = array return ob cdef inline object asarray_Info( object sequence, MPI_Count size, MPI_Info **p, ): cdef MPI_Info *array = NULL cdef MPI_Info info = MPI_INFO_NULL cdef object ob if sequence is None or isinstance(sequence, Info): if sequence is not None: info = (sequence).ob_mpi ob = allocate(size, sizeof(MPI_Info), &array) for i in range(size): array[i] = info else: if size != len(sequence): raise ValueError(f"expecting {size} items, got {len(sequence)}") ob = allocate(size, sizeof(MPI_Datatype), &array) for i in range(size): array[i] = (sequence[i]).ob_mpi p[0] = array return ob # ----------------------------------------------------------------------------- cdef inline int is_string(object obj): return isinstance(obj, str) or isinstance(obj, bytes) cdef inline object asstring(object ob, char *s[]): cdef Py_ssize_t n = 0 cdef char *p = NULL, *q = NULL ob = asmpistr(ob, &p) PyBytes_AsStringAndSize(ob, &p, &n) cdef object mem = allocate(n+1, sizeof(char), &q) memcpy(q, p, n) q[n] = 0 s[0] = q return mem cdef inline object asarray_str(object sequence, char ***p): cdef char** array = NULL cdef Py_ssize_t size = len(sequence) cdef object ob = allocate(size+1, sizeof(char*), &array) for i in range(size): sequence[i] = asstring(sequence[i], &array[i]) array[size] = NULL p[0] = array return (sequence, ob) cdef inline object asarray_argv(object sequence, char ***p): if sequence is None: p[0] = MPI_ARGV_NULL return None if is_string(sequence): sequence = [sequence] else: sequence = list(sequence) return asarray_str(sequence, p) cdef inline object asarray_cmds(object sequence, int *count, char ***p): if is_string(sequence): raise ValueError("expecting a sequence of strings") sequence = list(sequence) count[0] = len(sequence) return asarray_str(sequence, p) cdef inline object asarray_argvs(object sequence, int size, char ****p): if sequence is None: p[0] = MPI_ARGVS_NULL return None if is_string(sequence): sequence = [sequence] * size else: sequence = list(sequence) if size != len(sequence): raise ValueError(f"expecting {size} items, got {len(sequence)}") cdef char*** array = NULL cdef object ob = allocate(size+1, sizeof(char**), &array) cdef object argv for i in range(size): argv = sequence[i] if argv is None: argv = [] sequence[i] = asarray_argv(argv, &array[i]) array[size] = NULL p[0] = array return (sequence, ob) cdef inline object asarray_nprocs(object sequence, int size, int **p): cdef object ob cdef int *array = NULL cdef int value = 1 if sequence is None or is_integral(sequence): if sequence is not None: 
value = sequence ob = newarray(size, &array) for i in range(size): array[i] = value else: ob = chkarray(sequence, size, &array) p[0] = array return ob # ----------------------------------------------------------------------------- mpi4py-4.0.3/src/mpi4py/MPI.src/asbuffer.pxi000066400000000000000000000315031475341043600205140ustar00rootroot00000000000000# ----------------------------------------------------------------------------- cdef extern from "Python.h": ctypedef struct PyObject void Py_CLEAR(PyObject*) object PyLong_FromVoidPtr(void*) void* PyLong_AsVoidPtr(object) except? NULL # Python 3 buffer interface (PEP 3118) cdef extern from "Python.h": ctypedef struct Py_buffer: PyObject *obj void *buf Py_ssize_t len Py_ssize_t itemsize bint readonly char *format int ndim Py_ssize_t *shape Py_ssize_t *strides Py_ssize_t *suboffsets void *internal cdef enum: PyBUF_SIMPLE PyBUF_WRITABLE PyBUF_FORMAT PyBUF_ND PyBUF_STRIDES PyBUF_ANY_CONTIGUOUS int PyObject_CheckBuffer(object) int PyObject_GetBuffer(object, Py_buffer *, int) except -1 void PyBuffer_Release(Py_buffer *) int PyBuffer_FillInfo(Py_buffer *, object, void *, Py_ssize_t, bint, int) except -1 cdef extern from "Python.h": enum: PyBUF_READ enum: PyBUF_WRITE object PyMemoryView_FromObject(object) object PyMemoryView_GetContiguous(object, int, char) cdef inline int is_big_endian() noexcept nogil: cdef int i = 1 return (&i)[0] == 0 cdef inline int is_little_endian() noexcept nogil: cdef int i = 1 return (&i)[0] != 0 cdef char BYTE_FMT[2] BYTE_FMT[0] = c'B' BYTE_FMT[1] = 0 include "asdlpack.pxi" include "ascaibuf.pxi" cdef int PyMPI_GetBuffer(object obj, Py_buffer *view, int flags) except -1: try: return PyObject_GetBuffer(obj, view, flags) except BaseException: try: return Py_GetDLPackBuffer(obj, view, flags) except NotImplementedError: pass except BaseException: raise try: return Py_GetCAIBuffer(obj, view, flags) except NotImplementedError: pass except BaseException: raise raise cdef void PyMPI_ReleaseBuffer(int kind, Py_buffer *view) noexcept: if kind == 0: # Python buffer interface PyBuffer_Release(view) else: # DLPack/CAI buffer interface Py_CLEAR(view.obj) # ----------------------------------------------------------------------------- cdef extern from "Python.h": int PyIndex_Check(object) int PySlice_Check(object) int PySlice_Unpack(object, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *) except -1 Py_ssize_t PySlice_AdjustIndices(Py_ssize_t, Py_ssize_t *, Py_ssize_t *, Py_ssize_t) noexcept nogil Py_ssize_t PyNumber_AsSsize_t(object, object) except? -1 cdef extern from "Python.h": # TODO: PySlice_GetIndicesEx is deprecated since Python 3.6.1 int PySlice_GetIndicesEx(object, Py_ssize_t, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *) except -1 cdef inline int check_cpu_accessible(int kind) except -1: cdef unsigned device_type = kind if device_type == 0 : return 0 if device_type == kDLCPU : return 0 if device_type == kDLCUDAHost : return 0 # ~> uncovered if device_type == kDLROCMHost : return 0 # ~> uncovered if device_type == kDLCUDAManaged : return 0 # ~> uncovered raise BufferError("buffer is not CPU-accessible") @cython.final cdef class buffer: """ Buffer. 
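# Illustrative sketch of the MPI.buffer helper: allocate raw memory or wrap
# an existing buffer-like object without copying it.
from mpi4py import MPI

buf = MPI.buffer.allocate(16, clear=True)   # 16 zero-initialized bytes
print(buf.nbytes, buf.readonly)             # 16 False
data = bytearray(b"hello")
view = MPI.buffer.frombuffer(data)          # writable view on `data`
view[0] = ord("H")                          # modifies `data` in place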
""" cdef Py_buffer view cdef int kind def __cinit__(self, *args): if args: self.kind = PyMPI_GetBuffer(args[0], &self.view, PyBUF_SIMPLE) else: PyBuffer_FillInfo(&self.view, NULL, NULL, 0, 0, PyBUF_SIMPLE) def __dealloc__(self): PyMPI_ReleaseBuffer(self.kind, &self.view) @staticmethod def allocate( Aint nbytes: int, bint clear: bool = False, ) -> buffer: """Buffer allocation.""" cdef void *addr = NULL cdef Py_ssize_t size = nbytes if size < 0: raise ValueError("expecting non-negative size") cdef object ob = rawalloc(size, 1, clear, &addr) cdef buffer buf = New(buffer) PyBuffer_FillInfo(&buf.view, ob, addr, size, 0, PyBUF_SIMPLE) return buf @staticmethod def frombuffer( obj: Buffer, bint readonly: bool = False, ) -> buffer: """Buffer from buffer-like object.""" cdef int flags = PyBUF_SIMPLE if not readonly: flags |= PyBUF_WRITABLE cdef buffer buf = New(buffer) buf.kind = PyMPI_GetBuffer(obj, &buf.view, flags) buf.view.readonly = readonly return buf @staticmethod def fromaddress( address: int, Aint nbytes: int, bint readonly: bool = False, ) -> buffer: """Buffer from address and size in bytes.""" cdef void *addr = PyLong_AsVoidPtr(address) cdef Py_ssize_t size = nbytes if size < 0: raise ValueError("expecting non-negative buffer length") elif size > 0 and addr == NULL: raise ValueError("expecting non-NULL address") cdef buffer buf = New(buffer) PyBuffer_FillInfo(&buf.view, NULL, addr, size, readonly, PyBUF_SIMPLE) return buf # properties property address: """Buffer address.""" def __get__(self) -> int: return PyLong_FromVoidPtr(self.view.buf) # memoryview properties property obj: """Object exposing buffer.""" def __get__(self) -> Buffer | None: if self.view.obj == NULL: return None return self.view.obj property nbytes: """Buffer size (in bytes).""" def __get__(self) -> int: return self.view.len property readonly: """Buffer is read-only.""" def __get__(self) -> bool: return self.view.readonly property format: """Format of each element.""" def __get__(self) -> str: if self.view.format != NULL: return pystr(self.view.format) return pystr(BYTE_FMT) property itemsize: """Size (in bytes) of each element.""" def __get__(self) -> int: return self.view.itemsize # memoryview methods def cast( self, format: str, shape: list[int] | tuple[int, ...] = ..., ) -> memoryview: """ Cast to a `memoryview` with new format or shape. 
""" check_cpu_accessible(self.kind) if shape is Ellipsis: return PyMemoryView_FromObject(self).cast(format) else: return PyMemoryView_FromObject(self).cast(format, shape) def tobytes(self, order: str | None = None) -> bytes: """Return the data in the buffer as a byte string.""" order # unused check_cpu_accessible(self.kind) return PyBytes_FromStringAndSize(self.view.buf, self.view.len) def toreadonly(self) -> buffer: """Return a readonly version of the buffer object.""" cdef object obj = self if self.view.obj != NULL: obj = self.view.obj cdef buffer buf = New(buffer) buf.kind = PyMPI_GetBuffer(obj, &buf.view, PyBUF_SIMPLE) buf.view.readonly = 1 return buf def release(self) -> None: """Release the underlying buffer exposed by the buffer object.""" PyMPI_ReleaseBuffer(self.kind, &self.view) PyBuffer_FillInfo(&self.view, NULL, NULL, 0, 0, PyBUF_SIMPLE) self.kind = 0 # buffer interface (PEP 3118) def __getbuffer__(self, Py_buffer *view, int flags): PyBuffer_FillInfo(view, self, self.view.buf, self.view.len, self.view.readonly, flags) # sequence interface (basic) def __len__(self): return self.view.len def __getitem__(self, object item): check_cpu_accessible(self.kind) cdef Py_ssize_t start=0, stop=0, step=1, slen=0 cdef unsigned char *buf = self.view.buf cdef Py_ssize_t blen = self.view.len if PyIndex_Check(item): start = PyNumber_AsSsize_t(item, IndexError) if start < 0: start += blen if start < 0 or start >= blen: raise IndexError("index out of range") return buf[start] elif PySlice_Check(item): # PySlice_Unpack(item, &start, &stop, &step) # slen = PySlice_AdjustIndices(blen, &start, &stop, step) PySlice_GetIndicesEx(item, blen, &start, &stop, &step, &slen) if step != 1: raise IndexError("slice with step not supported") return tobuffer(self, buf+start, slen, self.view.readonly) else: raise TypeError("index must be integer or slice") def __setitem__(self, object item, object value): check_cpu_accessible(self.kind) if self.view.readonly: raise TypeError("buffer is read-only") cdef Py_ssize_t start=0, stop=0, step=1, slen=0 cdef unsigned char *buf = self.view.buf cdef Py_ssize_t blen = self.view.len cdef buffer inbuf if PyIndex_Check(item): start = PyNumber_AsSsize_t(item, IndexError) if start < 0: start += blen if start < 0 or start >= blen: raise IndexError("index out of range") buf[start] = value elif PySlice_Check(item): PySlice_GetIndicesEx(item, blen, &start, &stop, &step, &slen) if step != 1: raise IndexError("slice with step not supported") if PyIndex_Check(value): memset(buf+start, value, slen) else: inbuf = getbuffer(value, 1, 0) if inbuf.view.len != slen: raise ValueError("slice length does not match buffer") memmove(buf+start, inbuf.view.buf, slen) else: raise TypeError("index must be integer or slice") memory = buffer # Backward compatibility alias # ----------------------------------------------------------------------------- cdef inline buffer newbuffer(): return New(buffer) cdef inline buffer getbuffer(object ob, bint readonly, bint format): cdef buffer buf = newbuffer() cdef int flags = PyBUF_ANY_CONTIGUOUS if not readonly: flags |= PyBUF_WRITABLE if format: flags |= PyBUF_FORMAT buf.kind = PyMPI_GetBuffer(ob, &buf.view, flags) return buf cdef inline buffer asbuffer(object ob, void **base, MPI_Aint *size, bint ro): cdef buffer buf if type(ob) is buffer: buf = ob if buf.view.readonly and not ro: raise BufferError("Object is not writable") else: buf = getbuffer(ob, ro, 0) if base != NULL: base[0] = buf.view.buf if size != NULL: size[0] = buf.view.len return buf cdef inline buffer 
asbuffer_r(object ob, void **base, MPI_Aint *size):
    return asbuffer(ob, base, size, 1)

cdef inline buffer asbuffer_w(object ob, void **base, MPI_Aint *size):
    return asbuffer(ob, base, size, 0)

cdef inline buffer tobuffer(object ob, void *base, MPI_Aint size, bint ro):
    if size < 0:
        raise ValueError("expecting non-negative buffer length")
    cdef buffer buf = newbuffer()
    PyBuffer_FillInfo(&buf.view, ob, base, size, ro, PyBUF_SIMPLE)
    return buf

cdef inline buffer mpibuf(void *base, MPI_Count count):
    cdef MPI_Aint size = count
    cdef int neq = (count != size)
    if neq:
        raise OverflowError(f"length {count} does not fit in 'MPI_Aint'")
    return tobuffer(NULL, base, size, 0)

cdef inline object aspybuffer(
    object obj,
    void **base,
    MPI_Aint *size,
    bint readonly,
    const char format[],
):
    cdef int buftype = PyBUF_READ if readonly else PyBUF_WRITE
    obj = PyMemoryView_GetContiguous(obj, buftype, c'A')
    cdef Py_buffer view
    cdef int flags = PyBUF_ANY_CONTIGUOUS
    if not readonly:
        flags |= PyBUF_WRITABLE
    if format != NULL:
        flags |= PyBUF_FORMAT
    PyObject_GetBuffer(obj, &view, flags)
    if format != NULL and view.format != NULL:
        if strncmp(format, view.format, 4) != 0:
            PyBuffer_Release(&view)
            raise ValueError(
                f"expecting buffer with format {pystr(format)!r}, "
                f"got {pystr(view.format)!r}")
    if base != NULL:
        base[0] = view.buf
    if size != NULL:
        size[0] = view.len // view.itemsize
    PyBuffer_Release(&view)
    return obj

# -----------------------------------------------------------------------------
mpi4py-4.0.3/src/mpi4py/MPI.src/ascaibuf.pxi000066400000000000000000000134451475341043600205010ustar00rootroot00000000000000# -----------------------------------------------------------------------------
# CUDA array interface for interoperating Python CUDA GPU libraries
# See https://numba.pydata.org/numba-doc/latest/cuda/cuda_array_interface.html

cdef inline int cuda_is_contig(
    tuple shape,
    tuple strides,
    Py_ssize_t itemsize,
    char order,
) except -1:
    cdef Py_ssize_t i, ndim = len(shape)
    cdef Py_ssize_t start, step, index, dim, size = itemsize
    if order == c'F':
        start = 0
        step = 1
    else:
        start = ndim - 1
        step = -1
    for i in range(ndim):
        index = start + step * i
        dim = shape[index]
        if dim > 1 and size != strides[index]:
            return 0
        size *= dim
    return 1

cdef inline char* cuda_get_format(
    char typekind,
    Py_ssize_t itemsize,
) noexcept nogil:
    if typekind == c'b':
        if itemsize == sizeof(char): return b"?"
if typekind == c'i': if itemsize == sizeof(char): return b"b" if itemsize == sizeof(short): return b"h" if itemsize == sizeof(int): return b"i" if itemsize == sizeof(long): return b"l" if itemsize == sizeof(long long): return b"q" # ~> long if typekind == c'u': if itemsize == sizeof(char): return b"B" if itemsize == sizeof(short): return b"H" if itemsize == sizeof(int): return b"I" if itemsize == sizeof(long): return b"L" if itemsize == sizeof(long long): return b"Q" # ~> long if typekind == c'f': if itemsize == sizeof(float)//2: return b"e" if itemsize == sizeof(float): return b"f" if itemsize == sizeof(double): return b"d" if itemsize == sizeof(long double): return b"g" if typekind == c'c': if itemsize == 2*sizeof(float)//2: return b"Ze" if itemsize == 2*sizeof(float): return b"Zf" if itemsize == 2*sizeof(double): return b"Zd" if itemsize == 2*sizeof(long double): return b"Zg" return BYTE_FMT # ----------------------------------------------------------------------------- cdef int Py_CheckCAIBuffer(object obj) noexcept: try: return hasattr(obj, '__cuda_array_interface__') except: return 0 # ~> uncovered # noqa cdef int Py_GetCAIBuffer(object obj, Py_buffer *view, int flags) except -1: cdef dict cuda_array_interface cdef tuple data cdef str typestr cdef tuple shape cdef tuple strides cdef list descr cdef object dev_ptr, mask cdef void *buf = NULL cdef bint readonly = 0 cdef Py_ssize_t s, size = 1 cdef Py_ssize_t itemsize = 1 cdef char *format = BYTE_FMT cdef char byteorder = c'|' cdef char typekind = c'u' try: cuda_array_interface = obj.__cuda_array_interface__ except AttributeError: raise NotImplementedError("missing CUDA array interface") # mandatory data = cuda_array_interface['data'] typestr = cuda_array_interface['typestr'] shape = cuda_array_interface['shape'] # optional strides = cuda_array_interface.get('strides') descr = cuda_array_interface.get('descr') mask = cuda_array_interface.get('mask') dev_ptr, readonly = data for s in shape: size *= s if dev_ptr is None and size == 0: dev_ptr = 0 buf = PyLong_AsVoidPtr(dev_ptr) byteorder = ord(typestr[0:1]) typekind = ord(typestr[1:2]) itemsize = int(typestr[2:]) format = cuda_get_format(typekind, itemsize) if (flags & PyBUF_FORMAT) == PyBUF_FORMAT: if byteorder == c'<': # little-endian if not is_little_endian(): raise BufferError( # ~> big-endian f"__cuda_array_interface__: " # ~> big-endian f"typestr {typestr!r} " # ~> big-endian f"with non-native byte order") # ~> big-endian elif byteorder == c'>': # big-endian if not is_big_endian(): raise BufferError( # ~> little-endian f"__cuda_array_interface__: " # ~> little-endian f"typestr {typestr!r} " # ~> little-endian f"with non-native byte order") # ~> little-endian elif byteorder != c'|': raise BufferError( f"__cuda_array_interface__: " f"typestr {typestr!r} " f"with unrecognized byte order") if mask is not None: raise BufferError( "__cuda_array_interface__: " "cannot handle masked arrays" ) if size < 0: raise BufferError( f"__cuda_array_interface__: " f"buffer with negative size " f"(shape:{shape}, size:{size})" ) if ( strides is not None and not cuda_is_contig(shape, strides, itemsize, c'C') and not cuda_is_contig(shape, strides, itemsize, c'F') ): raise BufferError( f"__cuda_array_interface__: " f"buffer is not contiguous " f"(shape:{shape}, strides:{strides}, itemsize:{itemsize})" ) if descr is not None and (len(descr) != 1 or descr[0] != ('', typestr)): PyErr_WarnFormat( RuntimeWarning, 1, b"__cuda_array_interface__: %s", b"ignoring 'descr' key", ) if PYPY and readonly and ((flags & 
PyBUF_WRITABLE) == PyBUF_WRITABLE):
        raise BufferError("Object is not writable")  # ~> pypy
    PyBuffer_FillInfo(view, obj, buf, size*itemsize, readonly, flags)
    if (flags & PyBUF_FORMAT) == PyBUF_FORMAT:
        view.format = format
        if view.format != BYTE_FMT:
            view.itemsize = itemsize
    return kDLCUDA

# -----------------------------------------------------------------------------
mpi4py-4.0.3/src/mpi4py/MPI.src/asdlpack.pxi000066400000000000000000000232321475341043600205010ustar00rootroot00000000000000# -----------------------------------------------------------------------------
# From dlpack.h (as of v0.8)

cdef extern from * nogil:

    ctypedef unsigned char uint8_t
    ctypedef unsigned short uint16_t
    ctypedef signed int int32_t
    ctypedef unsigned int uint32_t
    ctypedef signed long long int64_t
    ctypedef unsigned long long uint64_t

    ctypedef struct DLPackVersion:
        uint32_t major
        uint32_t minor

    ctypedef enum DLDeviceType:
        kDLCPU = 1
        kDLCUDA = 2
        kDLCUDAHost = 3
        kDLOpenCL = 4
        kDLVulkan = 7
        kDLMetal = 8
        kDLVPI = 9
        kDLROCM = 10
        kDLROCMHost = 11
        kDLExtDev = 12
        kDLCUDAManaged = 13
        kDLOneAPI = 14
        kDLWebGPU = 15
        kDLHexagon = 16
        kDLMAIA = 17

    ctypedef struct DLDevice:
        DLDeviceType device_type
        int32_t device_id

    ctypedef enum DLDataTypeCode:
        kDLInt = 0
        kDLUInt = 1
        kDLFloat = 2
        kDLOpaqueHandle = 3
        kDLBfloat = 4
        kDLComplex = 5
        kDLBool = 6

    ctypedef struct DLDataType:
        uint8_t code
        uint8_t bits
        uint16_t lanes

    ctypedef struct DLTensor:
        void *data
        DLDevice device
        int32_t ndim
        DLDataType dtype
        int64_t *shape
        int64_t *strides
        uint64_t byte_offset

    ctypedef struct DLManagedTensor:
        DLTensor dl_tensor
        void *manager_ctx
        void (*deleter)(DLManagedTensor *)

    ctypedef enum:
        DLPACK_FLAG_BITMASK_READ_ONLY = (1UL << 0UL)
        DLPACK_FLAG_BITMASK_IS_COPIED = (1UL << 1UL)

    ctypedef struct DLManagedTensorVersioned:
        DLPackVersion version
        void *manager_ctx
        void (*deleter)(DLManagedTensorVersioned *)
        uint64_t flags
        DLTensor dl_tensor

# -----------------------------------------------------------------------------

cdef extern from "Python.h":
    void* PyCapsule_GetPointer(object, const char[]) except?
NULL int PyCapsule_SetName(object, const char[]) except -1 int PyCapsule_IsValid(object, const char[]) # ----------------------------------------------------------------------------- cdef inline int dlpack_check_version( const DLPackVersion *version, unsigned version_major, ) except -1: if version == NULL: return 0 if version.major >= version_major: return 0 raise BufferError("dlpack: unexpected version") cdef inline int dlpack_is_contig( const DLTensor *dltensor, char order, ) noexcept nogil: cdef int i, ndim = dltensor.ndim cdef int64_t *shape = dltensor.shape cdef int64_t *strides = dltensor.strides cdef int64_t start, step, index, dim, size = 1 if strides == NULL: return order != c'F' or ndim <= 1 if order == c'F': start = 0 step = 1 else: start = ndim - 1 step = -1 for i in range(ndim): index = start + step * i dim = shape[index] if dim > 1 and size != strides[index]: return 0 size *= dim return 1 cdef inline int dlpack_check_shape(const DLTensor *dltensor) except -1: cdef int i, ndim = dltensor.ndim if ndim < 0: raise BufferError("dlpack: number of dimensions is negative") if ndim > 0 and dltensor.shape == NULL: raise BufferError("dlpack: shape is NULL") for i in range(ndim): if dltensor.shape[i] < 0: raise BufferError("dlpack: shape item is negative") if dltensor.strides != NULL: for i in range(ndim): if dltensor.strides[i] < 0: raise BufferError("dlpack: strides item is negative") return 0 cdef inline int dlpack_check_contig(const DLTensor *dltensor) except -1: if dlpack_is_contig(dltensor, c'C'): return 0 if dlpack_is_contig(dltensor, c'F'): return 0 raise BufferError("dlpack: buffer is not contiguous") cdef inline void *dlpack_get_data( const DLTensor *dltensor, ) noexcept nogil: return dltensor.data + dltensor.byte_offset cdef inline Py_ssize_t dlpack_get_size( const DLTensor *dltensor, ) noexcept nogil: cdef int i, ndim = dltensor.ndim cdef int64_t *shape = dltensor.shape cdef Py_ssize_t bits = dltensor.dtype.bits cdef Py_ssize_t lanes = dltensor.dtype.lanes cdef Py_ssize_t size = 1 for i in range(ndim): size *= shape[i] size *= (bits * lanes + 7) // 8 return size cdef inline char *dlpack_get_format( const DLTensor *dltensor, ) noexcept nogil: cdef unsigned int code = dltensor.dtype.code cdef unsigned int bits = dltensor.dtype.bits if dltensor.dtype.lanes != 1: if code == kDLFloat and dltensor.dtype.lanes == 2: if bits == 8*sizeof(float): return b"Zf" if bits == 8*sizeof(double): return b"Zd" if bits == 8*sizeof(long double): return b"Zg" return b"B" if code == kDLBool: if bits == 8: return b"?" 
if code == kDLInt: if bits == 8*sizeof(char): return b"b" if bits == 8*sizeof(short): return b"h" if bits == 8*sizeof(int): return b"i" if bits == 8*sizeof(long): return b"l" if bits == 8*sizeof(long long): return b"q" # ~> long if code == kDLUInt: if bits == 8*sizeof(char): return b"B" if bits == 8*sizeof(short): return b"H" if bits == 8*sizeof(int): return b"I" if bits == 8*sizeof(long): return b"L" if bits == 8*sizeof(long long): return b"Q" # ~> long if code == kDLFloat: if bits == 8*sizeof(float)//2: return b"e" if bits == 8*sizeof(float): return b"f" if bits == 8*sizeof(double): return b"d" if bits == 8*sizeof(long double): return b"g" if code == kDLComplex: if bits == 8*2*sizeof(float)//2: return b"Ze" if bits == 8*2*sizeof(float): return b"Zf" if bits == 8*2*sizeof(double): return b"Zd" if bits == 8*2*sizeof(long double): return b"Zg" # ~> uncovered return BYTE_FMT cdef inline Py_ssize_t dlpack_get_itemsize( const DLTensor *dltensor, ) noexcept nogil: cdef unsigned int code = dltensor.dtype.code cdef unsigned int bits = dltensor.dtype.bits if dltensor.dtype.lanes != 1: if code == kDLFloat and dltensor.dtype.lanes == 2: if ( bits == 8*sizeof(float) or bits == 8*sizeof(double) or bits == 8*sizeof(long double) ): return bits // 8 * 2 bits = 1 return (bits + 7) // 8 # ----------------------------------------------------------------------------- cdef int Py_CheckDLPackBuffer(object obj) noexcept: try: return hasattr(obj, '__dlpack__') except: return 0 # ~> uncovered # noqa cdef int Py_GetDLPackBuffer(object obj, Py_buffer *view, int flags) except -1: cdef unsigned version_major = 1 cdef const char *capsulename = b"dltensor_versioned" cdef const char *usedcapsulename = b"used_dltensor_versioned" cdef uint64_t READONLY = DLPACK_FLAG_BITMASK_READ_ONLY cdef object dlpack cdef object dlpack_device cdef tuple max_version cdef unsigned device_type cdef int device_id cdef object capsule cdef void *pointer cdef DLManagedTensorVersioned *managed1 = NULL cdef DLManagedTensor *managed0 = NULL cdef const DLPackVersion *dlversion cdef const DLTensor *dltensor cdef void *buf cdef Py_ssize_t size cdef Py_ssize_t itemsize cdef char *format cdef bint readonly try: dlpack = obj.__dlpack__ dlpack_device = obj.__dlpack_device__ except AttributeError: raise NotImplementedError("dlpack: missing support") device_type, device_id = dlpack_device() device_id # unused try: # DLPack v1.0+ max_version = (version_major, 0) if device_type == kDLCPU: capsule = dlpack(max_version=max_version, copy=False) else: capsule = dlpack(stream=-1, max_version=max_version, copy=False) except TypeError: # DLPack v0.x version_major = 0 capsulename = b"dltensor" usedcapsulename = b"used_dltensor" if device_type == kDLCPU: capsule = dlpack() else: capsule = dlpack(stream=-1) if not PyCapsule_IsValid(capsule, capsulename): raise BufferError("dlpack: invalid capsule object") pointer = PyCapsule_GetPointer(capsule, capsulename) if version_major >= 1: managed1 = pointer dlversion = &managed1.version dltensor = &managed1.dl_tensor readonly = (managed1.flags & READONLY) == READONLY else: managed0 = pointer dlversion = NULL dltensor = &managed0.dl_tensor readonly = 0 try: dlpack_check_version(dlversion, version_major) dlpack_check_shape(dltensor) dlpack_check_contig(dltensor) buf = dlpack_get_data(dltensor) size = dlpack_get_size(dltensor) itemsize = dlpack_get_itemsize(dltensor) format = dlpack_get_format(dltensor) finally: if managed1 != NULL: if managed1.deleter != NULL: managed1.deleter(managed1) if managed0 != NULL: if managed0.deleter 
!= NULL: managed0.deleter(managed0) PyCapsule_SetName(capsule, usedcapsulename) del capsule if PYPY and readonly and ((flags & PyBUF_WRITABLE) == PyBUF_WRITABLE): raise BufferError("Object is not writable") # ~> pypy PyBuffer_FillInfo(view, obj, buf, size, readonly, flags) if (flags & PyBUF_FORMAT) == PyBUF_FORMAT: view.format = format if view.format != BYTE_FMT: view.itemsize = itemsize return device_type # ----------------------------------------------------------------------------- mpi4py-4.0.3/src/mpi4py/MPI.src/asfspath.pxi000066400000000000000000000007521475341043600205320ustar00rootroot00000000000000# ----------------------------------------------------------------------------- cdef extern from "Python.h": object PyOS_FSPath(object) object PyUnicode_EncodeFSDefault(object) cdef inline object asmpifspath(object path, char *p[]): path = PyOS_FSPath(path) if PyUnicode_Check(path): path = PyUnicode_EncodeFSDefault(path) PyBytes_AsStringAndSize(path, p, NULL) return path # ----------------------------------------------------------------------------- mpi4py-4.0.3/src/mpi4py/MPI.src/asstring.pxi000066400000000000000000000046111475341043600205510ustar00rootroot00000000000000# ----------------------------------------------------------------------------- cdef extern from "Python.h": int PyUnicode_Check(object) object PyUnicode_AsUTF8String(object) object PyUnicode_FromString(const char[]) object PyUnicode_FromStringAndSize(const char[], Py_ssize_t) object PyBytes_FromString(const char[]) object PyBytes_FromStringAndSize(const char[], Py_ssize_t) int PyBytes_AsStringAndSize(object, char*[], Py_ssize_t*) except -1 # ----------------------------------------------------------------------------- cdef inline object asmpistr(object ob, char *s[]): if PyUnicode_Check(ob): ob = PyUnicode_AsUTF8String(ob) PyBytes_AsStringAndSize(ob, s, NULL) return ob cdef inline object tompistr(const char s[], int n): return PyUnicode_FromStringAndSize(s, n) cdef inline object mpistr(const char s[]): return PyUnicode_FromString(s) cdef inline object pystr(const char s[]): return PyUnicode_FromString(s) # ----------------------------------------------------------------------------- cdef extern from * nogil: """ static int PyMPI_tolower(int c) { return (c >= 'A' && c <= 'Z') ? 
c + ('a' - 'A') : c; } static int PyMPI_strcasecmp(const char *s1, const char *s2) { int c1, c2; do { c1 = PyMPI_tolower((int)*s1++); c2 = PyMPI_tolower((int)*s2++); } while (c1 && c1 == c2); return c1 - c2; } """ int PyMPI_strcasecmp(const char[], const char[]) cdef inline int cstr2bool(const char s[]) noexcept nogil: cdef const char **T = [b"true", b"yes", b"on", b"y", b"1"], *t = NULL cdef const char **F = [b"false", b"no", b"off", b"n", b"0"], *f = NULL if s == NULL: return 0 if s[0] == 0: return 0 for f in F[:5]: if PyMPI_strcasecmp(s, f) == 0: return 0 for t in T[:5]: if PyMPI_strcasecmp(s, t) == 0: return 1 return -1 cdef inline int cstr_is_bool(const char s[]) noexcept nogil: return cstr2bool(s) >= 0 cdef inline int cstr_is_uint(const char s[]) noexcept nogil: if s == NULL: return 0 if s[0] == 0: return 0 for i in range(64): if s[i] == c' ' or s[i] == c'\t': continue if s[i] >= c'0' and s[i] <= c'9': continue if s[i] == 0: return 1 break return 0 # ----------------------------------------------------------------------------- mpi4py-4.0.3/src/mpi4py/MPI.src/atimport.pxi000066400000000000000000000336641475341043600205700ustar00rootroot00000000000000# ----------------------------------------------------------------------------- cdef extern from "Python.h": """ #define MPICH_SKIP_MPICXX 1 #define OMPI_SKIP_MPICXX 1 """ cdef extern from "" nogil: """ #include "lib-mpi/config.h" #include "lib-mpi/missing.h" #include "lib-mpi/fallback.h" #include "lib-mpi/compat.h" #include "pympivendor.h" #include "pympicommctx.h" """ # ----------------------------------------------------------------------------- cdef extern from "Python.h": enum: PY_VERSION_HEX ctypedef ssize_t Py_intptr_t ctypedef size_t Py_uintptr_t cdef extern from "Python.h": """ #ifdef Py_PYTHON_H #if PY_VERSION_HEX < 0x030B0000 && !defined(Py_GETENV) # define Py_GETENV(s) (Py_IgnoreEnvironmentFlag ? NULL : getenv(s)) #endif #endif """ const char *Py_GETENV(const char[]) nogil cdef extern from "Python.h": """ #ifdef Py_PYTHON_H #if PY_VERSION_HEX < 0x30C00A7 && !defined(PyErr_DisplayException) #define PyErr_DisplayException PyErr_DisplayException_312 static void PyErr_DisplayException(PyObject *exc) { PyObject *et = NULL; PyObject *tb = NULL; #if defined(PYPY_VERSION) et = PyObject_Type(exc); tb = PyException_GetTraceback(exc); #endif PyErr_Display(et, exc, tb); if (et) Py_DecRef(et); if (tb) Py_DecRef(tb); } #endif #endif """ void *PyExc_RuntimeError void *PyExc_NotImplementedError void PyErr_SetNone(object) void PyErr_SetObject(object, object) void PyErr_DisplayException(object) int PyErr_WarnFormat(object, Py_ssize_t, const char[], ...) except -1 void PySys_WriteStderr(const char[], ...) 
# ----------------------------------------------------------------------------- cdef extern from * nogil: """ #if defined(PYPY_VERSION) # define PyMPI_RUNTIME_PYPY 1 #else # define PyMPI_RUNTIME_PYPY 0 #endif """ enum: PYPY "PyMPI_RUNTIME_PYPY" # ----------------------------------------------------------------------------- cdef extern from * nogil: """ #if !defined(PyMPI_USE_MATCHED_RECV) # if defined(PyMPI_HAVE_MPI_Mprobe) && defined(PyMPI_HAVE_MPI_Mrecv) # if defined(MPI_VERSION) && MPI_VERSION >= 3 # define PyMPI_USE_MATCHED_RECV 1 # endif # endif #endif #if !defined(PyMPI_USE_MATCHED_RECV) # define PyMPI_USE_MATCHED_RECV 0 #endif """ enum: USE_MATCHED_RECV "PyMPI_USE_MATCHED_RECV" ctypedef struct Options: bint initialize bint threads int thread_level bint finalize bint fast_reduce bint recv_mprobe MPI_Count irecv_bufsz int errors cdef Options options options.initialize = 1 options.threads = 1 options.thread_level = MPI_THREAD_MULTIPLE options.finalize = 1 options.fast_reduce = 1 options.recv_mprobe = 1 options.irecv_bufsz = 32768 options.errors = 1 cdef object getOpt(object rc, const char name[], object value): cdef bytes bname = b"MPI4PY_RC_" + name.upper() cdef const char *cname = bname cdef const char *cvalue = Py_GETENV(cname) if cvalue == NULL: return getattr(rc, pystr(name), value) if cstr_is_uint(cvalue) and (type(value) is int): value = int(pystr(cvalue)) elif cstr_is_bool(cvalue): value = cstr2bool(cvalue) else: value = pystr(cvalue).lower() try: setattr(rc, pystr(name), value) except: # noqa pass return value cdef int warnOpt(const char name[], object value) except -1: value = PyUnicode_AsUTF8String(repr(value)) PyErr_WarnFormat( RuntimeWarning, 1, b"mpi4py.rc.%s: unexpected value %.200s", name, value, ) return 0 cdef int getOptions(Options* opts) except -1: opts.initialize = 1 opts.threads = 1 opts.thread_level = MPI_THREAD_MULTIPLE opts.finalize = 1 opts.fast_reduce = 1 opts.recv_mprobe = USE_MATCHED_RECV opts.irecv_bufsz = 32768 opts.errors = 1 # cdef object rc try: from . 
import rc except (ImportError, ImportWarning): rc = None # cdef object initialize = getOpt(rc, b"initialize" , True ) cdef object threads = getOpt(rc, b"threads" , True ) cdef object thread_level = getOpt(rc, b"thread_level" , 'multiple' ) cdef object finalize = getOpt(rc, b"finalize" , None ) cdef object fast_reduce = getOpt(rc, b"fast_reduce" , True ) cdef object recv_mprobe = getOpt(rc, b"recv_mprobe" , True ) cdef object irecv_bufsz = getOpt(rc, b"irecv_bufsz" , 32768 ) cdef object errors = getOpt(rc, b"errors" , 'exception' ) # if initialize in (True, 'yes'): opts.initialize = 1 elif initialize in (False, 'no'): opts.initialize = 0 else: warnOpt(b"initialize", initialize) # if threads in (True, 'yes'): opts.threads = 1 elif threads in (False, 'no'): opts.threads = 0 else: warnOpt(b"threads", threads) # if thread_level == 'single': opts.thread_level = MPI_THREAD_SINGLE elif thread_level == 'funneled': opts.thread_level = MPI_THREAD_FUNNELED elif thread_level == 'serialized': opts.thread_level = MPI_THREAD_SERIALIZED elif thread_level == 'multiple': opts.thread_level = MPI_THREAD_MULTIPLE else: warnOpt(b"thread_level", thread_level) # if finalize is None: opts.finalize = opts.initialize elif finalize in (True, 'yes'): opts.finalize = 1 elif finalize in (False, 'no'): opts.finalize = 0 else: warnOpt(b"finalize", finalize) # if fast_reduce in (True, 'yes'): opts.fast_reduce = 1 elif fast_reduce in (False, 'no'): opts.fast_reduce = 0 else: warnOpt(b"fast_reduce", fast_reduce) # if recv_mprobe in (True, 'yes'): opts.recv_mprobe = 1 and USE_MATCHED_RECV elif recv_mprobe in (False, 'no'): opts.recv_mprobe = 0 else: warnOpt(b"recv_mprobe", recv_mprobe) # if type(irecv_bufsz) is int and irecv_bufsz >= 0: opts.irecv_bufsz = irecv_bufsz else: warnOpt(b"irecv_bufsz", irecv_bufsz) # if errors == 'default': opts.errors = 0 elif errors == 'exception': opts.errors = 1 elif errors == 'abort': opts.errors = 2 if MPI_ERRORS_ABORT == MPI_ERRHANDLER_NULL: opts.errors = 3 elif errors == 'fatal': opts.errors = 3 else: warnOpt(b"errors", errors) # return 0 # ----------------------------------------------------------------------------- cdef int warn_environ(const char envvar[]) except -1 with gil: PyErr_WarnFormat( RuntimeWarning, 1, b"environment variable %s: " b"unexpected value '%.200s'", envvar, getenv(envvar), ) cdef int warn_mpiexec( const char envvar[], const char vendor[], ) except -1 with gil: PyErr_WarnFormat( RuntimeWarning, 1, b"suspicious MPI execution environment\n" b"Your environment has %s=%.200s set, " b"but mpi4py was built with %s.\n" b"You may be using `mpiexec` or `mpirun` " b"from a different MPI implementation.", envvar, getenv(envvar), vendor, ) cdef int check_mpiexec() except -1 nogil: cdef int ierr, size = 0 ierr = MPI_Comm_size(MPI_COMM_WORLD, &size) if ierr != MPI_SUCCESS: return 0 if size > 1: return 0 cdef int check = 1 cdef const char *ename = b"MPI4PY_CHECK_MPIEXEC" cdef const char *value = Py_GETENV(ename) if value != NULL: check = cstr2bool(value) if check == -1: warn_environ(ename) if check <= 0: return 0 cdef const char *vendor = NULL PyMPI_Get_vendor(&vendor, NULL, NULL, NULL) cdef bint MPICH = (strncmp(b"MPICH", vendor, 6) == 0) cdef bint IMPI = (strncmp(b"Intel MPI", vendor, 10) == 0) cdef bint OPENMPI = (strncmp(b"Open MPI", vendor, 9) == 0) cdef const char *mpich = b"HYDI_CONTROL_FD" cdef const char *impi = b"I_MPI_HYDRA_TOPOLIB" cdef const char *pmi_sz = b"PMI_SIZE" cdef const char *openmpi = b"OMPI_COMM_WORLD_SIZE" cdef const char *bad_env = NULL MPICH |= 
(strncmp(b"MVAPICH", vendor, 8) == 0) MPICH |= (strncmp(b"MVAPICH2", vendor, 9) == 0) if MPICH: if getenv(mpich) == NULL and getenv(pmi_sz) == NULL: # ~> mpich if getenv(openmpi) != NULL: # ~> mpich bad_env = openmpi # ~> mpich if IMPI: if getenv(impi) == NULL and getenv(pmi_sz) == NULL: # ~> impi if getenv(openmpi) != NULL: # ~> impi bad_env = openmpi # ~> impi if OPENMPI: if getenv(openmpi) == NULL: # ~> openmpi if getenv(mpich) != NULL and getenv(pmi_sz) != NULL: # ~> openmpi bad_env = pmi_sz # ~> openmpi if getenv(impi ) != NULL and getenv(pmi_sz) != NULL: # ~> openmpi bad_env = pmi_sz # ~> openmpi if bad_env != NULL: warn_mpiexec(bad_env, vendor) return 0 # ----------------------------------------------------------------------------- cdef extern from "Python.h": int Py_AtExit(void (*)() noexcept nogil) cdef int bootstrap() except -1: # Get options from 'mpi4py.rc' module getOptions(&options) # Cleanup at (the very end of) Python exit if Py_AtExit(atexit) < 0: PySys_WriteStderr( # ~> uncovered b"WARNING: %s\n", # ~> uncovered b"could not register cleanup with Py_AtExit()", # ~> uncovered ) # Do we have to initialize MPI? cdef int initialized = 1 MPI_Initialized(&initialized) if initialized: options.finalize = 0 # ~> TODO return 0 # ~> TODO if not options.initialize: return 0 # MPI initialization cdef int ierr = MPI_SUCCESS cdef int required = MPI_THREAD_SINGLE cdef int provided = MPI_THREAD_SINGLE if options.threads: required = options.thread_level ierr = MPI_Init_thread(NULL, NULL, required, &provided) if ierr != MPI_SUCCESS: raise RuntimeError( # ~> uncovered f"MPI_Init_thread() failed " # ~> uncovered f"[error code: {ierr}]") # ~> uncovered else: ierr = MPI_Init(NULL, NULL) if ierr != MPI_SUCCESS: raise RuntimeError( # ~> uncovered f"MPI_Init() failed " # ~> uncovered f"[error code: {ierr}]") # ~> uncovered return 0 @cython.linetrace(False) cdef inline int mpi_active() noexcept nogil: cdef int ierr = MPI_SUCCESS # MPI initialized ? cdef int initialized = 0 ierr = MPI_Initialized(&initialized) if not initialized or ierr != MPI_SUCCESS: return 0 # MPI finalized ? cdef int finalized = 1 ierr = MPI_Finalized(&finalized) if finalized or ierr != MPI_SUCCESS: return 0 # MPI should be active ... return 1 cdef int mpi_version = 0 cdef int mpi_subversion = 0 cdef int mpi_numversion = 0 cdef int initialize() except -1 nogil: global mpi_version, mpi_subversion, mpi_numversion MPI_Get_version(&mpi_version, &mpi_subversion) mpi_numversion = 10 * mpi_version + mpi_subversion if not mpi_active(): return 0 check_mpiexec() comm_set_eh(MPI_COMM_SELF) comm_set_eh(MPI_COMM_WORLD) return 0 @cython.linetrace(False) cdef void finalize() noexcept nogil: if not mpi_active(): return PyMPI_Commctx_finalize() cdef int abort_status = 0 @cython.linetrace(False) cdef void atexit() noexcept nogil: if not mpi_active(): return if abort_status: MPI_Abort(MPI_COMM_WORLD, abort_status) finalize() if options.finalize: MPI_Finalize() def _set_abort_status(int status: int) -> None: """ Helper for ``python -m mpi4py.run ...``. 
""" global abort_status abort_status = status # ----------------------------------------------------------------------------- # Raise exceptions without adding to traceback cdef extern from * nogil: enum: PyMPI_ERR_UNAVAILABLE cdef object MPIException = PyExc_RuntimeError cdef int PyMPI_Raise(int ierr) except -1 with gil: if ierr == PyMPI_ERR_UNAVAILABLE: PyErr_SetObject(PyExc_NotImplementedError, None) # ~> uncovered return 0 # ~> uncovered if (MPIException) == NULL: PyErr_SetObject(PyExc_RuntimeError, ierr) # ~> uncovered return 0 # ~> uncovered PyErr_SetObject(MPIException, ierr) return 0 cdef inline int CHKERR(int ierr) except -1 nogil: if ierr == MPI_SUCCESS: return 0 PyMPI_Raise(ierr) return -1 cdef int PyMPI_HandleException(object exc) noexcept: PyErr_DisplayException(exc) if (MPIException) != NULL: if isinstance(exc, Exception): return (exc).ob_mpi return MPI_ERR_OTHER # ----------------------------------------------------------------------------- cdef extern from "Python.h": # PyPy: Py_IsInitialized() cannot be called without the GIL int _Py_IsInitialized"Py_IsInitialized"() noexcept nogil cdef object _py_module_sentinel = None @cython.linetrace(False) cdef inline int py_module_alive() noexcept nogil: return NULL != _py_module_sentinel @cython.linetrace(False) cdef inline int Py_IsInitialized() noexcept nogil: if PYPY and not py_module_alive(): return 0 return _Py_IsInitialized() # ----------------------------------------------------------------------------- mpi4py-4.0.3/src/mpi4py/MPI.src/attrimpl.pxi000066400000000000000000000215521475341043600205560ustar00rootroot00000000000000# ----------------------------------------------------------------------------- cdef extern from "Python.h": void Py_INCREF(object) void Py_DECREF(object) # ----------------------------------------------------------------------------- @cython.final @cython.internal cdef class _p_keyval: cdef public object copy_fn cdef public object delete_fn cdef public bint nopython cdef int ierr cdef object lock def __cinit__(self, copy_fn, delete_fn, nopython): if copy_fn is False: copy_fn = None if delete_fn is False: delete_fn = None if delete_fn is True: delete_fn = None self.copy_fn = copy_fn self.delete_fn = delete_fn self.nopython = nopython self.ierr = MPI_SUCCESS self.lock = RLock() cdef object keyval_lock_type = Lock() cdef object keyval_lock_comm = Lock() cdef object keyval_lock_win = Lock() cdef dict keyval_registry_type = {} cdef dict keyval_registry_comm = {} cdef dict keyval_registry_win = {} _keyval_registry = { 'Datatype' : keyval_registry_type, 'Comm' : keyval_registry_comm, 'Win' : keyval_registry_win, } # ----------------------------------------------------------------------------- ctypedef fused PyMPI_attr_type: MPI_Datatype MPI_Comm MPI_Win cdef inline object PyMPI_attr_call( object function, PyMPI_attr_type hdl, int keyval, object attrval, ): cdef object handle cdef object result if PyMPI_attr_type is MPI_Datatype: handle = fromhandle(hdl) if PyMPI_attr_type is MPI_Comm: handle = fromhandle(hdl) if PyMPI_attr_type is MPI_Win: handle = fromhandle(hdl) try: result = function(handle, keyval, attrval) finally: if False: pass elif PyMPI_attr_type is MPI_Datatype: (handle).ob_mpi = MPI_DATATYPE_NULL elif PyMPI_attr_type is MPI_Comm: (handle).ob_mpi = MPI_COMM_NULL elif PyMPI_attr_type is MPI_Win: (handle).ob_mpi = MPI_WIN_NULL return result cdef inline int PyMPI_attr_copy( PyMPI_attr_type hdl, int keyval, void *extra_state, void *attrval_in, void *attrval_out, int *flag, ) except -1: if flag != NULL: 
flag[0] = 0 cdef _p_keyval state = <_p_keyval>extra_state if state.copy_fn is None: return 0 cdef int p = not state.nopython if p and attrval_in == NULL: raise RuntimeError cdef object attrval if p: attrval = attrval_in else: attrval = PyLong_FromVoidPtr(attrval_in) if state.copy_fn is not True: attrval = PyMPI_attr_call(state.copy_fn, hdl, keyval, attrval) if attrval is NotImplemented: return 0 cdef void **outval = attrval_out if p: outval[0] = attrval else: outval[0] = PyLong_AsVoidPtr(attrval) if flag != NULL: flag[0] = 1 if p: Py_INCREF(attrval) Py_INCREF(state) return 0 cdef inline int PyMPI_attr_delete( PyMPI_attr_type hdl, int keyval, void *attrval_in, void *extra_state, ) except -1: cdef _p_keyval state = <_p_keyval>extra_state cdef int p = not state.nopython if p and attrval_in == NULL: raise RuntimeError cdef object attrval if p: attrval = attrval_in else: attrval = PyLong_FromVoidPtr(attrval_in) try: if state.delete_fn is not None: PyMPI_attr_call(state.delete_fn, hdl, keyval, attrval) finally: if p: Py_DECREF(attrval) Py_DECREF(state) return 0 cdef inline int PyMPI_attr_copy_cb( PyMPI_attr_type hdl, int keyval, void *extra_state, void *attrval_in, void *attrval_out, int *flag, ) noexcept with gil: cdef int ierr = MPI_SUCCESS cdef object exc try: PyMPI_attr_copy( hdl, keyval, extra_state, attrval_in, attrval_out, flag, ) except BaseException as exc: ierr = PyMPI_HandleException(exc) return ierr cdef inline int PyMPI_attr_delete_cb( PyMPI_attr_type hdl, int keyval, void *attrval, void *extra_state, ) noexcept with gil: cdef int ierr = MPI_SUCCESS cdef object exc cdef _p_keyval state = <_p_keyval>extra_state state.ierr = MPI_SUCCESS try: PyMPI_attr_delete( hdl, keyval, attrval, extra_state, ) except BaseException as exc: ierr = PyMPI_HandleException(exc) state.ierr, ierr = ierr, MPI_SUCCESS return ierr @cython.callspec("MPIAPI") cdef int PyMPI_attr_copy_fn( PyMPI_attr_type hdl, int keyval, void *extra_state, void *attrval_in, void *attrval_out, int *flag, ) noexcept nogil: if flag != NULL: flag[0] = 0 if extra_state == NULL: return MPI_ERR_INTERN if attrval_out == NULL: return MPI_ERR_INTERN if not Py_IsInitialized(): return MPI_SUCCESS if not py_module_alive(): return MPI_SUCCESS return PyMPI_attr_copy_cb( hdl, keyval, extra_state, attrval_in, attrval_out, flag, ) @cython.callspec("MPIAPI") cdef int PyMPI_attr_delete_fn( PyMPI_attr_type hdl, int keyval, void *attrval, void *extra_state, ) noexcept nogil: if extra_state == NULL: return MPI_ERR_INTERN if not Py_IsInitialized(): return MPI_SUCCESS if not py_module_alive(): return MPI_SUCCESS return PyMPI_attr_delete_cb( hdl, keyval, attrval, extra_state, ) # --- cdef inline _p_keyval PyMPI_attr_state_get( PyMPI_attr_type hdl, int keyval, ): hdl # unused if PyMPI_attr_type is MPI_Datatype: with keyval_lock_type: return <_p_keyval>keyval_registry_type.get(keyval) if PyMPI_attr_type is MPI_Comm: with keyval_lock_comm: return <_p_keyval>keyval_registry_comm.get(keyval) if PyMPI_attr_type is MPI_Win: with keyval_lock_win: return <_p_keyval>keyval_registry_win.get(keyval) cdef inline int PyMPI_attr_state_set( PyMPI_attr_type hdl, int keyval, _p_keyval state, ) except -1: hdl # unused if PyMPI_attr_type is MPI_Datatype: with keyval_lock_type: keyval_registry_type[keyval] = state if PyMPI_attr_type is MPI_Comm: with keyval_lock_comm: keyval_registry_comm[keyval] = state if PyMPI_attr_type is MPI_Win: with keyval_lock_win: keyval_registry_win[keyval] = state return 0 cdef inline int PyMPI_attr_state_del( PyMPI_attr_type hdl, int keyval, ) 
except -1: hdl # unused try: if PyMPI_attr_type is MPI_Datatype: with keyval_lock_type: del keyval_registry_type[keyval] if PyMPI_attr_type is MPI_Comm: with keyval_lock_comm: del keyval_registry_comm[keyval] if PyMPI_attr_type is MPI_Win: with keyval_lock_win: del keyval_registry_win[keyval] except KeyError: # ~> uncovered pass return 0 # --- cdef inline object PyMPI_attr_get( PyMPI_attr_type hdl, int keyval, void *attrval, ): cdef _p_keyval state = PyMPI_attr_state_get(hdl, keyval) if state is not None and not state.nopython: return attrval else: return PyLong_FromVoidPtr(attrval) cdef inline int PyMPI_set_attr( PyMPI_attr_type hdl, int keyval, void *attrval, ) except -1: if PyMPI_attr_type is MPI_Datatype: CHKERR( MPI_Type_set_attr(hdl, keyval, attrval) ) if PyMPI_attr_type is MPI_Comm: CHKERR( MPI_Comm_set_attr(hdl, keyval, attrval) ) if PyMPI_attr_type is MPI_Win: CHKERR( MPI_Win_set_attr(hdl, keyval, attrval) ) return 0 cdef inline int PyMPI_attr_set( PyMPI_attr_type hdl, int keyval, object attrval, ) except -1: cdef _p_keyval state = PyMPI_attr_state_get(hdl, keyval) cdef void *valptr = NULL if state is not None and not state.nopython: valptr = attrval else: valptr = PyLong_AsVoidPtr(attrval) PyMPI_set_attr(hdl, keyval, valptr) if state is not None: if not state.nopython: Py_INCREF(attrval) Py_INCREF(state) return 0 cdef inline int PyMPI_delete_attr( PyMPI_attr_type hdl, int keyval, ) except -1: if PyMPI_attr_type is MPI_Datatype: CHKERR( MPI_Type_delete_attr(hdl, keyval) ) if PyMPI_attr_type is MPI_Comm: CHKERR( MPI_Comm_delete_attr(hdl, keyval) ) if PyMPI_attr_type is MPI_Win: CHKERR( MPI_Win_delete_attr(hdl, keyval) ) return 0 cdef inline int PyMPI_attr_del( PyMPI_attr_type hdl, int keyval, ) except -1: cdef int ierr = MPI_SUCCESS cdef _p_keyval state = PyMPI_attr_state_get(hdl, keyval) if state is None: return PyMPI_delete_attr(hdl, keyval) with state.lock: PyMPI_delete_attr(hdl, keyval) ierr, state.ierr = state.ierr, ierr CHKERR( ierr ) # ----------------------------------------------------------------------------- mpi4py-4.0.3/src/mpi4py/MPI.src/bufaimpl.pxi000066400000000000000000000060331475341043600205160ustar00rootroot00000000000000# ----------------------------------------------------------------------------- @cython.final cdef class BufferAutomaticType(int): """ Type of `BUFFER_AUTOMATIC`. 
""" def __cinit__(self): cdef MPI_Aint a = self, b = MPI_BUFFER_AUTOMATIC if a != b: raise ValueError("cannot create instance") def __getbuffer__(self, Py_buffer *view, int flags): cdef void *bufauto = MPI_BUFFER_AUTOMATIC PyBuffer_FillInfo(view, NULL, bufauto, 0, 0, flags) def __repr__(self) -> str: self # unused return 'BUFFER_AUTOMATIC' def __reduce__(self) -> str: self # unused return 'BUFFER_AUTOMATIC' cdef object __BUFFER_AUTOMATIC__ = \ BufferAutomaticType(MPI_BUFFER_AUTOMATIC) cdef inline bint is_BUFFER_AUTOMATIC(object obj): return is_constant(obj, __BUFFER_AUTOMATIC__) # ----------------------------------------------------------------------------- ctypedef fused PyMPI_attach_buffer_type: Py_uintptr_t Comm Session cdef dict _mpi_buffer_comm = {} cdef dict _mpi_buffer_session = {} cdef inline object attach_buffer( object buf, void **p, MPI_Count *n, ): cdef void *bptr = MPI_BUFFER_AUTOMATIC cdef MPI_Aint blen = 0 if buf is None or is_BUFFER_AUTOMATIC(buf): buf = __BUFFER_AUTOMATIC__ else: buf = asbuffer_w(buf, &bptr, &blen) p[0] = bptr n[0] = blen return buf cdef inline int detach_buffer_set( PyMPI_attach_buffer_type obj, object buf, ) except -1: cdef Py_uintptr_t handle if PyMPI_attach_buffer_type is Py_uintptr_t: handle = obj handle # unused _mpi_buffer_comm[None] = buf if PyMPI_attach_buffer_type is Comm: handle = obj.ob_mpi # ~> MPI-4.1 _mpi_buffer_comm[handle] = buf # ~> MPI-4.1 if PyMPI_attach_buffer_type is Session: handle = obj.ob_mpi # ~> MPI-4.1 _mpi_buffer_session[handle] = buf # ~> MPI-4.1 return 0 cdef inline object detach_buffer_get( PyMPI_attach_buffer_type obj, void *p, MPI_Count n, ): cdef Py_uintptr_t handle cdef buffer buf = None if PyMPI_attach_buffer_type is Py_uintptr_t: handle = obj handle # unused buf = _mpi_buffer_comm.pop(None, None) if PyMPI_attach_buffer_type is Comm: handle = obj.ob_mpi # ~> MPI-4.1 buf = _mpi_buffer_comm.pop(handle, None) # ~> MPI-4.1 if PyMPI_attach_buffer_type is Session: handle = obj.ob_mpi # ~> MPI-4.1 buf = _mpi_buffer_session.pop(handle, None) # ~> MPI-4.1 if p == MPI_BUFFER_AUTOMATIC: return __BUFFER_AUTOMATIC__ # ~> MPI-4.1 if buf is not None and buf.view.buf == p and buf.view.obj != NULL: return buf.view.obj return mpibuf(p, n) # ----------------------------------------------------------------------------- mpi4py-4.0.3/src/mpi4py/MPI.src/commimpl.pxi000066400000000000000000000110501475341043600205270ustar00rootroot00000000000000# ----------------------------------------------------------------------------- cdef object __UNWEIGHTED__ = MPI_UNWEIGHTED cdef object __WEIGHTS_EMPTY__ = MPI_WEIGHTS_EMPTY cdef inline bint is_UNWEIGHTED(object weights): return is_constant(weights, __UNWEIGHTED__) cdef inline bint is_WEIGHTS_EMPTY(object weights): return is_constant(weights, __WEIGHTS_EMPTY__) cdef inline object asarray_weights(object weights, int nweight, int **iweight): if weights is None: iweight[0] = MPI_UNWEIGHTED return None if is_UNWEIGHTED(weights): iweight[0] = MPI_UNWEIGHTED return None if is_WEIGHTS_EMPTY(weights): if nweight > 0: raise ValueError("empty weights but nonzero degree") iweight[0] = MPI_WEIGHTS_EMPTY return None return chkarray(weights, nweight, iweight) # ----------------------------------------------------------------------------- cdef inline int comm_neighbors_count( MPI_Comm comm, int *incoming, int *outgoing, ) except -1: cdef int topo = MPI_UNDEFINED cdef int size=0, ndims=0, rank=0, nneighbors=0 cdef int indegree=0, outdegree=0, weighted=0 CHKERR( MPI_Topo_test(comm, &topo) ) if topo == MPI_UNDEFINED: # 
~> unreachable CHKERR( MPI_Comm_size(comm, &size) ) # ~> unreachable indegree = outdegree = size # ~> unreachable elif topo == MPI_CART: CHKERR( MPI_Cartdim_get(comm, &ndims) ) indegree = outdegree = 2*ndims elif topo == MPI_GRAPH: CHKERR( MPI_Comm_rank(comm, &rank) ) CHKERR( MPI_Graph_neighbors_count( comm, rank, &nneighbors) ) indegree = outdegree = nneighbors elif topo == MPI_DIST_GRAPH: CHKERR( MPI_Dist_graph_neighbors_count( comm, &indegree, &outdegree, &weighted) ) if incoming != NULL: incoming[0] = indegree if outgoing != NULL: outgoing[0] = outdegree return 0 # ----------------------------------------------------------------------------- cdef int commlock_keyval = MPI_KEYVAL_INVALID cdef object commlock_lock = Lock() cdef dict commlock_registry = {} cdef inline int commlock_free_cb( MPI_Comm comm, ) except MPI_ERR_UNKNOWN with gil: cdef object key = comm with commlock_lock: if key in commlock_registry: del commlock_registry[key] return MPI_SUCCESS @cython.linetrace(False) @cython.callspec("MPIAPI") cdef int commlock_free_fn( MPI_Comm comm, int keyval, void *attrval, void *xstate, ) noexcept nogil: keyval # unused attrval # unused xstate # unused if comm == MPI_COMM_SELF: MPI_Comm_free_keyval(&commlock_keyval) if not Py_IsInitialized(): return MPI_SUCCESS if not py_module_alive(): return MPI_SUCCESS return commlock_free_cb(comm) cdef inline dict commlock_table(MPI_Comm comm): cdef int found = 0 cdef void *attrval = NULL cdef dict table if commlock_keyval == MPI_KEYVAL_INVALID: CHKERR( MPI_Comm_create_keyval( MPI_COMM_NULL_COPY_FN, commlock_free_fn, &commlock_keyval, NULL) ) table = {} CHKERR( MPI_Comm_set_attr( MPI_COMM_SELF, commlock_keyval, table) ) commlock_registry[MPI_COMM_SELF] = table CHKERR( MPI_Comm_get_attr( comm, commlock_keyval, &attrval, &found) ) if not found: table = {} CHKERR( MPI_Comm_set_attr( comm, commlock_keyval, table) ) commlock_registry[comm] = table elif PYPY: table = commlock_registry[comm] # ~> pypy else: table = attrval return table cdef inline object PyMPI_Lock(MPI_Comm comm, object key): cdef dict table cdef object lock with commlock_lock: table = commlock_table(comm) try: lock = table[key] except KeyError: lock = Lock() table[key] = lock return lock cdef inline object PyMPI_Lock_table(MPI_Comm comm): with commlock_lock: return commlock_table(comm) def _comm_lock(Comm comm: Comm, object key: Hashable | None = None) -> Lock: """ Create/get communicator lock. """ return PyMPI_Lock(comm.ob_mpi, key) def _comm_lock_table(Comm comm: Comm) -> dict[Hashable, Lock]: """ Internal communicator lock table. 
""" return PyMPI_Lock_table(comm.ob_mpi) # ----------------------------------------------------------------------------- mpi4py-4.0.3/src/mpi4py/MPI.src/drepimpl.pxi000066400000000000000000000121351475341043600205330ustar00rootroot00000000000000# ----------------------------------------------------------------------------- cdef object datarep_lock = Lock() cdef dict datarep_registry = {} @cython.linetrace(False) # ~> TODO @cython.final @cython.internal cdef class _p_datarep: cdef object read_fn cdef object write_fn cdef object extent_fn def __cinit__(self, read_fn, write_fn, extent_fn): self.read_fn = read_fn self.write_fn = write_fn self.extent_fn = extent_fn cdef int read( self, void *userbuf, MPI_Datatype datatype, MPI_Count count, void *filebuf, MPI_Offset position, ) except -1: cdef MPI_Count lb=0, extent=0 CHKERR( MPI_Type_get_extent_c(datatype, &lb, &extent) ) cdef MPI_Count ulen = (position + count) * extent cdef MPI_Count flen = PY_SSIZE_T_MAX cdef object ubuf = mpibuf(userbuf, ulen) cdef object fbuf = mpibuf(filebuf, flen) cdef Datatype dtype = New(Datatype) dtype.ob_mpi = datatype try: self.read_fn(ubuf, dtype, count, fbuf, position) finally: dtype.ob_mpi = MPI_DATATYPE_NULL return 0 cdef int write( self, void *userbuf, MPI_Datatype datatype, MPI_Count count, void *filebuf, MPI_Offset position, ) except -1: cdef MPI_Count lb=0, extent=0 CHKERR( MPI_Type_get_extent_c(datatype, &lb, &extent) ) cdef MPI_Count ulen = (position + count) * extent cdef MPI_Count flen = PY_SSIZE_T_MAX cdef object ubuf = mpibuf(userbuf, ulen) cdef object fbuf = mpibuf(filebuf, flen) cdef Datatype dtype = New(Datatype) dtype.ob_mpi = datatype try: self.write_fn(ubuf, dtype, count, fbuf, position) finally: dtype.ob_mpi = MPI_DATATYPE_NULL return 0 cdef int extent( self, MPI_Datatype datatype, MPI_Aint *file_extent, ) except -1: cdef Datatype dtype = New(Datatype) dtype.ob_mpi = datatype try: file_extent[0] = self.extent_fn(dtype) finally: dtype.ob_mpi = MPI_DATATYPE_NULL return 0 # --- @cython.linetrace(False) # ~> TODO cdef int datarep_read( void *userbuf, MPI_Datatype datatype, MPI_Count count, void *filebuf, MPI_Offset position, void *extra_state, ) noexcept with gil: cdef _p_datarep state = <_p_datarep>extra_state cdef int ierr = MPI_SUCCESS cdef object exc try: state.read(userbuf, datatype, count, filebuf, position) except BaseException as exc: ierr = PyMPI_HandleException(exc) return ierr @cython.linetrace(False) # ~> TODO cdef int datarep_write( void *userbuf, MPI_Datatype datatype, MPI_Count count, void *filebuf, MPI_Offset position, void *extra_state, ) noexcept with gil: cdef _p_datarep state = <_p_datarep>extra_state cdef int ierr = MPI_SUCCESS cdef object exc try: state.write(userbuf, datatype, count, filebuf, position) except BaseException as exc: ierr = PyMPI_HandleException(exc) return ierr @cython.linetrace(False) # ~> TODO cdef int datarep_extent( MPI_Datatype datatype, MPI_Aint *file_extent, void *extra_state, ) noexcept with gil: cdef _p_datarep state = <_p_datarep>extra_state cdef int ierr = MPI_SUCCESS cdef object exc try: state.extent(datatype, file_extent) except BaseException as exc: ierr = PyMPI_HandleException(exc) return ierr # --- @cython.linetrace(False) # ~> TODO @cython.callspec("MPIAPI") cdef int datarep_read_fn( void *userbuf, MPI_Datatype datatype, MPI_Count count, void *filebuf, MPI_Offset position, void *extra_state, ) noexcept nogil: if extra_state == NULL: return MPI_ERR_INTERN if not Py_IsInitialized(): return MPI_ERR_INTERN if not py_module_alive(): return 
MPI_ERR_INTERN return datarep_read( userbuf, datatype, count, filebuf, position, extra_state, ) @cython.linetrace(False) # ~> TODO @cython.callspec("MPIAPI") cdef int datarep_write_fn( void *userbuf, MPI_Datatype datatype, MPI_Count count, void *filebuf, MPI_Offset position, void *extra_state, ) noexcept nogil: if extra_state == NULL: return MPI_ERR_INTERN if not Py_IsInitialized(): return MPI_ERR_INTERN if not py_module_alive(): return MPI_ERR_INTERN return datarep_write( userbuf, datatype, count, filebuf, position, extra_state, ) @cython.linetrace(False) # ~> TODO @cython.callspec("MPIAPI") cdef int datarep_extent_fn( MPI_Datatype datatype, MPI_Aint *file_extent, void *extra_state, ) noexcept nogil: if extra_state == NULL: return MPI_ERR_INTERN if not Py_IsInitialized(): return MPI_ERR_INTERN if not py_module_alive(): return MPI_ERR_INTERN return datarep_extent( datatype, file_extent, extra_state, ) # ----------------------------------------------------------------------------- mpi4py-4.0.3/src/mpi4py/MPI.src/errhimpl.pxi000066400000000000000000000300101475341043600205310ustar00rootroot00000000000000# ----------------------------------------------------------------------------- ctypedef fused mpi_scwf_t: MPI_Session MPI_Comm MPI_Win MPI_File ctypedef fused mpi_ehfn_t: MPI_Session_errhandler_function MPI_Comm_errhandler_function MPI_Win_errhandler_function MPI_File_errhandler_function cdef object errhdl_lock = Lock() cdef list errhdl_registry = [ [None]*(1+32), # Session [None]*(1+32), # Comm [None]*(1+32), # Win [None]*(1+32), # File ] cdef inline void errhdl_call_mpi( int index, mpi_scwf_t handle, int errcode, ) noexcept with gil: cdef object pyhandle = None cdef object registry = None # errors in user-defined error handler functions are unrecoverable try: if mpi_scwf_t is MPI_Session: pyhandle = fromhandle(handle) registry = errhdl_registry[0] if mpi_scwf_t is MPI_Comm: pyhandle = fromhandle(handle) registry = errhdl_registry[1] if mpi_scwf_t is MPI_Win: pyhandle = fromhandle(handle) registry = errhdl_registry[2] if mpi_scwf_t is MPI_File: pyhandle = fromhandle(handle) registry = errhdl_registry[3] try: registry[index](pyhandle, errcode) finally: if False: pass elif mpi_scwf_t is MPI_Session: (pyhandle).ob_mpi = MPI_SESSION_NULL elif mpi_scwf_t is MPI_Comm: (pyhandle).ob_mpi = MPI_COMM_NULL elif mpi_scwf_t is MPI_Win: (pyhandle).ob_mpi = MPI_WIN_NULL elif mpi_scwf_t is MPI_File: (pyhandle).ob_mpi = MPI_FILE_NULL except BaseException as exc: # ~> uncovered PyErr_DisplayException(exc) # ~> uncovered PySys_WriteStderr( # ~> uncovered b"Fatal Python error: %s\n", # ~> uncovered b"exception in user-defined error handler function", # ~> uncovered ) # ~> uncovered MPI_Abort(MPI_COMM_WORLD, 1) # ~> uncovered cdef inline void errhdl_call( int index, mpi_scwf_t handle, int errcode, ) noexcept nogil: # make it abort if Python has finalized if not Py_IsInitialized(): MPI_Abort(MPI_COMM_WORLD, 1) # make it abort if module cleanup has been done if not py_module_alive(): MPI_Abort(MPI_COMM_WORLD, 1) # make the actual GIL-safe Python call errhdl_call_mpi(index, handle, errcode) @cython.callspec("MPIAPI") cdef void errhdl_01(mpi_scwf_t *handle, int *errcode, ...) noexcept nogil: errhdl_call( 1, handle[0], errcode[0]) @cython.callspec("MPIAPI") cdef void errhdl_02(mpi_scwf_t *handle, int *errcode, ...) noexcept nogil: errhdl_call( 2, handle[0], errcode[0]) @cython.callspec("MPIAPI") cdef void errhdl_03(mpi_scwf_t *handle, int *errcode, ...) 
noexcept nogil: errhdl_call( 3, handle[0], errcode[0]) @cython.callspec("MPIAPI") cdef void errhdl_04(mpi_scwf_t *handle, int *errcode, ...) noexcept nogil: errhdl_call( 4, handle[0], errcode[0]) @cython.callspec("MPIAPI") cdef void errhdl_05(mpi_scwf_t *handle, int *errcode, ...) noexcept nogil: errhdl_call( 5, handle[0], errcode[0]) @cython.callspec("MPIAPI") cdef void errhdl_06(mpi_scwf_t *handle, int *errcode, ...) noexcept nogil: errhdl_call( 6, handle[0], errcode[0]) @cython.callspec("MPIAPI") cdef void errhdl_07(mpi_scwf_t *handle, int *errcode, ...) noexcept nogil: errhdl_call( 7, handle[0], errcode[0]) @cython.callspec("MPIAPI") cdef void errhdl_08(mpi_scwf_t *handle, int *errcode, ...) noexcept nogil: errhdl_call( 8, handle[0], errcode[0]) @cython.callspec("MPIAPI") cdef void errhdl_09(mpi_scwf_t *handle, int *errcode, ...) noexcept nogil: errhdl_call( 9, handle[0], errcode[0]) @cython.callspec("MPIAPI") cdef void errhdl_10(mpi_scwf_t *handle, int *errcode, ...) noexcept nogil: errhdl_call( 10, handle[0], errcode[0]) @cython.callspec("MPIAPI") cdef void errhdl_11(mpi_scwf_t *handle, int *errcode, ...) noexcept nogil: errhdl_call( 11, handle[0], errcode[0]) @cython.callspec("MPIAPI") cdef void errhdl_12(mpi_scwf_t *handle, int *errcode, ...) noexcept nogil: errhdl_call( 12, handle[0], errcode[0]) @cython.callspec("MPIAPI") cdef void errhdl_13(mpi_scwf_t *handle, int *errcode, ...) noexcept nogil: errhdl_call( 13, handle[0], errcode[0]) @cython.callspec("MPIAPI") cdef void errhdl_14(mpi_scwf_t *handle, int *errcode, ...) noexcept nogil: errhdl_call( 14, handle[0], errcode[0]) @cython.callspec("MPIAPI") cdef void errhdl_15(mpi_scwf_t *handle, int *errcode, ...) noexcept nogil: errhdl_call( 15, handle[0], errcode[0]) @cython.callspec("MPIAPI") cdef void errhdl_16(mpi_scwf_t *handle, int *errcode, ...) noexcept nogil: errhdl_call( 16, handle[0], errcode[0]) @cython.callspec("MPIAPI") cdef void errhdl_17(mpi_scwf_t *handle, int *errcode, ...) noexcept nogil: errhdl_call( 17, handle[0], errcode[0]) @cython.callspec("MPIAPI") cdef void errhdl_18(mpi_scwf_t *handle, int *errcode, ...) noexcept nogil: errhdl_call( 18, handle[0], errcode[0]) @cython.callspec("MPIAPI") cdef void errhdl_19(mpi_scwf_t *handle, int *errcode, ...) noexcept nogil: errhdl_call( 19, handle[0], errcode[0]) @cython.callspec("MPIAPI") cdef void errhdl_20(mpi_scwf_t *handle, int *errcode, ...) noexcept nogil: errhdl_call( 20, handle[0], errcode[0]) @cython.callspec("MPIAPI") cdef void errhdl_21(mpi_scwf_t *handle, int *errcode, ...) noexcept nogil: errhdl_call( 21, handle[0], errcode[0]) @cython.callspec("MPIAPI") cdef void errhdl_22(mpi_scwf_t *handle, int *errcode, ...) noexcept nogil: errhdl_call( 22, handle[0], errcode[0]) @cython.callspec("MPIAPI") cdef void errhdl_23(mpi_scwf_t *handle, int *errcode, ...) noexcept nogil: errhdl_call( 23, handle[0], errcode[0]) @cython.callspec("MPIAPI") cdef void errhdl_24(mpi_scwf_t *handle, int *errcode, ...) noexcept nogil: errhdl_call( 24, handle[0], errcode[0]) @cython.callspec("MPIAPI") cdef void errhdl_25(mpi_scwf_t *handle, int *errcode, ...) noexcept nogil: errhdl_call( 25, handle[0], errcode[0]) @cython.callspec("MPIAPI") cdef void errhdl_26(mpi_scwf_t *handle, int *errcode, ...) noexcept nogil: errhdl_call( 26, handle[0], errcode[0]) @cython.callspec("MPIAPI") cdef void errhdl_27(mpi_scwf_t *handle, int *errcode, ...) noexcept nogil: errhdl_call( 27, handle[0], errcode[0]) @cython.callspec("MPIAPI") cdef void errhdl_28(mpi_scwf_t *handle, int *errcode, ...) 
noexcept nogil: errhdl_call( 28, handle[0], errcode[0]) @cython.callspec("MPIAPI") cdef void errhdl_29(mpi_scwf_t *handle, int *errcode, ...) noexcept nogil: errhdl_call( 29, handle[0], errcode[0]) @cython.callspec("MPIAPI") cdef void errhdl_30(mpi_scwf_t *handle, int *errcode, ...) noexcept nogil: errhdl_call( 30, handle[0], errcode[0]) @cython.callspec("MPIAPI") cdef void errhdl_31(mpi_scwf_t *handle, int *errcode, ...) noexcept nogil: errhdl_call( 31, handle[0], errcode[0]) @cython.callspec("MPIAPI") cdef void errhdl_32(mpi_scwf_t *handle, int *errcode, ...) noexcept nogil: errhdl_call( 32, handle[0], errcode[0]) cdef inline void errhdl_map(int index, mpi_ehfn_t **fn) noexcept nogil: if index == 1: fn[0] = errhdl_01 elif index == 2: fn[0] = errhdl_02 elif index == 3: fn[0] = errhdl_03 elif index == 4: fn[0] = errhdl_04 elif index == 5: fn[0] = errhdl_05 elif index == 6: fn[0] = errhdl_06 elif index == 7: fn[0] = errhdl_07 elif index == 8: fn[0] = errhdl_08 elif index == 9: fn[0] = errhdl_09 elif index == 10: fn[0] = errhdl_10 elif index == 11: fn[0] = errhdl_11 elif index == 12: fn[0] = errhdl_12 elif index == 13: fn[0] = errhdl_13 elif index == 14: fn[0] = errhdl_14 elif index == 15: fn[0] = errhdl_15 elif index == 16: fn[0] = errhdl_16 elif index == 17: fn[0] = errhdl_17 elif index == 18: fn[0] = errhdl_18 elif index == 19: fn[0] = errhdl_19 elif index == 20: fn[0] = errhdl_20 elif index == 21: fn[0] = errhdl_21 elif index == 22: fn[0] = errhdl_22 elif index == 23: fn[0] = errhdl_23 elif index == 24: fn[0] = errhdl_24 elif index == 25: fn[0] = errhdl_25 elif index == 26: fn[0] = errhdl_26 elif index == 27: fn[0] = errhdl_27 elif index == 28: fn[0] = errhdl_28 elif index == 29: fn[0] = errhdl_29 elif index == 30: fn[0] = errhdl_30 elif index == 31: fn[0] = errhdl_31 elif index == 32: fn[0] = errhdl_32 cdef inline int errhdl_new( object function, mpi_ehfn_t **fn, ) except -1: # check whether the function is callable function.__call__ # find a free slot in the registry # and register the Python function cdef list registry = None if mpi_ehfn_t is MPI_Session_errhandler_function: registry = errhdl_registry[0] if mpi_ehfn_t is MPI_Comm_errhandler_function: registry = errhdl_registry[1] if mpi_ehfn_t is MPI_Win_errhandler_function: registry = errhdl_registry[2] if mpi_ehfn_t is MPI_File_errhandler_function: registry = errhdl_registry[3] cdef int index = 0 try: with errhdl_lock: index = registry.index(None, 1) registry[index] = function except ValueError: raise RuntimeError( "cannot create too many user-defined error handlers", ) # map slot index to the associated C callback, # and return the slot index in the registry errhdl_map(index, fn) return index @cython.linetrace(False) # ~> TODO cdef inline int errhdl_del( int *indexp, mpi_ehfn_t *fn, ) except -1: fn # unused # clear index value cdef int index = indexp[0] indexp[0] = 0 # free slot in the registry cdef object registry = None if mpi_ehfn_t is MPI_Session_errhandler_function: registry = errhdl_registry[0] if mpi_ehfn_t is MPI_Comm_errhandler_function: registry = errhdl_registry[1] if mpi_ehfn_t is MPI_Win_errhandler_function: registry = errhdl_registry[2] if mpi_ehfn_t is MPI_File_errhandler_function: registry = errhdl_registry[3] with errhdl_lock: registry[index] = None return 0 # ----------------------------------------------------------------------------- cdef inline int session_set_eh(MPI_Session ob) except -1 nogil: if ob == MPI_SESSION_NULL: return 0 cdef int opt = options.errors if opt == 0: pass elif opt == 1: CHKERR( 
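# -----------------------------------------------------------------------------
# Illustrative sketch (not part of this module): the *_set_eh() helpers around
# this point apply the policy selected by options.errors to each new handle;
# 0 leaves the MPI default, 1 selects MPI_ERRORS_RETURN, 2 MPI_ERRORS_ABORT,
# and 3 MPI_ERRORS_ARE_FATAL.  From user code the switch is mpi4py.rc.errors,
# set before importing the MPI submodule; the option names quoted below
# ("exception", "fatal") are the usual spellings and are an assumption here.
def _example_rc_errors():
    import mpi4py
    mpi4py.rc.errors = "fatal"   # e.g. "exception" (default) or "fatal"
    from mpi4py import MPI       # new handles now use MPI_ERRORS_ARE_FATAL
    return MPI.COMM_WORLD.Get_errhandler()
# -----------------------------------------------------------------------------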
MPI_Session_set_errhandler(ob, MPI_ERRORS_RETURN) ) elif opt == 2: CHKERR( MPI_Session_set_errhandler(ob, MPI_ERRORS_ABORT) ) elif opt == 3: CHKERR( MPI_Session_set_errhandler(ob, MPI_ERRORS_ARE_FATAL) ) return 0 cdef inline int comm_set_eh(MPI_Comm ob) except -1 nogil: if ob == MPI_COMM_NULL: return 0 cdef int opt = options.errors if opt == 0: pass elif opt == 1: CHKERR( MPI_Comm_set_errhandler(ob, MPI_ERRORS_RETURN) ) elif opt == 2: CHKERR( MPI_Comm_set_errhandler(ob, MPI_ERRORS_ABORT) ) elif opt == 3: CHKERR( MPI_Comm_set_errhandler(ob, MPI_ERRORS_ARE_FATAL) ) return 0 cdef inline int win_set_eh(MPI_Win ob) except -1 nogil: if ob == MPI_WIN_NULL: return 0 cdef int opt = options.errors if opt == 0: pass elif opt == 1: CHKERR( MPI_Win_set_errhandler(ob, MPI_ERRORS_RETURN) ) elif opt == 2: CHKERR( MPI_Win_set_errhandler(ob, MPI_ERRORS_ABORT) ) elif opt == 3: CHKERR( MPI_Win_set_errhandler(ob, MPI_ERRORS_ARE_FATAL) ) return 0 cdef inline int file_set_eh(MPI_File ob) except -1 nogil: if ob == MPI_FILE_NULL: return 0 cdef int opt = options.errors if opt == 0: pass elif opt == 1: CHKERR( MPI_File_set_errhandler(ob, MPI_ERRORS_RETURN) ) elif opt == 2: CHKERR( MPI_File_set_errhandler(ob, MPI_ERRORS_ABORT) ) elif opt == 3: CHKERR( MPI_File_set_errhandler(ob, MPI_ERRORS_ARE_FATAL) ) return 0 # ----------------------------------------------------------------------------- mpi4py-4.0.3/src/mpi4py/MPI.src/msgbuffer.pxi000066400000000000000000001163271475341043600207070ustar00rootroot00000000000000# ----------------------------------------------------------------------------- cdef extern from "Python.h": int is_list "PyList_Check" (object) int is_tuple "PyTuple_Check" (object) cdef inline int is_buffer(object ob) noexcept: return PyObject_CheckBuffer(ob) cdef inline int is_dlpack_buffer(object ob) noexcept: return Py_CheckDLPackBuffer(ob) cdef inline int is_cai_buffer(object ob) noexcept: return Py_CheckCAIBuffer(ob) cdef inline int is_datatype(object ob) except -1: if isinstance(ob, Datatype): return 1 if isinstance(ob, str): return 1 return 0 cdef inline bint is_constant(object obj, object CONST): if PYPY: return type(obj) is type(CONST) and obj == CONST return obj is CONST # ----------------------------------------------------------------------------- @cython.final cdef class BottomType(int): """ Type of `BOTTOM`. """ def __cinit__(self): cdef MPI_Aint a = self, b = MPI_BOTTOM if a != b : raise ValueError("cannot create instance") def __getbuffer__(self, Py_buffer *view, int flags): PyBuffer_FillInfo(view, NULL, MPI_BOTTOM, 0, 0, flags) def __repr__(self) -> str: self # unused return 'BOTTOM' def __reduce__(self) -> str: self # unused return 'BOTTOM' @cython.final cdef class InPlaceType(int): """ Type of `IN_PLACE`. 
""" def __cinit__(self): cdef MPI_Aint a = self, b = MPI_IN_PLACE if a != b: raise ValueError("cannot create instance") def __getbuffer__(self, Py_buffer *view, int flags): PyBuffer_FillInfo(view, NULL, MPI_IN_PLACE, 0, 0, flags) def __repr__(self) -> str: self # unused return 'IN_PLACE' def __reduce__(self) -> str: self # unused return 'IN_PLACE' cdef object __BOTTOM__ = BottomType(MPI_BOTTOM) cdef object __IN_PLACE__ = InPlaceType(MPI_IN_PLACE) cdef inline bint is_BOTTOM(object obj): return obj is None or is_constant(obj, __BOTTOM__) cdef inline bint is_IN_PLACE(object obj): return obj is None or is_constant(obj, __IN_PLACE__) # ----------------------------------------------------------------------------- cdef inline const char *getformat(const char format[]) except NULL: cdef char byteorder = format[0] if byteorder == c'@': # native return format + 1 # ~> uncovered if byteorder == c'<': # little-endian if not is_little_endian(): raise BufferError( # ~> big-endian f"format string {pystr(format)!r} " # ~> big-endian f"with non-native byte order") # ~> big-endian return format + 1 # ~> little-endian if byteorder == c'>': # big-endian if not is_big_endian(): raise BufferError( # ~> little-endian f"format string {pystr(format)!r} " # ~> little-endian f"with non-native byte order") # ~> little-endian return format + 1 # ~> big-endian # passthrough return format cdef inline Datatype lookup_datatype(object key): try: return TypeDict[key] except KeyError: raise KeyError(f"cannot map {key!r} to MPI datatype") cdef inline Datatype asdatatype(object datatype): if isinstance(datatype, Datatype): return datatype return lookup_datatype(datatype) cdef inline Datatype getdatatype(const char format[]): if format == BYTE_FMT: return __BYTE__ return lookup_datatype(pystr(getformat(format))) @cython.final @cython.internal cdef class _p_message: cdef buffer buf cdef object count cdef object displ cdef Datatype type cdef _p_message message_basic(object o_buf, object o_type, bint readonly, # void **baddr, MPI_Count *bsize, MPI_Datatype *btype, ): cdef _p_message m = _p_message.__new__(_p_message) # special-case for BOTTOM or None, # an explicit MPI datatype is required if is_BOTTOM(o_buf): m.buf = newbuffer() m.type = asdatatype(o_type) baddr[0] = MPI_BOTTOM bsize[0] = 0 btype[0] = m.type.ob_mpi return m # get message buffer cdef bint fmt = (o_type is None) m.buf = getbuffer(o_buf, readonly, fmt) # get message datatype if o_type is not None: m.type = asdatatype(o_type) else: m.type = getdatatype(m.buf.view.format) # return collected message data baddr[0] = m.buf.view.buf bsize[0] = m.buf.view.len btype[0] = m.type.ob_mpi return m cdef _p_message message_simple(object msg, int readonly, int rank, int blocks, # void **_addr, MPI_Count *_count, MPI_Datatype *_type, ): # special-case PROC_NULL target rank if rank == MPI_PROC_NULL: _addr[0] = NULL _count[0] = 0 _type[0] = MPI_BYTE return None # unpack message list/tuple cdef Py_ssize_t nargs = 0 cdef object o_buf = None cdef object o_count = None cdef object o_displ = None cdef object o_type = None if is_buffer(msg): o_buf = msg elif is_list(msg) or is_tuple(msg): nargs = len(msg) if nargs == 2: (o_buf, o_count) = msg if is_datatype(o_count): (o_count, o_type) = None, o_count elif is_tuple(o_count) or is_list(o_count): (o_count, o_displ) = o_count elif nargs == 3: (o_buf, o_count, o_type) = msg if is_tuple(o_count) or is_list(o_count): (o_count, o_displ) = o_count elif nargs == 4: (o_buf, o_count, o_displ, o_type) = msg else: raise ValueError("message: expecting 2 
to 4 items") elif is_dlpack_buffer(msg): o_buf = msg elif is_cai_buffer(msg): o_buf = msg else: raise TypeError("message: expecting buffer or list/tuple") # buffer: address, length, and datatype cdef void *baddr = NULL cdef MPI_Count bsize = 0 cdef MPI_Datatype btype = MPI_DATATYPE_NULL cdef _p_message m = message_basic(o_buf, o_type, readonly, &baddr, &bsize, &btype) # buffer: count and displacement cdef MPI_Count count = 0 # number of datatype entries cdef MPI_Aint displ = 0 # from base buffer, in datatype entries cdef MPI_Count extent = 0, lb = 0 # datatype extent cdef MPI_Count length = bsize # in bytes cdef MPI_Aint offset = 0 # from base buffer, in bytes if o_displ is not None: displ = PyNumber_Index(o_displ) if displ < 0: raise ValueError( f"message: negative displacement {displ}") if displ > 0: if btype == MPI_DATATYPE_NULL: raise ValueError( f"message: cannot handle displacement, " f"datatype is null") CHKERR( MPI_Type_get_extent_c(btype, &lb, &extent) ) if extent <= 0: raise ValueError( f"message: cannot handle displacement, " f"datatype extent {extent} (lb:{lb}, ub:{lb+extent})") if displ*extent > length: raise ValueError( f"message: displacement {displ} out of bounds, " f"number of datatype entries {length//extent}") offset = (displ * extent) length -= offset if o_count is not None: count = PyNumber_Index(o_count) if count < 0: raise ValueError( f"message: negative count {count}") if count > 0 and o_buf is None: raise ValueError( f"message: buffer is None but count is {count}") elif length > 0: if extent == 0: if btype == MPI_DATATYPE_NULL: raise ValueError( f"message: cannot infer count, " f"datatype is null") CHKERR( MPI_Type_get_extent_c(btype, &lb, &extent) ) if extent <= 0: raise ValueError( f"message: cannot infer count, " f"datatype extent {extent} (lb:{lb}, ub:{lb+extent})") if (length % extent) != 0: raise ValueError( f"message: cannot infer count, " f"buffer length {length} is not a multiple of " f"datatype extent {extent} (lb:{lb}, ub:{lb+extent})") if blocks < 2: count = length // extent else: if ((length // extent) % blocks) != 0: raise ValueError( f"message: cannot infer count, " f"number of entries {length//extent} is not a multiple of " f"required number of blocks {blocks}") count = (length // extent) // blocks # return collected message data m.count = o_count if o_count is not None else count m.displ = o_displ if o_displ is not None else displ _addr[0] = (baddr + offset) _count[0] = count _type[0] = btype return m cdef _p_message message_vector(object msg, int readonly, int rank, int blocks, # void **_addr, MPI_Count **_counts, MPI_Aint **_displs, MPI_Datatype *_type, ): # special-case PROC_NULL target rank if rank == MPI_PROC_NULL: _addr[0] = NULL _counts[0] = NULL _displs[0] = NULL _type[0] = MPI_BYTE return None # unpack message list/tuple cdef Py_ssize_t nargs = 0 cdef object o_buf = None cdef object o_counts = None cdef object o_displs = None cdef object o_type = None if is_buffer(msg): o_buf = msg elif is_list(msg) or is_tuple(msg): nargs = len(msg) if nargs == 2: (o_buf, o_counts) = msg if is_datatype(o_counts): (o_counts, o_type) = None, o_counts elif is_tuple(o_counts): (o_counts, o_displs) = o_counts elif nargs == 3: (o_buf, o_counts, o_type) = msg if is_tuple(o_counts): (o_counts, o_displs) = o_counts elif nargs == 4: (o_buf, o_counts, o_displs, o_type) = msg else: raise ValueError("message: expecting 2 to 4 items") elif is_dlpack_buffer(msg): o_buf = msg elif is_cai_buffer(msg): o_buf = msg else: raise TypeError("message: expecting buffer or 
list/tuple") # buffer: address, length, and datatype cdef void *baddr = NULL cdef MPI_Count bsize = 0 cdef MPI_Datatype btype = MPI_DATATYPE_NULL cdef _p_message m = message_basic(o_buf, o_type, readonly, &baddr, &bsize, &btype) # counts and displacements cdef int i cdef MPI_Count *counts = NULL cdef MPI_Aint *displs = NULL cdef MPI_Count extent=0, lb=0 cdef MPI_Aint avalue=0 cdef MPI_Count cvalue=0, csize=0 if o_counts is None: if bsize > 0: if btype == MPI_DATATYPE_NULL: raise ValueError( f"message: cannot infer count, " f"datatype is null") CHKERR( MPI_Type_get_extent_c(btype, &lb, &extent) ) if extent <= 0: raise ValueError( f"message: cannot infer count, " f"datatype extent {extent} (lb:{lb}, ub:{lb+extent})") if (bsize % extent) != 0: raise ValueError( f"message: cannot infer count, " f"buffer length {bsize} is not a multiple of " f"datatype extent {extent} (lb:{lb}, ub:{lb+extent})") csize = bsize // extent o_counts = newarray(blocks, &counts) for i in range(blocks): cvalue = (csize // blocks) + (csize % blocks > i) counts[i] = cvalue elif is_integral(o_counts): cvalue = PyNumber_Index(o_counts) o_counts = newarray(blocks, &counts) for i in range(blocks): counts[i] = cvalue else: o_counts = chkarray(o_counts, blocks, &counts) if o_displs is None: # contiguous avalue = 0 o_displs = newarray(blocks, &displs) for i in range(blocks): displs[i] = avalue avalue += counts[i] elif is_integral(o_displs): # strided avalue = PyNumber_Index(o_displs) o_displs = newarray(blocks, &displs) for i in range(blocks): displs[i] = avalue * i else: # general o_displs = chkarray(o_displs, blocks, &displs) # return collected message data m.count = o_counts m.displ = o_displs _addr[0] = baddr _counts[0] = counts _displs[0] = displs _type[0] = btype return m cdef tuple message_vector_w(object msg, int readonly, int blocks, # void **_addr, MPI_Count **_counts, MPI_Aint **_displs, MPI_Datatype **_types, ): cdef Py_ssize_t nargs = len(msg) cdef object o_buffer, o_counts, o_displs, o_types if nargs == 2: o_buffer, o_types = msg o_counts = o_displs = None elif nargs == 3: o_buffer, (o_counts, o_displs), o_types = msg elif nargs == 4: o_buffer, o_counts, o_displs, o_types = msg else: raise ValueError("message: expecting 2 to 4 items") if is_BOTTOM(o_buffer): if o_counts is None: raise ValueError("message: BOTTOM requires counts") if o_displs is None: raise ValueError("message: BOTTOM requires displs") _addr[0] = MPI_BOTTOM elif readonly: o_buffer = asbuffer_r(o_buffer, _addr, NULL) else: o_buffer = asbuffer_w(o_buffer, _addr, NULL) if o_counts is None and o_displs is None: o_counts = newarray(blocks, _counts) o_displs = newarray(blocks, _displs) for i in range(blocks): _counts[0][i] = 1 _displs[0][i] = 0 else: o_counts = chkarray(o_counts, blocks, _counts) o_displs = chkarray(o_displs, blocks, _displs) o_types = asarray_Datatype(o_types, blocks, _types) return (o_buffer, o_counts, o_displs, o_types) # ----------------------------------------------------------------------------- @cython.final @cython.internal cdef class _p_msg_p2p: # raw C-side arguments cdef void *buf cdef MPI_Count count cdef MPI_Datatype dtype # python-side argument cdef object _msg def __cinit__(self): self.buf = NULL self.count = 0 self.dtype = MPI_DATATYPE_NULL cdef int for_send(self, object msg, int rank, int parts) except -1: self._msg = message_simple( msg, 1, # readonly rank, parts, &self.buf, &self.count, &self.dtype, ) return 0 cdef int for_recv(self, object msg, int rank, int parts) except -1: self._msg = message_simple( msg, 0, # 
writable rank, parts, &self.buf, &self.count, &self.dtype, ) return 0 cdef inline _p_msg_p2p message_p2p_send(object sendbuf, int dest): cdef _p_msg_p2p msg = _p_msg_p2p.__new__(_p_msg_p2p) msg.for_send(sendbuf, dest, 1) return msg cdef inline _p_msg_p2p message_p2p_recv(object recvbuf, int source): cdef _p_msg_p2p msg = _p_msg_p2p.__new__(_p_msg_p2p) msg.for_recv(recvbuf, source, 1) return msg cdef inline _p_msg_p2p message_p2p_psend(object sendbuf, int dest, int parts): cdef _p_msg_p2p msg = _p_msg_p2p.__new__(_p_msg_p2p) msg.for_send(sendbuf, dest, parts) return msg cdef inline _p_msg_p2p message_p2p_precv(object recvbuf, int source, int parts): cdef _p_msg_p2p msg = _p_msg_p2p.__new__(_p_msg_p2p) msg.for_recv(recvbuf, source, parts) return msg # ----------------------------------------------------------------------------- @cython.final @cython.internal cdef class _p_msg_cco: # raw C-side arguments cdef void *sbuf, *rbuf cdef MPI_Count scount, rcount cdef MPI_Count *scounts, *rcounts cdef MPI_Aint *sdispls, *rdispls cdef MPI_Datatype stype, rtype # python-side arguments cdef object _smsg, _rmsg cdef object _rcnt def __cinit__(self): self.sbuf = self.rbuf = NULL self.scount = self.rcount = 0 self.scounts = self.rcounts = NULL self.sdispls = self.rdispls = NULL self.stype = self.rtype = MPI_DATATYPE_NULL # Collective Communication Operations # ----------------------------------- # sendbuf arguments cdef int for_cco_send( self, bint VECTOR, object amsg, int rank, int blocks, ) except -1: cdef bint readonly = 1 if not VECTOR: # block variant self._smsg = message_simple( amsg, readonly, rank, blocks, &self.sbuf, &self.scount, &self.stype) else: # vector variant self._smsg = message_vector( amsg, readonly, rank, blocks, &self.sbuf, &self.scounts, &self.sdispls, &self.stype) return 0 # recvbuf arguments cdef int for_cco_recv( self, bint VECTOR, object amsg, int rank, int blocks, ) except -1: cdef bint readonly = 0 if not VECTOR: # block variant self._rmsg = message_simple( amsg, readonly, rank, blocks, &self.rbuf, &self.rcount, &self.rtype) else: # vector variant self._rmsg = message_vector( amsg, readonly, rank, blocks, &self.rbuf, &self.rcounts, &self.rdispls, &self.rtype) return 0 # bcast cdef int for_bcast( self, object msg, int root, MPI_Comm comm, ) except -1: if comm == MPI_COMM_NULL: return 0 cdef int inter=0, rank=0, sending=0 CHKERR( MPI_Comm_test_inter(comm, &inter) ) if not inter: # intra-communication CHKERR( MPI_Comm_rank(comm, &rank) ) if root == rank: self.for_cco_send(0, msg, root, 0) sending = 1 else: self.for_cco_recv(0, msg, root, 0) sending = 0 else: # inter-communication if root == MPI_ROOT or root == MPI_PROC_NULL: self.for_cco_send(0, msg, root, 0) sending = 1 else: self.for_cco_recv(0, msg, root, 0) sending = 0 if sending: self.rbuf = self.sbuf self.rcount = self.scount self.rtype = self.stype else: self.sbuf = self.rbuf self.scount = self.rcount self.stype = self.rtype return 0 # gather/gatherv cdef int for_gather( self, int v, object smsg, object rmsg, int root, MPI_Comm comm, ) except -1: if comm == MPI_COMM_NULL: return 0 cdef int inter=0, size=0, rank=0, null=MPI_PROC_NULL CHKERR( MPI_Comm_test_inter(comm, &inter) ) if not inter: # intra-communication CHKERR( MPI_Comm_size(comm, &size) ) CHKERR( MPI_Comm_rank(comm, &rank) ) if root == rank: self.for_cco_recv(v, rmsg, root, size) if is_IN_PLACE(smsg): self.sbuf = MPI_IN_PLACE else: self.for_cco_send(0, smsg, 0, 0) else: self.for_cco_recv(v, rmsg, null, size) self.for_cco_send(0, smsg, root, 0) else: # 
inter-communication CHKERR( MPI_Comm_remote_size(comm, &size) ) if root == MPI_ROOT or root == MPI_PROC_NULL: self.for_cco_recv(v, rmsg, root, size) self.for_cco_send(0, smsg, null, 0) else: self.for_cco_recv(v, rmsg, null, size) self.for_cco_send(0, smsg, root, 0) return 0 # scatter/scatterv cdef int for_scatter( self, int v, object smsg, object rmsg, int root, MPI_Comm comm, ) except -1: if comm == MPI_COMM_NULL: return 0 cdef int inter=0, size=0, rank=0, null=MPI_PROC_NULL CHKERR( MPI_Comm_test_inter(comm, &inter) ) if not inter: # intra-communication CHKERR( MPI_Comm_size(comm, &size) ) CHKERR( MPI_Comm_rank(comm, &rank) ) if root == rank: self.for_cco_send(v, smsg, root, size) if is_IN_PLACE(rmsg): self.rbuf = MPI_IN_PLACE else: self.for_cco_recv(0, rmsg, root, 0) else: self.for_cco_send(v, smsg, null, size) self.for_cco_recv(0, rmsg, root, 0) else: # inter-communication CHKERR( MPI_Comm_remote_size(comm, &size) ) if root == MPI_ROOT or root == MPI_PROC_NULL: self.for_cco_send(v, smsg, root, size) self.for_cco_recv(0, rmsg, null, 0) else: self.for_cco_send(v, smsg, null, size) self.for_cco_recv(0, rmsg, root, 0) return 0 # allgather/allgatherv cdef int for_allgather( self, int v, object smsg, object rmsg, MPI_Comm comm, ) except -1: if comm == MPI_COMM_NULL: return 0 cdef int inter=0, size=0 CHKERR( MPI_Comm_test_inter(comm, &inter) ) if not inter: # intra-communication CHKERR( MPI_Comm_size(comm, &size) ) else: # inter-communication CHKERR( MPI_Comm_remote_size(comm, &size) ) # self.for_cco_recv(v, rmsg, 0, size) if not inter and is_IN_PLACE(smsg): self.sbuf = MPI_IN_PLACE else: self.for_cco_send(0, smsg, 0, 0) return 0 # alltoall/alltoallv cdef int for_alltoall( self, int v, object smsg, object rmsg, MPI_Comm comm, ) except -1: if comm == MPI_COMM_NULL: return 0 cdef int inter=0, size=0 CHKERR( MPI_Comm_test_inter(comm, &inter) ) if not inter: # intra-communication CHKERR( MPI_Comm_size(comm, &size) ) else: # inter-communication CHKERR( MPI_Comm_remote_size(comm, &size) ) # self.for_cco_recv(v, rmsg, 0, size) if not inter and is_IN_PLACE(smsg): self.sbuf = MPI_IN_PLACE else: self.for_cco_send(v, smsg, 0, size) return 0 # Neighbor Collectives # -------------------- # neighbor allgather/allgatherv cdef int for_neighbor_allgather( self, int v, object smsg, object rmsg, MPI_Comm comm, ) except -1: if comm == MPI_COMM_NULL: return 0 cdef int recvsize=0 comm_neighbors_count(comm, &recvsize, NULL) self.for_cco_send(0, smsg, 0, 0) self.for_cco_recv(v, rmsg, 0, recvsize) return 0 # neighbor alltoall/alltoallv cdef int for_neighbor_alltoall( self, int v, object smsg, object rmsg, MPI_Comm comm, ) except -1: if comm == MPI_COMM_NULL: return 0 cdef int sendsize=0, recvsize=0 comm_neighbors_count(comm, &recvsize, &sendsize) self.for_cco_send(v, smsg, 0, sendsize) self.for_cco_recv(v, rmsg, 0, recvsize) return 0 # Collective Reductions Operations # -------------------------------- # sendbuf cdef int for_cro_send( self, object amsg, int root, ) except -1: cdef bint readonly = 1 self._smsg = message_simple( amsg, readonly, root, 0, &self.sbuf, &self.scount, &self.stype) return 0 # recvbuf cdef int for_cro_recv( self, object amsg, int root, ) except -1: cdef bint readonly = 0 self._rmsg = message_simple( amsg, readonly, root, 0, &self.rbuf, &self.rcount, &self.rtype) return 0 # check counts and datatypes cdef int chk_cro_args(self) except -1: if self.sbuf == MPI_IN_PLACE: return 0 if self.stype != self.rtype: raise ValueError( f"mismatch in send and receive MPI datatypes") if self.scount != 
self.rcount: raise ValueError( f"mismatch in send count {self.scount} " f"and receive count {self.rcount}") return 0 cdef int for_reduce( self, object smsg, object rmsg, int root, MPI_Comm comm, ) except -1: if comm == MPI_COMM_NULL: return 0 cdef int inter=0, rank=0, null=MPI_PROC_NULL CHKERR( MPI_Comm_test_inter(comm, &inter) ) if not inter: # intra-communication CHKERR( MPI_Comm_rank(comm, &rank) ) if root == rank: self.for_cro_recv(rmsg, root) if is_IN_PLACE(smsg): self.sbuf = MPI_IN_PLACE else: self.for_cro_send(smsg, root) self.chk_cro_args() else: self.for_cro_recv(rmsg, null) self.for_cro_send(smsg, root) self.rcount = self.scount self.rtype = self.stype else: # inter-communication if root == MPI_ROOT or root == MPI_PROC_NULL: self.for_cro_recv(rmsg, root) self.scount = self.rcount self.stype = self.rtype else: self.for_cro_send(smsg, root) self.rcount = self.scount self.rtype = self.stype return 0 cdef int for_allreduce( self, object smsg, object rmsg, MPI_Comm comm, ) except -1: if comm == MPI_COMM_NULL: return 0 cdef int inter=0 CHKERR( MPI_Comm_test_inter(comm, &inter) ) # get send and recv buffers self.for_cro_recv(rmsg, 0) if not inter and is_IN_PLACE(smsg): self.sbuf = MPI_IN_PLACE else: self.for_cro_send(smsg, 0) self.chk_cro_args() return 0 cdef int for_reduce_scatter_block( self, object smsg, object rmsg, MPI_Comm comm, ) except -1: if comm == MPI_COMM_NULL: return 0 cdef int inter=0, size=0 CHKERR( MPI_Comm_test_inter(comm, &inter) ) CHKERR( MPI_Comm_size(comm, &size) ) # get send and recv buffers if not inter and is_IN_PLACE(smsg): self.for_cco_recv(0, rmsg, 0, size) self.sbuf = MPI_IN_PLACE else: self.for_cco_recv(0, rmsg, 0, 0) self.for_cco_send(0, smsg, 0, size) self.chk_cro_args() return 0 cdef int for_reduce_scatter( self, object smsg, object rmsg, object rcnt, MPI_Comm comm, ) except -1: if comm == MPI_COMM_NULL: return 0 cdef int inter=0, size=0, rank=MPI_PROC_NULL CHKERR( MPI_Comm_test_inter(comm, &inter) ) CHKERR( MPI_Comm_size(comm, &size) ) CHKERR( MPI_Comm_rank(comm, &rank) ) # get send and recv buffers self.for_cro_recv(rmsg, 0) if not inter and is_IN_PLACE(smsg): self.sbuf = MPI_IN_PLACE else: self.for_cro_send(smsg, 0) # get receive counts if rcnt is None and not inter and self.sbuf != MPI_IN_PLACE: self._rcnt = newarray(size, &self.rcounts) CHKERR( MPI_Allgather_c( &self.rcount, 1, MPI_COUNT, self.rcounts, 1, MPI_COUNT, comm) ) else: self._rcnt = chkarray(rcnt, size, &self.rcounts) # total sum or receive counts cdef MPI_Count sumrcounts=0 for i in range(size): sumrcounts += self.rcounts[i] # check counts and datatypes if self.sbuf != MPI_IN_PLACE: if self.stype != self.rtype: raise ValueError( f"mismatch in send and receive MPI datatypes") if self.scount != sumrcounts: raise ValueError( f"mismatch in send count {self.scount} " f"and sum(counts) {sumrcounts}") if self.rcount != self.rcounts[rank]: raise ValueError( f"mismatch in receive count {self.rcount} " f"and counts[{rank}] {self.rcounts[rank]}") else: if self.rcount != sumrcounts: raise ValueError( f"mismatch in receive count {self.rcount} " f"and sum(counts) {sumrcounts}") return 0 cdef int for_scan( self, object smsg, object rmsg, MPI_Comm comm, ) except -1: if comm == MPI_COMM_NULL: return 0 # get send and recv buffers self.for_cro_recv(rmsg, 0) if is_IN_PLACE(smsg): self.sbuf = MPI_IN_PLACE else: self.for_cro_send(smsg, 0) self.chk_cro_args() cdef int for_exscan( self, object smsg, object rmsg, MPI_Comm comm, ) except -1: if comm == MPI_COMM_NULL: return 0 # get send and recv buffers 
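# -----------------------------------------------------------------------------
# Illustrative sketch (not part of this module): the IN_PLACE handling in
# for_allreduce()/chk_cro_args() above, seen from user code.  With IN_PLACE
# the receive buffer supplies both input and output, so no separate send
# buffer (and no send/receive count and datatype match check) is needed.
def _example_in_place_allreduce():
    from array import array
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    values = array('i', [comm.Get_rank()] * 3)
    comm.Allreduce(MPI.IN_PLACE, [values, MPI.INT], op=MPI.SUM)
    return values
# -----------------------------------------------------------------------------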
self.for_cro_recv(rmsg, 0) if is_IN_PLACE(smsg): self.sbuf = MPI_IN_PLACE else: self.for_cro_send(smsg, 0) self.chk_cro_args() cdef inline _p_msg_cco message_cco(): cdef _p_msg_cco msg = _p_msg_cco.__new__(_p_msg_cco) return msg # ----------------------------------------------------------------------------- @cython.final @cython.internal cdef class _p_msg_ccow: # raw C-side arguments cdef void *sbuf, *rbuf cdef MPI_Count *scounts, *rcounts cdef MPI_Aint *sdispls, *rdispls cdef MPI_Datatype *stypes, *rtypes # python-side arguments cdef object _smsg, _rmsg def __cinit__(self): self.sbuf = self.rbuf = NULL self.scounts = self.rcounts = NULL self.sdispls = self.rdispls = NULL self.stypes = self.rtypes = NULL # alltoallw cdef int for_alltoallw( self, object smsg, object rmsg, MPI_Comm comm, ) except -1: if comm == MPI_COMM_NULL: return 0 cdef int inter=0, size=0 CHKERR( MPI_Comm_test_inter(comm, &inter) ) if not inter: # intra-communication CHKERR( MPI_Comm_size(comm, &size) ) else: # inter-communication CHKERR( MPI_Comm_remote_size(comm, &size) ) # self._rmsg = message_vector_w( rmsg, 0, size, &self.rbuf, &self.rcounts, &self.rdispls, &self.rtypes) if not inter and is_IN_PLACE(smsg): self.sbuf = MPI_IN_PLACE return 0 self._smsg = message_vector_w( smsg, 1, size, &self.sbuf, &self.scounts, &self.sdispls, &self.stypes) return 0 # neighbor alltoallw cdef int for_neighbor_alltoallw( self, object smsg, object rmsg, MPI_Comm comm, ) except -1: if comm == MPI_COMM_NULL: return 0 cdef int sendsize=0, recvsize=0 comm_neighbors_count(comm, &recvsize, &sendsize) self._rmsg = message_vector_w( rmsg, 0, recvsize, &self.rbuf, &self.rcounts, &self.rdispls, &self.rtypes) self._smsg = message_vector_w( smsg, 1, sendsize, &self.sbuf, &self.scounts, &self.sdispls, &self.stypes) return 0 cdef inline _p_msg_ccow message_ccow(): cdef _p_msg_ccow msg = _p_msg_ccow.__new__(_p_msg_ccow) return msg # ----------------------------------------------------------------------------- @cython.final @cython.internal cdef class _p_msg_rma: # raw origin arguments cdef void* oaddr cdef MPI_Count ocount cdef MPI_Datatype otype # raw compare arguments cdef void* caddr cdef MPI_Count ccount cdef MPI_Datatype ctype # raw result arguments cdef void* raddr cdef MPI_Count rcount cdef MPI_Datatype rtype # raw target arguments cdef MPI_Aint tdisp cdef MPI_Count tcount cdef MPI_Datatype ttype # python-side arguments cdef object _origin cdef object _compare cdef object _result cdef object _target def __cinit__(self): self.oaddr = NULL self.ocount = 0 self.otype = MPI_DATATYPE_NULL self.raddr = NULL self.rcount = 0 self.rtype = MPI_DATATYPE_NULL self.tdisp = 0 self.tcount = 0 self.ttype = MPI_DATATYPE_NULL cdef int for_rma(self, int readonly, object origin, int rank, object target) except -1: # ORIGIN self._origin = message_simple( origin, readonly, rank, 0, &self.oaddr, &self.ocount, &self.otype) if ( (rank == MPI_PROC_NULL) and (origin is not None) and (is_list(origin) or is_tuple(origin)) and (len(origin) > 0 and is_datatype(origin[-1])) ): self.otype = asdatatype(origin[-1]).ob_mpi self._origin = origin # TARGET cdef Py_ssize_t nargs = 0 if target is None: self.tdisp = 0 self.tcount = self.ocount self.ttype = self.otype elif is_integral(target): self.tdisp = PyNumber_Index(target) self.tcount = self.ocount self.ttype = self.otype elif is_list(target) or is_tuple(target): self.tdisp = 0 self.tcount = self.ocount self.ttype = self.otype nargs = len(target) if nargs >= 1: self.tdisp = PyNumber_Index(target[0]) if nargs >= 2: self.tcount = 
PyNumber_Index(target[1]) if nargs >= 3: self.ttype = asdatatype(target[2]).ob_mpi if nargs >= 4: raise ValueError("target: expecting 3 items at most") else: raise ValueError("target: expecting integral or list/tuple") self._target = target return 0 cdef int for_put(self, object origin, int rank, object target) except -1: self.for_rma(1, origin, rank, target) return 0 cdef int for_get(self, object origin, int rank, object target) except -1: self.for_rma(0, origin, rank, target) return 0 cdef int for_acc(self, object origin, int rank, object target) except -1: self.for_rma(1, origin, rank, target) return 0 cdef int set_origin(self, object origin, int rank) except -1: self._origin = message_simple( origin, 1, rank, 0, &self.oaddr, &self.ocount, &self.otype) self.tdisp = 0 self.tcount = self.ocount self.ttype = self.otype cdef int set_compare(self, object compare, int rank) except -1: self._compare = message_simple( compare, 1, rank, 0, &self.caddr, &self.ccount, &self.ctype) cdef int set_result(self, object result, int rank) except -1: self._result = message_simple( result, 0, rank, 0, &self.raddr, &self.rcount, &self.rtype) cdef int for_get_acc(self, object origin, object result, int rank, object target) except -1: self.for_rma(0, origin, rank, target) self.set_result(result, rank) return 0 cdef int for_fetch_op(self, object origin, object result, int rank, MPI_Aint disp) except -1: self.set_origin(origin, rank) self.set_result(result, rank) self.tdisp = disp if rank == MPI_PROC_NULL: return 0 # Check if self.ocount != 1: raise ValueError( f"origin: expecting a single element, got {self.ocount}") if self.rcount != 1: raise ValueError( f"result: expecting a single element, got {self.rcount}") if self.otype != self.rtype: raise ValueError( f"mismatch in origin and result MPI datatypes") return 0 cdef int for_cmp_swap(self, object origin, object compare, object result, int rank, MPI_Aint disp) except -1: self.set_origin(origin, rank) self.set_compare(compare, rank) self.set_result(result, rank) self.tdisp = disp if rank == MPI_PROC_NULL: return 0 # Check if self.ocount != 1: raise ValueError( f"origin: expecting a single element, got {self.ocount}") if self.ccount != 1: raise ValueError( f"compare: expecting a single element, got {self.ccount}") if self.rcount != 1: raise ValueError( f"result: expecting a single element, got {self.rcount}") if self.otype != self.ctype: raise ValueError( f"mismatch in origin and compare MPI datatypes") if self.otype != self.rtype: raise ValueError( f"mismatch in origin and result MPI datatypes") return 0 cdef inline _p_msg_rma message_rma(): cdef _p_msg_rma msg = _p_msg_rma.__new__(_p_msg_rma) return msg # ----------------------------------------------------------------------------- @cython.final @cython.internal cdef class _p_msg_io: # raw C-side data cdef void *buf cdef MPI_Count count cdef MPI_Datatype dtype # python-side data cdef object _msg def __cinit__(self): self.buf = NULL self.count = 0 self.dtype = MPI_DATATYPE_NULL cdef int for_read(self, object msg) except -1: self._msg = message_simple( msg, 0, # writable 0, 0, &self.buf, &self.count, &self.dtype, ) return 0 cdef int for_write(self, object msg) except -1: self._msg = message_simple( msg, 1, # readonly 0, 0, &self.buf, &self.count, &self.dtype, ) return 0 cdef inline _p_msg_io message_io_read(object buf): cdef _p_msg_io msg = _p_msg_io.__new__(_p_msg_io) msg.for_read(buf) return msg cdef inline _p_msg_io message_io_write(object buf): cdef _p_msg_io msg = _p_msg_io.__new__(_p_msg_io) 
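# -----------------------------------------------------------------------------
# Illustrative sketch (not part of this module): the target specification
# parsed by for_rma() above.  The target argument of Put/Get/Accumulate may be
# omitted, an integer displacement, or a (displacement, count, datatype) tuple
# of at most three items; Fetch_and_op and Compare_and_swap additionally
# require single-element origin/result buffers.
def _example_rma_target_spec():
    from array import array
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    membuf = array('d', [0.0] * 4)
    win = MPI.Win.Create(membuf, disp_unit=membuf.itemsize, comm=comm)
    origin = array('d', [1.0, 2.0])
    win.Lock(0)
    win.Put([origin, MPI.DOUBLE], 0, target=(2, 2, MPI.DOUBLE))
    win.Unlock(0)
    win.Free()
    return membuf
# -----------------------------------------------------------------------------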
msg.for_write(buf) return msg # ----------------------------------------------------------------------------- mpi4py-4.0.3/src/mpi4py/MPI.src/msgpickle.pxi000066400000000000000000001327251475341043600207050ustar00rootroot00000000000000# ----------------------------------------------------------------------------- cdef extern from "Python.h": bint PyBytes_CheckExact(object) char* PyBytes_AsString(object) except NULL Py_ssize_t PyBytes_Size(object) except -1 object PyBytes_FromStringAndSize(char*, Py_ssize_t) # ----------------------------------------------------------------------------- cdef object PyPickle_dumps = None cdef object PyPickle_loads = None cdef object PyPickle_PROTOCOL = None cdef object PyPickle_THRESHOLD = 1024**2 // 4 # 0.25 MiB from pickle import dumps as PyPickle_dumps from pickle import loads as PyPickle_loads from pickle import HIGHEST_PROTOCOL as PyPickle_PROTOCOL if Py_GETENV(b"MPI4PY_PICKLE_PROTOCOL") != NULL: PyPickle_PROTOCOL = int(Py_GETENV(b"MPI4PY_PICKLE_PROTOCOL")) if Py_GETENV(b"MPI4PY_PICKLE_THRESHOLD") != NULL: PyPickle_THRESHOLD = int(Py_GETENV(b"MPI4PY_PICKLE_THRESHOLD")) cdef class Pickle: """ Pickle/unpickle Python objects. """ cdef object ob_dumps cdef object ob_loads cdef object ob_PROTO cdef object ob_THRES def __cinit__(self, *args, **kwargs): args # unused kwargs # unused self.ob_dumps = PyPickle_dumps self.ob_loads = PyPickle_loads self.ob_PROTO = PyPickle_PROTOCOL self.ob_THRES = PyPickle_THRESHOLD def __init__( self, dumps: Callable[[Any, int], bytes] | None = None, loads: Callable[[Buffer], Any] | None = None, protocol: int | None = None, threshold: int | None = None, ) -> None: if dumps is None: dumps = PyPickle_dumps if loads is None: loads = PyPickle_loads if protocol is None: if dumps is PyPickle_dumps: protocol = PyPickle_PROTOCOL if threshold is None: threshold = PyPickle_THRESHOLD self.ob_dumps = dumps self.ob_loads = loads self.ob_PROTO = protocol self.ob_THRES = threshold def dumps( self, obj: Any, ) -> bytes: """ Serialize object to pickle data stream. """ return cdumps(self, obj) def loads( self, data: Buffer, ) -> Any: """ Deserialize object from pickle data stream. """ return cloads(self, data) def dumps_oob( self, obj: Any, ) -> tuple[bytes, list[buffer]]: """ Serialize object to pickle data stream and out-of-band buffers. """ return cdumps_oob(self, obj) def loads_oob( self, data: Buffer, buffers: Iterable[Buffer], ) -> Any: """ Deserialize object from pickle data stream and out-of-band buffers. 
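# -----------------------------------------------------------------------------
# Illustrative sketch (not part of this module): customizing the module-level
# Pickle instance defined above.  MPI.pickle drives all lowercase object
# communication; its dumps/loads pair, protocol, and out-of-band threshold may
# be replaced at runtime, and the defaults can be preset through the
# MPI4PY_PICKLE_PROTOCOL and MPI4PY_PICKLE_THRESHOLD environment variables.
# A third-party serializer (e.g. dill) could be passed the same way; the
# standard-library pickle is used here to keep the example self-contained.
def _example_custom_pickle():
    import pickle
    from mpi4py import MPI
    MPI.pickle.__init__(pickle.dumps, pickle.loads,
                        protocol=pickle.HIGHEST_PROTOCOL)
    blob = MPI.pickle.dumps({"rank": MPI.COMM_WORLD.Get_rank()})
    return MPI.pickle.loads(blob)
# -----------------------------------------------------------------------------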
""" return cloads_oob(self, data, buffers) property PROTOCOL: """Protocol version.""" def __get__(self) -> int | None: return self.ob_PROTO def __set__(self, protocol: int | None): if protocol is None: if self.ob_dumps is PyPickle_dumps: protocol = PyPickle_PROTOCOL self.ob_PROTO = protocol property THRESHOLD: """Out-of-band threshold.""" def __get__(self) -> int: return self.ob_THRES def __set__(self, threshold: int | None): if threshold is None: threshold = PyPickle_THRESHOLD self.ob_THRES = threshold cdef Pickle PyMPI_PICKLE = Pickle() pickle = PyMPI_PICKLE # ----------------------------------------------------------------------------- cdef int have_pickle5 = -1 # ~> legacy cdef object PyPickle5_dumps = None # ~> legacy cdef object PyPickle5_loads = None # ~> legacy cdef int import_pickle5() except -1: # ~> legacy global have_pickle5 # ~> legacy global PyPickle5_dumps # ~> legacy global PyPickle5_loads # ~> legacy if have_pickle5 < 0: # ~> legacy try: # ~> legacy from pickle5 import dumps as PyPickle5_dumps # ~> legacy from pickle5 import loads as PyPickle5_loads # ~> legacy have_pickle5 = 1 # ~> legacy except ImportError: # ~> legacy PyPickle5_dumps = None # ~> legacy PyPickle5_loads = None # ~> legacy have_pickle5 = 0 # ~> legacy return have_pickle5 # ~> legacy cdef object get_buffer_callback(list buffers, Py_ssize_t threshold): def buffer_callback(ob): cdef buffer buf = getbuffer(ob, 1, 0) if buf.view.len >= threshold: buffers.append(buf) return False else: return True return buffer_callback cdef object cdumps_oob(Pickle pkl, object obj): cdef object pkl_dumps = pkl.ob_dumps if PY_VERSION_HEX < 0x03080000: # ~> legacy if pkl_dumps is PyPickle_dumps: # ~> legacy if not import_pickle5(): # ~> legacy return cdumps(pkl, obj), [] # ~> legacy pkl_dumps = PyPickle5_dumps # ~> legacy cdef object protocol = pkl.ob_PROTO if protocol is None: protocol = PyPickle_PROTOCOL # ~> uncovered protocol = max(protocol, 5) cdef list buffers = [] cdef Py_ssize_t threshold = pkl.ob_THRES cdef object buf_cb = get_buffer_callback(buffers, threshold) cdef object data = pkl_dumps(obj, protocol, buffer_callback=buf_cb) return data, buffers cdef object cloads_oob(Pickle pkl, object data, object buffers): cdef object pkl_loads = pkl.ob_loads if PY_VERSION_HEX < 0x03080000: # ~> legacy if pkl_loads is PyPickle_loads: # ~> legacy if not import_pickle5(): # ~> legacy return cloads(pkl, data) # ~> legacy pkl_loads = PyPickle5_loads # ~> legacy return pkl_loads(data, buffers=buffers) # ----------------------------------------------------------------------------- cdef object cdumps(Pickle pkl, object obj): if pkl.ob_PROTO is not None: return pkl.ob_dumps(obj, pkl.ob_PROTO) else: return pkl.ob_dumps(obj) cdef object cloads(Pickle pkl, object buf): return pkl.ob_loads(buf) cdef object pickle_dump(Pickle pkl, object obj, void **p, MPI_Count *n): cdef object buf = cdumps(pkl, obj) p[0] = PyBytes_AsString(buf) n[0] = PyBytes_Size(buf) return buf cdef object pickle_load(Pickle pkl, void *p, MPI_Count n): if p == NULL or n == 0: return None return cloads(pkl, mpibuf(p, n)) cdef object pickle_dumpv( Pickle pkl, object obj, void **p, int n, MPI_Count cnt[], MPI_Aint dsp[], ): cdef Py_ssize_t m = n cdef object items if obj is None: items = [None] * m else: items = list(obj) m = len(items) if m != n: raise ValueError(f"expecting {n} items, got {m}") cdef MPI_Count c = 0 cdef MPI_Aint d = 0 for i in range(m): items[i] = pickle_dump(pkl, items[i], p, &c) cnt[i] = c dsp[i] = d d = d + c cdef object buf = b''.join(items) p[0] = 
PyBytes_AsString(buf) return buf cdef object pickle_loadv( Pickle pkl, void *p, int n, MPI_Count cnt[], MPI_Aint dsp[], ): cdef Py_ssize_t m = n cdef object items = [None] * m if p == NULL: return items for i in range(m): items[i] = pickle_load(pkl, p + dsp[i], cnt[i]) return items cdef object pickle_alloc(void **p, MPI_Count n): cdef object buf = PyBytes_FromStringAndSize(NULL, n) p[0] = PyBytes_AsString(buf) return buf cdef object pickle_allocv(void **p, int n, MPI_Count cnt[], MPI_Aint dsp[]): cdef MPI_Count d=0 for i in range(n): dsp[i] = d d += cnt[i] return pickle_alloc(p, d) cdef object allocate_count_displ(int n, MPI_Count **p, MPI_Aint **q): cdef object mem1 = allocate(n, sizeof(MPI_Count), p) cdef object mem2 = allocate(n, sizeof(MPI_Aint), q) return (mem1, mem2) # ----------------------------------------------------------------------------- cdef object PyMPI_send(object obj, int dest, int tag, MPI_Comm comm): cdef Pickle pickle = PyMPI_PICKLE # cdef void *sbuf = NULL cdef MPI_Count scount = 0 cdef MPI_Datatype stype = MPI_BYTE # cdef object unuseds = None if dest != MPI_PROC_NULL: unuseds = pickle_dump(pickle, obj, &sbuf, &scount) with nogil: CHKERR( MPI_Send_c( sbuf, scount, stype, dest, tag, comm) ) return None cdef object PyMPI_bsend(object obj, int dest, int tag, MPI_Comm comm): cdef Pickle pickle = PyMPI_PICKLE # cdef void *sbuf = NULL cdef MPI_Count scount = 0 cdef MPI_Datatype stype = MPI_BYTE # cdef object unuseds = None if dest != MPI_PROC_NULL: unuseds = pickle_dump(pickle, obj, &sbuf, &scount) with nogil: CHKERR( MPI_Bsend_c( sbuf, scount, stype, dest, tag, comm) ) return None cdef object PyMPI_ssend(object obj, int dest, int tag, MPI_Comm comm): cdef Pickle pickle = PyMPI_PICKLE # cdef void *sbuf = NULL cdef MPI_Count scount = 0 cdef MPI_Datatype stype = MPI_BYTE # cdef object unuseds = None if dest != MPI_PROC_NULL: unuseds = pickle_dump(pickle, obj, &sbuf, &scount) with nogil: CHKERR( MPI_Ssend_c( sbuf, scount, stype, dest, tag, comm) ) return None # ----------------------------------------------------------------------------- cdef object PyMPI_recv_obarg(object obj, int source, int tag, MPI_Comm comm, MPI_Status *status): cdef Pickle pickle = PyMPI_PICKLE # cdef void *rbuf = NULL cdef MPI_Count rcount = 0 cdef MPI_Datatype rtype = MPI_BYTE cdef MPI_Status rsts cdef object rmsg = None cdef MPI_Aint rlen = 0 # PyErr_WarnFormat( UserWarning, 1, b"%s", b"the 'buf' argument is deprecated", ) # if source != MPI_PROC_NULL: if is_integral(obj): rcount = PyNumber_Index(obj) rmsg = pickle_alloc(&rbuf, rcount) else: rmsg = asbuffer_w(obj, &rbuf, &rlen) rcount = rlen if status == MPI_STATUS_IGNORE: status = &rsts rmsg with nogil: CHKERR( MPI_Recv_c( rbuf, rcount, rtype, source, tag, comm, status) ) if source != MPI_PROC_NULL: CHKERR( MPI_Get_count_c(status, rtype, &rcount) ) # if rcount <= 0: return None return pickle_load(pickle, rbuf, rcount) cdef object PyMPI_recv_match(object obj, int source, int tag, MPI_Comm comm, MPI_Status *status): cdef Pickle pickle = PyMPI_PICKLE # cdef void *rbuf = NULL cdef MPI_Count rcount = 0 cdef MPI_Datatype rtype = MPI_BYTE # cdef MPI_Message match = MPI_MESSAGE_NULL cdef MPI_Status rsts obj # unused # with nogil: CHKERR( MPI_Mprobe(source, tag, comm, &match, &rsts) ) CHKERR( MPI_Get_count_c(&rsts, rtype, &rcount) ) cdef object unusedr = pickle_alloc(&rbuf, rcount) with nogil: CHKERR( MPI_Mrecv_c( rbuf, rcount, rtype, &match, status) ) # if rcount <= 0: return None return pickle_load(pickle, rbuf, rcount) cdef object PyMPI_recv_probe(object 
obj, int source, int tag, MPI_Comm comm, MPI_Status *status): cdef Pickle pickle = PyMPI_PICKLE # cdef void *rbuf = NULL cdef MPI_Count rcount = 0 cdef MPI_Datatype rtype = MPI_BYTE # cdef MPI_Status rsts cdef object unusedr obj # unused # with PyMPI_Lock(comm, "recv"): with nogil: CHKERR( MPI_Probe(source, tag, comm, &rsts) ) CHKERR( MPI_Get_count_c(&rsts, rtype, &rcount) ) CHKERR( MPI_Status_get_source(&rsts, &source) ) CHKERR( MPI_Status_get_tag(&rsts, &tag) ) unusedr = pickle_alloc(&rbuf, rcount) with nogil: CHKERR( MPI_Recv_c( rbuf, rcount, rtype, source, tag, comm, status) ) # if rcount <= 0: return None return pickle_load(pickle, rbuf, rcount) cdef object PyMPI_recv(object obj, int source, int tag, MPI_Comm comm, MPI_Status *status): if obj is not None: return PyMPI_recv_obarg(obj, source, tag, comm, status) elif options.recv_mprobe: return PyMPI_recv_match(obj, source, tag, comm, status) else: return PyMPI_recv_probe(obj, source, tag, comm, status) # ----------------------------------------------------------------------------- cdef object PyMPI_isend(object obj, int dest, int tag, MPI_Comm comm, MPI_Request *request): cdef Pickle pickle = PyMPI_PICKLE # cdef void *sbuf = NULL cdef MPI_Count scount = 0 cdef MPI_Datatype stype = MPI_BYTE # cdef object smsg = None if dest != MPI_PROC_NULL: smsg = pickle_dump(pickle, obj, &sbuf, &scount) with nogil: CHKERR( MPI_Isend_c( sbuf, scount, stype, dest, tag, comm, request) ) return smsg cdef object PyMPI_ibsend(object obj, int dest, int tag, MPI_Comm comm, MPI_Request *request): cdef Pickle pickle = PyMPI_PICKLE # cdef void *sbuf = NULL cdef MPI_Count scount = 0 cdef MPI_Datatype stype = MPI_BYTE # cdef object smsg = None if dest != MPI_PROC_NULL: smsg = pickle_dump(pickle, obj, &sbuf, &scount) with nogil: CHKERR( MPI_Ibsend_c( sbuf, scount, stype, dest, tag, comm, request) ) return smsg cdef object PyMPI_issend(object obj, int dest, int tag, MPI_Comm comm, MPI_Request *request): cdef Pickle pickle = PyMPI_PICKLE # cdef void *sbuf = NULL cdef MPI_Count scount = 0 cdef MPI_Datatype stype = MPI_BYTE # cdef object smsg = None if dest != MPI_PROC_NULL: smsg = pickle_dump(pickle, obj, &sbuf, &scount) with nogil: CHKERR( MPI_Issend_c( sbuf, scount, stype, dest, tag, comm, request) ) return smsg cdef object PyMPI_irecv(object obj, int source, int tag, MPI_Comm comm, MPI_Request *request): # cdef void *rbuf = NULL cdef MPI_Aint rlen = 0 cdef MPI_Count rcount = 0 cdef MPI_Datatype rtype = MPI_BYTE # cdef object rmsg = None if source != MPI_PROC_NULL: if obj is None: rcount = options.irecv_bufsz obj = pickle_alloc(&rbuf, rcount) rmsg = asbuffer_r(obj, NULL, NULL) elif is_integral(obj): rcount = PyNumber_Index(obj) obj = pickle_alloc(&rbuf, rcount) rmsg = asbuffer_r(obj, NULL, NULL) else: rmsg = asbuffer_w(obj, &rbuf, &rlen) rcount = rlen rmsg = PyMPI_wrap_buffer(rmsg) with nogil: CHKERR( MPI_Irecv_c( rbuf, rcount, rtype, source, tag, comm, request) ) return rmsg # ----------------------------------------------------------------------------- cdef object PyMPI_sendrecv(object sobj, int dest, int sendtag, object robj, int source, int recvtag, MPI_Comm comm, MPI_Status *status): cdef MPI_Request request = MPI_REQUEST_NULL sobj = PyMPI_isend(sobj, dest, sendtag, comm, &request) robj = PyMPI_recv (robj, source, recvtag, comm, status) with nogil: CHKERR( MPI_Wait(&request, MPI_STATUS_IGNORE) ) return robj # ----------------------------------------------------------------------------- @cython.final @cython.internal cdef class _p_req_buf: cdef buffer buf cdef 
inline object PyMPI_wrap_buffer(buffer buf): cdef _p_req_buf ob = _p_req_buf.__new__(_p_req_buf) ob.buf = buf return ob cdef inline object PyMPI_load_buffer(_p_req_buf ob, MPI_Status *status): cdef MPI_Count rcount = 0 cdef MPI_Datatype rtype = MPI_BYTE CHKERR( MPI_Get_count_c(status, rtype, &rcount) ) if rcount <= 0: return None cdef Pickle pickle = PyMPI_PICKLE cdef void *rbuf = ob.buf.view.buf return pickle_load(pickle, rbuf, rcount) @cython.final @cython.internal cdef class _p_req_obj: cdef object obj cdef inline object PyMPI_wrap_object(object obj): cdef _p_req_obj ob = _p_req_obj.__new__(_p_req_obj) ob.obj = obj return ob cdef inline object PyMPI_load_object(_p_req_obj ob): return ob.obj cdef inline object PyMPI_load(object ob, MPI_Status *status): if type(ob) is _p_req_buf: return PyMPI_load_buffer(<_p_req_buf>ob, status) if type(ob) is _p_req_obj: return PyMPI_load_object(<_p_req_obj>ob) return None # ----------------------------------------------------------------------------- cdef object PyMPI_wait(Request request, Status status): cdef _p_rs rs = _p_rs.__new__(_p_rs) rs.set_request(request) rs.set_status(status) with nogil: CHKERR( MPI_Wait(&request.ob_mpi, rs.status) ) return rs.get_result() cdef object PyMPI_test(Request request, int *flag, Status status): cdef _p_rs rs = _p_rs.__new__(_p_rs) rs.set_request(request) rs.set_status(status) with nogil: CHKERR( MPI_Test(&request.ob_mpi, flag, rs.status) ) if not flag[0]: return None return rs.get_result() cdef object PyMPI_waitany(requests, int *index, Status status): cdef _p_rs rs = _p_rs.__new__(_p_rs) rs.acquire(requests) rs.set_status(status) cdef object obj = None try: with nogil: CHKERR( MPI_Waitany( rs.count, rs.requests, index, rs.status) ) if index[0] != MPI_UNDEFINED: obj = rs.get_object(index[0]) finally: rs.release() return obj cdef object PyMPI_testany(requests, int *index, int *flag, Status status): cdef _p_rs rs = _p_rs.__new__(_p_rs) rs.acquire(requests) rs.set_status(status) cdef object obj = None try: with nogil: CHKERR( MPI_Testany( rs.count, rs.requests, index, flag, rs.status) ) if index[0] != MPI_UNDEFINED and flag[0]: obj = rs.get_object(index[0]) finally: rs.release() return obj cdef object PyMPI_waitall(requests, statuses): cdef _p_rs rs = _p_rs.__new__(_p_rs) rs.acquire(requests) rs.add_statuses() cdef object objects = None try: with nogil: CHKERR( MPI_Waitall( rs.count, rs.requests, rs.statuses) ) objects = rs.get_objects() finally: rs.release(statuses) return objects cdef object PyMPI_testall(requests, int *flag, statuses): cdef _p_rs rs = _p_rs.__new__(_p_rs) rs.acquire(requests) rs.add_statuses() cdef object objects = None try: with nogil: CHKERR( MPI_Testall( rs.count, rs.requests, flag, rs.statuses) ) if flag[0]: objects = rs.get_objects() finally: rs.release(statuses) return objects cdef object PyMPI_waitsome(requests, statuses): cdef _p_rs rs = _p_rs.__new__(_p_rs) rs.acquire(requests) rs.add_indices() rs.add_statuses() cdef object indices = None cdef object objects = None try: with nogil: CHKERR( MPI_Waitsome( rs.count, rs.requests, &rs.outcount, rs.indices, rs.statuses) ) indices = rs.get_indices() objects = rs.get_objects() finally: rs.release(statuses) return (indices, objects) cdef object PyMPI_testsome(requests, statuses): cdef _p_rs rs = _p_rs.__new__(_p_rs) rs.acquire(requests) rs.add_indices() rs.add_statuses() cdef object indices = None cdef object objects = None try: with nogil: CHKERR( MPI_Testsome( rs.count, rs.requests, &rs.outcount, rs.indices, rs.statuses) ) indices = 
rs.get_indices() objects = rs.get_objects() finally: rs.release(statuses) return (indices, objects) # ----------------------------------------------------------------------------- cdef object PyMPI_probe(int source, int tag, MPI_Comm comm, MPI_Status *status): with nogil: CHKERR( MPI_Probe(source, tag, comm, status) ) return True cdef object PyMPI_iprobe(int source, int tag, MPI_Comm comm, MPI_Status *status): cdef int flag = 0 with nogil: CHKERR( MPI_Iprobe(source, tag, comm, &flag, status) ) return flag cdef object PyMPI_mprobe(int source, int tag, MPI_Comm comm, MPI_Message *message, MPI_Status *status): cdef void* rbuf = NULL cdef MPI_Count rcount = 0 cdef MPI_Datatype rtype = MPI_BYTE cdef MPI_Status rsts if (status == MPI_STATUS_IGNORE): status = &rsts with nogil: CHKERR( MPI_Mprobe(source, tag, comm, message, status) ) if message[0] == MPI_MESSAGE_NO_PROC: return None CHKERR( MPI_Get_count_c(status, rtype, &rcount) ) cdef object rmsg = pickle_alloc(&rbuf, rcount) return rmsg cdef object PyMPI_improbe(int source, int tag, MPI_Comm comm, int *flag, MPI_Message *message, MPI_Status *status): cdef void* rbuf = NULL cdef MPI_Count rcount = 0 cdef MPI_Datatype rtype = MPI_BYTE cdef MPI_Status rsts if (status == MPI_STATUS_IGNORE): status = &rsts with nogil: CHKERR( MPI_Improbe(source, tag, comm, flag, message, status) ) if flag[0] == 0 or message[0] == MPI_MESSAGE_NO_PROC: return None CHKERR( MPI_Get_count_c(status, rtype, &rcount) ) cdef object rmsg = pickle_alloc(&rbuf, rcount) return rmsg cdef object PyMPI_mrecv(object rmsg, MPI_Message *message, MPI_Status *status): cdef Pickle pickle = PyMPI_PICKLE cdef void* rbuf = NULL cdef MPI_Aint rlen = 0 cdef MPI_Datatype rtype = MPI_BYTE if message[0] == MPI_MESSAGE_NO_PROC: rmsg = None elif rmsg is None: pass elif PyBytes_CheckExact(rmsg): rmsg = asbuffer_r(rmsg, &rbuf, &rlen) else: rmsg = asbuffer_w(rmsg, &rbuf, &rlen) # ~> unreachable cdef MPI_Count rcount = rlen with nogil: CHKERR( MPI_Mrecv_c( rbuf, rcount, rtype, message, status) ) rmsg = pickle_load(pickle, rbuf, rcount) return rmsg cdef object PyMPI_imrecv(object rmsg, MPI_Message *message, MPI_Request *request): cdef void* rbuf = NULL cdef MPI_Aint rlen = 0 cdef MPI_Datatype rtype = MPI_BYTE if message[0] == MPI_MESSAGE_NO_PROC: rmsg = None elif rmsg is None: pass elif PyBytes_CheckExact(rmsg): rmsg = asbuffer_r(rmsg, &rbuf, &rlen) else: rmsg = asbuffer_w(rmsg, &rbuf, &rlen) # ~> unreachable if rmsg is not None: rmsg = PyMPI_wrap_buffer(rmsg) cdef MPI_Count rcount = rlen with nogil: CHKERR( MPI_Imrecv_c( rbuf, rcount, rtype, message, request) ) return rmsg # ----------------------------------------------------------------------------- cdef object PyMPI_barrier(MPI_Comm comm): with nogil: CHKERR( MPI_Barrier(comm) ) return None cdef object PyMPI_bcast(object obj, int root, MPI_Comm comm): cdef Pickle pickle = PyMPI_PICKLE # cdef void *buf = NULL cdef MPI_Count count = 0 cdef MPI_Datatype dtype = MPI_BYTE # cdef int dosend=0, dorecv=0 cdef int inter=0, rank=0 CHKERR( MPI_Comm_test_inter(comm, &inter) ) if inter: if root == MPI_PROC_NULL: dosend, dorecv = 0, 0 elif root == MPI_ROOT: dosend, dorecv = 1, 0 else: dosend, dorecv = 0, 1 else: CHKERR( MPI_Comm_rank(comm, &rank) ) if root == rank: dosend, dorecv = 1, 1 else: dosend, dorecv = 0, 1 # cdef object smsg = None cdef object rmsg = None # if dosend: smsg = pickle_dump(pickle, obj, &buf, &count) if dosend and dorecv: rmsg = smsg with PyMPI_Lock(comm, "bcast"): with nogil: CHKERR( MPI_Bcast_c( &count, 1, MPI_COUNT, root, comm) ) if 
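# -----------------------------------------------------------------------------
# Illustrative sketch (not part of this module): the nonblocking and matched
# probe paths implemented above, from user code.  irecv() with no buffer
# argument stages the pickled message in a buffer of options.irecv_bufsz
# bytes, while mprobe()/Message.recv() sizes the buffer from the matched
# message, as PyMPI_recv_match() does.  Assumes at least two MPI processes.
def _example_nonblocking_objects():
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    if rank == 0:
        req = comm.isend({"msg": "hello"}, dest=1, tag=0)
        req.wait()
    elif rank == 1:
        msg = comm.mprobe(source=0, tag=0)   # matched probe, exact size
        return msg.recv()
    return None
# -----------------------------------------------------------------------------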
dorecv and not dosend: rmsg = pickle_alloc(&buf, count) with nogil: CHKERR( MPI_Bcast_c( buf, count, dtype, root, comm) ) if dorecv: rmsg = pickle_load(pickle, buf, count) # return rmsg cdef object PyMPI_gather(object sendobj, int root, MPI_Comm comm): cdef Pickle pickle = PyMPI_PICKLE # cdef void *sbuf = NULL cdef MPI_Count scount = 0 cdef MPI_Datatype stype = MPI_BYTE cdef void *rbuf = NULL cdef MPI_Count *rcounts = NULL cdef MPI_Aint *rdispls = NULL cdef MPI_Datatype rtype = MPI_BYTE # cdef int dosend=0, dorecv=0 cdef int inter=0, size=0, rank=0 CHKERR( MPI_Comm_test_inter(comm, &inter) ) if inter: CHKERR( MPI_Comm_remote_size(comm, &size) ) if root == MPI_PROC_NULL: dosend, dorecv = 0, 0 elif root == MPI_ROOT: dosend, dorecv = 0, 1 else: dosend, dorecv = 1, 0 else: CHKERR( MPI_Comm_size(comm, &size) ) CHKERR( MPI_Comm_rank(comm, &rank) ) if root == rank: dosend, dorecv = 1, 1 else: dosend, dorecv = 1, 0 # cdef object unuseds = None cdef object rmsg = None cdef object unused1 # if dorecv: unused1 = allocate_count_displ(size, &rcounts, &rdispls) if dosend: unuseds = pickle_dump(pickle, sendobj, &sbuf, &scount) with PyMPI_Lock(comm, "gather"): with nogil: CHKERR( MPI_Gather_c( &scount, 1, MPI_COUNT, rcounts, 1, MPI_COUNT, root, comm) ) if dorecv: rmsg = pickle_allocv(&rbuf, size, rcounts, rdispls) with nogil: CHKERR( MPI_Gatherv_c( sbuf, scount, stype, rbuf, rcounts, rdispls, rtype, root, comm) ) if dorecv: rmsg = pickle_loadv(pickle, rbuf, size, rcounts, rdispls) # return rmsg cdef object PyMPI_scatter(object sendobj, int root, MPI_Comm comm): cdef Pickle pickle = PyMPI_PICKLE # cdef void *sbuf = NULL cdef MPI_Count *scounts = NULL cdef MPI_Aint *sdispls = NULL cdef MPI_Datatype stype = MPI_BYTE cdef void *rbuf = NULL cdef MPI_Count rcount = 0 cdef MPI_Datatype rtype = MPI_BYTE # cdef int dosend=0, dorecv=0 cdef int inter=0, size=0, rank=0 CHKERR( MPI_Comm_test_inter(comm, &inter) ) if inter: CHKERR( MPI_Comm_remote_size(comm, &size) ) if root == MPI_PROC_NULL: dosend, dorecv = 0, 0 elif root == MPI_ROOT: dosend, dorecv = 1, 0 else: dosend, dorecv = 0, 1 else: CHKERR( MPI_Comm_size(comm, &size) ) CHKERR( MPI_Comm_rank(comm, &rank) ) if root == rank: dosend, dorecv = 1, 1 else: dosend, dorecv = 0, 1 # cdef object unuseds = None cdef object rmsg = None cdef object unused1 # if dosend: unused1 = allocate_count_displ(size, &scounts, &sdispls) if dosend: unuseds = pickle_dumpv(pickle, sendobj, &sbuf, size, scounts, sdispls) with PyMPI_Lock(comm, "scatter"): with nogil: CHKERR( MPI_Scatter_c( scounts, 1, MPI_COUNT, &rcount, 1, MPI_COUNT, root, comm) ) if dorecv: rmsg = pickle_alloc(&rbuf, rcount) with nogil: CHKERR( MPI_Scatterv_c( sbuf, scounts, sdispls, stype, rbuf, rcount, rtype, root, comm) ) if dorecv: rmsg = pickle_load(pickle, rbuf, rcount) # return rmsg cdef object PyMPI_allgather(object sendobj, MPI_Comm comm): cdef Pickle pickle = PyMPI_PICKLE # cdef void *sbuf = NULL cdef MPI_Count scount = 0 cdef MPI_Datatype stype = MPI_BYTE cdef void *rbuf = NULL cdef MPI_Count *rcounts = NULL cdef MPI_Aint *rdispls = NULL cdef MPI_Datatype rtype = MPI_BYTE # cdef int inter=0, size=0 CHKERR( MPI_Comm_test_inter(comm, &inter) ) if inter: CHKERR( MPI_Comm_remote_size(comm, &size) ) else: CHKERR( MPI_Comm_size(comm, &size) ) # cdef object unuseds = None cdef object rmsg = None cdef object unused1 # unused1 = allocate_count_displ(size, &rcounts, &rdispls) unuseds = pickle_dump(pickle, sendobj, &sbuf, &scount) with PyMPI_Lock(comm, "allgather"): with nogil: CHKERR( MPI_Allgather_c( &scount, 1, 
MPI_COUNT, rcounts, 1, MPI_COUNT, comm) ) rmsg = pickle_allocv(&rbuf, size, rcounts, rdispls) with nogil: CHKERR( MPI_Allgatherv_c( sbuf, scount, stype, rbuf, rcounts, rdispls, rtype, comm) ) rmsg = pickle_loadv(pickle, rbuf, size, rcounts, rdispls) # return rmsg cdef object PyMPI_alltoall(object sendobj, MPI_Comm comm): cdef Pickle pickle = PyMPI_PICKLE # cdef void *sbuf = NULL cdef MPI_Count *scounts = NULL cdef MPI_Aint *sdispls = NULL cdef MPI_Datatype stype = MPI_BYTE cdef void *rbuf = NULL cdef MPI_Count *rcounts = NULL cdef MPI_Aint *rdispls = NULL cdef MPI_Datatype rtype = MPI_BYTE # cdef int inter=0, size=0 CHKERR( MPI_Comm_test_inter(comm, &inter) ) if inter: CHKERR( MPI_Comm_remote_size(comm, &size) ) else: CHKERR( MPI_Comm_size(comm, &size) ) # cdef object unuseds = None cdef object rmsg = None cdef object unused1, unused2 # unused1 = allocate_count_displ(size, &scounts, &sdispls) unused2 = allocate_count_displ(size, &rcounts, &rdispls) unuseds = pickle_dumpv(pickle, sendobj, &sbuf, size, scounts, sdispls) with PyMPI_Lock(comm, "alltoall"): with nogil: CHKERR( MPI_Alltoall_c( scounts, 1, MPI_COUNT, rcounts, 1, MPI_COUNT, comm) ) rmsg = pickle_allocv(&rbuf, size, rcounts, rdispls) with nogil: CHKERR( MPI_Alltoallv_c( sbuf, scounts, sdispls, stype, rbuf, rcounts, rdispls, rtype, comm) ) rmsg = pickle_loadv(pickle, rbuf, size, rcounts, rdispls) # return rmsg cdef object PyMPI_neighbor_allgather(object sendobj, MPI_Comm comm): cdef Pickle pickle = PyMPI_PICKLE # cdef void *sbuf = NULL cdef MPI_Count scount = 0 cdef MPI_Datatype stype = MPI_BYTE cdef void *rbuf = NULL cdef MPI_Count *rcounts = NULL cdef MPI_Aint *rdispls = NULL cdef MPI_Datatype rtype = MPI_BYTE # cdef int rsize=0 comm_neighbors_count(comm, &rsize, NULL) # cdef object unuseds = None cdef object rmsg = None cdef object unused1 # unused1 = allocate_count_displ(rsize, &rcounts, &rdispls) for i in range(rsize): rcounts[i] = 0 unuseds = pickle_dump(pickle, sendobj, &sbuf, &scount) with PyMPI_Lock(comm, "neighbor_allgather"): with nogil: CHKERR( MPI_Neighbor_allgather_c( &scount, 1, MPI_COUNT, rcounts, 1, MPI_COUNT, comm) ) rmsg = pickle_allocv(&rbuf, rsize, rcounts, rdispls) with nogil: CHKERR( MPI_Neighbor_allgatherv_c( sbuf, scount, stype, rbuf, rcounts, rdispls, rtype, comm) ) rmsg = pickle_loadv(pickle, rbuf, rsize, rcounts, rdispls) # return rmsg cdef object PyMPI_neighbor_alltoall(object sendobj, MPI_Comm comm): cdef Pickle pickle = PyMPI_PICKLE # cdef void *sbuf = NULL cdef MPI_Count *scounts = NULL cdef MPI_Aint *sdispls = NULL cdef MPI_Datatype stype = MPI_BYTE cdef void *rbuf = NULL cdef MPI_Count *rcounts = NULL cdef MPI_Aint *rdispls = NULL cdef MPI_Datatype rtype = MPI_BYTE # cdef int ssize=0, rsize=0 comm_neighbors_count(comm, &rsize, &ssize) # cdef object unuseds = None cdef object rmsg = None cdef object unused1, unused2 # unused1 = allocate_count_displ(ssize, &scounts, &sdispls) unused2 = allocate_count_displ(rsize, &rcounts, &rdispls) for i in range(rsize): rcounts[i] = 0 unuseds = pickle_dumpv(pickle, sendobj, &sbuf, ssize, scounts, sdispls) with PyMPI_Lock(comm, "neighbor_alltoall"): with nogil: CHKERR( MPI_Neighbor_alltoall_c( scounts, 1, MPI_COUNT, rcounts, 1, MPI_COUNT, comm) ) rmsg = pickle_allocv(&rbuf, rsize, rcounts, rdispls) with nogil: CHKERR( MPI_Neighbor_alltoallv_c( sbuf, scounts, sdispls, stype, rbuf, rcounts, rdispls, rtype, comm) ) rmsg = pickle_loadv(pickle, rbuf, rsize, rcounts, rdispls) # return rmsg # ----------------------------------------------------------------------------- cdef 
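# --- [illustrative aside: not part of the original mpi4py sources] ----------
# The routines above implement the lowercase, pickle-based collectives: each
# rank pickles its Python object, the byte counts are exchanged first with a
# fixed-size collective (MPI_COUNT elements), and the pickled payloads then
# travel through the matching "v" collective (MPI_Allgatherv_c,
# MPI_Alltoallv_c, ...).  The sketch below shows the public API that reaches
# these code paths; the helper name is made up for illustration and assumes a
# launch such as `mpiexec -n 4 python demo.py`.
def _demo_pickle_collectives():
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    rank, size = comm.Get_rank(), comm.Get_size()
    # arbitrary picklable objects, no buffer-protocol types required
    cfg = comm.bcast({"step": 0, "tags": ["a", "b"]}, root=0)
    table = comm.allgather((rank, len(cfg["tags"])))
    dealt = comm.alltoall([(rank, dst) for dst in range(size)])
    return cfg, table, dealt
# -----------------------------------------------------------------------------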
inline object _py_reduce(object seq, object op): if seq is None: return None cdef Py_ssize_t i, n = len(seq) cdef object res = seq[0] for i in range(1, n): res = op(res, seq[i]) return res cdef inline object _py_scan(object seq, object op): if seq is None: return None cdef Py_ssize_t i, n = len(seq) for i in range(1, n): seq[i] = op(seq[i-1], seq[i]) return seq cdef inline object _py_exscan(object seq, object op): if seq is None: return None seq = _py_scan(seq, op) seq.pop(-1) seq.insert(0, None) return seq cdef object PyMPI_reduce_naive(object sendobj, object op, int root, MPI_Comm comm): cdef object items = PyMPI_gather(sendobj, root, comm) return _py_reduce(items, op) cdef object PyMPI_allreduce_naive(object sendobj, object op, MPI_Comm comm): cdef object items = PyMPI_allgather(sendobj, comm) return _py_reduce(items, op) cdef object PyMPI_scan_naive(object sendobj, object op, MPI_Comm comm): cdef object items = PyMPI_gather(sendobj, 0, comm) items = _py_scan(items, op) return PyMPI_scatter(items, 0, comm) cdef object PyMPI_exscan_naive(object sendobj, object op, MPI_Comm comm): cdef object items = PyMPI_gather(sendobj, 0, comm) items = _py_exscan(items, op) return PyMPI_scatter(items, 0, comm) # --- cdef object PyMPI_copy(object obj): cdef Pickle pickle = PyMPI_PICKLE cdef void *buf = NULL cdef MPI_Count count = 0 obj = pickle_dump(pickle, obj, &buf, &count) return pickle_load(pickle, buf, count) cdef object PyMPI_send_p2p(object obj, int dst, int tag, MPI_Comm comm): cdef Pickle pickle = PyMPI_PICKLE cdef void *sbuf = NULL cdef MPI_Count scount = 0 cdef MPI_Datatype stype = MPI_BYTE cdef object unuseds = pickle_dump(pickle, obj, &sbuf, &scount) with nogil: CHKERR( MPI_Send_c(&scount, 1, MPI_COUNT, dst, tag, comm) ) with nogil: CHKERR( MPI_Send_c(sbuf, scount, stype, dst, tag, comm) ) return None cdef object PyMPI_recv_p2p(int src, int tag, MPI_Comm comm): cdef Pickle pickle = PyMPI_PICKLE cdef void *rbuf = NULL cdef MPI_Count rcount = 0 cdef MPI_Datatype rtype = MPI_BYTE cdef MPI_Status *status = MPI_STATUS_IGNORE with nogil: CHKERR( MPI_Recv_c(&rcount, 1, MPI_COUNT, src, tag, comm, status) ) cdef object unusedr = pickle_alloc(&rbuf, rcount) with nogil: CHKERR( MPI_Recv_c(rbuf, rcount, rtype, src, tag, comm, status) ) return pickle_load(pickle, rbuf, rcount) cdef object PyMPI_sendrecv_p2p(object obj, int dst, int stag, int src, int rtag, MPI_Comm comm): cdef Pickle pickle = PyMPI_PICKLE cdef void *sbuf = NULL, *rbuf = NULL cdef MPI_Count scount = 0, rcount = 0 cdef MPI_Datatype dtype = MPI_BYTE cdef object unuseds = pickle_dump(pickle, obj, &sbuf, &scount) with nogil: CHKERR( MPI_Sendrecv_c( &scount, 1, MPI_COUNT, dst, stag, &rcount, 1, MPI_COUNT, src, rtag, comm, MPI_STATUS_IGNORE) ) cdef object unusedr = pickle_alloc(&rbuf, rcount) with nogil: CHKERR( MPI_Sendrecv_c( sbuf, scount, dtype, dst, stag, rbuf, rcount, dtype, src, rtag, comm, MPI_STATUS_IGNORE) ) return pickle_load(pickle, rbuf, rcount) cdef object PyMPI_bcast_p2p(object obj, int root, MPI_Comm comm): cdef Pickle pickle = PyMPI_PICKLE cdef void *buf = NULL cdef MPI_Count count = 0 cdef MPI_Datatype dtype = MPI_BYTE cdef int rank = MPI_PROC_NULL CHKERR( MPI_Comm_rank(comm, &rank) ) if root == rank: obj = pickle_dump(pickle, obj, &buf, &count) with PyMPI_Lock(comm, "@bcast_p2p@"): with nogil: CHKERR( MPI_Bcast_c(&count, 1, MPI_COUNT, root, comm) ) if root != rank: obj = pickle_alloc(&buf, count) with nogil: CHKERR( MPI_Bcast_c(buf, count, dtype, root, comm) ) return pickle_load(pickle, buf, count) cdef object 
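# --- [illustrative aside: not part of the original mpi4py sources] ----------
# The helpers above define the object-mode folds used by the "naive"
# reductions: gather everything to one rank, fold left to right with the
# Python operator, and scatter/broadcast the result back.  The standalone
# sketch below (plain Python, no MPI needed) mirrors those semantics.
def _demo_object_folds():
    import operator
    op = operator.add
    seq = [1, 2, 3, 4]
    reduced = seq[0]
    for item in seq[1:]:                  # left fold, like _py_reduce
        reduced = op(reduced, item)
    scan = list(seq)
    for i in range(1, len(scan)):         # inclusive prefix, like _py_scan
        scan[i] = op(scan[i - 1], scan[i])
    exscan = [None] + scan[:-1]           # exclusive prefix, like _py_exscan
    assert (reduced, scan, exscan) == (10, [1, 3, 6, 10], [None, 1, 3, 6])
    return reduced, scan, exscan
# -----------------------------------------------------------------------------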
PyMPI_reduce_p2p(object sendobj, object op, int root, MPI_Comm comm, int tag): # Get communicator size and rank cdef int size = MPI_UNDEFINED cdef int rank = MPI_PROC_NULL CHKERR( MPI_Comm_size(comm, &size) ) CHKERR( MPI_Comm_rank(comm, &rank) ) # Check root argument if root < 0 or root >= size: MPI_Comm_call_errhandler(comm, MPI_ERR_ROOT) raise MPIException(MPI_ERR_ROOT) # cdef object result = PyMPI_copy(sendobj) cdef object unused # Compute reduction at process 0 cdef unsigned int umask = 1 cdef unsigned int usize = size cdef unsigned int urank = rank cdef int target = 0 while umask < usize: if (umask & urank) != 0: target = ((urank & ~umask) % usize) PyMPI_send_p2p(result, target, tag, comm) else: target = (urank | umask) if target < size: unused = PyMPI_recv_p2p(target, tag, comm) result = op(result, unused) umask <<= 1 # Send reduction to root if root != 0: if rank == 0: PyMPI_send_p2p(result, root, tag, comm) elif rank == root: result = PyMPI_recv_p2p(0, tag, comm) if rank != root: result = None # return result cdef object PyMPI_scan_p2p(object sendobj, object op, MPI_Comm comm, int tag): # Get communicator size and rank cdef int size = MPI_UNDEFINED cdef int rank = MPI_PROC_NULL CHKERR( MPI_Comm_size(comm, &size) ) CHKERR( MPI_Comm_rank(comm, &rank) ) # cdef object result = PyMPI_copy(sendobj) cdef object partial = result cdef object unused # Compute prefix reduction cdef unsigned int umask = 1 cdef unsigned int usize = size cdef unsigned int urank = rank cdef int target = 0 while umask < usize: target = (urank ^ umask) if target < size: unused = PyMPI_sendrecv_p2p( partial, target, tag, target, tag, comm) if rank > target: partial = op(unused, partial) result = op(unused, result) else: unused = op(partial, unused) partial = unused umask <<= 1 # return result cdef object PyMPI_exscan_p2p(object sendobj, object op, MPI_Comm comm, int tag): # Get communicator size and rank cdef int size = MPI_UNDEFINED cdef int rank = MPI_PROC_NULL CHKERR( MPI_Comm_size(comm, &size) ) CHKERR( MPI_Comm_rank(comm, &rank) ) # cdef object result = PyMPI_copy(sendobj) cdef object partial = result cdef object unused # Compute prefix reduction cdef unsigned int umask = 1 cdef unsigned int usize = size cdef unsigned int urank = rank cdef unsigned int uflag = 0 cdef int target = 0 while umask < usize: target = (urank ^ umask) if target < size: unused = PyMPI_sendrecv_p2p( partial, target, tag, target, tag, comm) if rank > target: partial = op(unused, partial) if uflag == 0: uflag = 1 result = unused else: result = op(unused, result) else: unused = op(partial, unused) partial = unused umask <<= 1 # if rank == 0: result = None return result # --- cdef extern from * nogil: int PyMPI_Commctx_intra(MPI_Comm, MPI_Comm*, int*) int PyMPI_Commctx_inter(MPI_Comm, MPI_Comm*, int*, MPI_Comm*, int*) int PyMPI_Commctx_finalize() cdef int PyMPI_Commctx_INTRA( MPI_Comm comm, MPI_Comm *dupcomm, int *tag, ) except -1: with PyMPI_Lock(comm, "@commctx_intra"): CHKERR( PyMPI_Commctx_intra(comm, dupcomm, tag) ) return 0 cdef int PyMPI_Commctx_INTER( MPI_Comm comm, MPI_Comm *dupcomm, int *tag, MPI_Comm *localcomm, int *low_group, ) except -1: with PyMPI_Lock(comm, "@commctx_inter"): CHKERR( PyMPI_Commctx_inter(comm, dupcomm, tag, localcomm, low_group) ) return 0 def _commctx_intra( Intracomm comm: Intracomm, ) -> tuple[Intracomm, int]: """ Create/get intracommunicator duplicate. 
""" cdef int tag = MPI_UNDEFINED cdef Intracomm dupcomm = New(Intracomm) PyMPI_Commctx_INTRA(comm.ob_mpi, &dupcomm.ob_mpi, &tag) return (dupcomm, tag) def _commctx_inter( Intercomm comm: Intercomm, ) -> tuple[Intercomm, int, Intracomm, bool]: """ Create/get intercommunicator duplicate. """ cdef int tag = MPI_UNDEFINED, low_group = 0 cdef Intercomm dupcomm = New(Intercomm) cdef Intracomm localcomm = New(Intracomm) PyMPI_Commctx_INTER(comm.ob_mpi, &dupcomm.ob_mpi, &tag, &localcomm.ob_mpi, &low_group) return (dupcomm, tag, localcomm, low_group) # --- cdef object PyMPI_reduce_intra(object sendobj, object op, int root, MPI_Comm comm): cdef int tag = MPI_UNDEFINED PyMPI_Commctx_INTRA(comm, &comm, &tag) return PyMPI_reduce_p2p(sendobj, op, root, comm, tag) cdef object PyMPI_reduce_inter(object sendobj, object op, int root, MPI_Comm comm): cdef int tag = MPI_UNDEFINED cdef MPI_Comm localcomm = MPI_COMM_NULL PyMPI_Commctx_INTER(comm, &comm, &tag, &localcomm, NULL) # Get communicator remote size and rank cdef int size = MPI_UNDEFINED cdef int rank = MPI_PROC_NULL CHKERR( MPI_Comm_remote_size(comm, &size) ) CHKERR( MPI_Comm_rank(comm, &rank) ) if root >= 0 and root < size: # Reduce in local group and send to remote root sendobj = PyMPI_reduce_p2p(sendobj, op, 0, localcomm, tag) if rank == 0: PyMPI_send_p2p(sendobj, root, tag, comm) return None elif root == MPI_ROOT: # Receive from remote group return PyMPI_recv_p2p(0, tag, comm) elif root == MPI_PROC_NULL: # This process does nothing return None else: # Wrong root argument MPI_Comm_call_errhandler(comm, MPI_ERR_ROOT) raise MPIException(MPI_ERR_ROOT) cdef object PyMPI_allreduce_intra(object sendobj, object op, MPI_Comm comm): cdef int tag = MPI_UNDEFINED PyMPI_Commctx_INTRA(comm, &comm, &tag) sendobj = PyMPI_reduce_p2p(sendobj, op, 0, comm, tag) return PyMPI_bcast_p2p(sendobj, 0, comm) cdef object PyMPI_allreduce_inter(object sendobj, object op, MPI_Comm comm): cdef int tag = MPI_UNDEFINED cdef int rank = MPI_PROC_NULL cdef MPI_Comm localcomm = MPI_COMM_NULL PyMPI_Commctx_INTER(comm, &comm, &tag, &localcomm, NULL) CHKERR( MPI_Comm_rank(comm, &rank) ) # Reduce in local group, exchange, and broadcast in local group sendobj = PyMPI_reduce_p2p(sendobj, op, 0, localcomm, tag) if rank == 0: sendobj = PyMPI_sendrecv_p2p(sendobj, 0, tag, 0, tag, comm) return PyMPI_bcast_p2p(sendobj, 0, localcomm) cdef object PyMPI_scan_intra(object sendobj, object op, MPI_Comm comm): cdef int tag = MPI_UNDEFINED PyMPI_Commctx_INTRA(comm, &comm, &tag) return PyMPI_scan_p2p(sendobj, op, comm, tag) cdef object PyMPI_exscan_intra(object sendobj, object op, MPI_Comm comm): cdef int tag = MPI_UNDEFINED PyMPI_Commctx_INTRA(comm, &comm, &tag) return PyMPI_exscan_p2p(sendobj, op, comm, tag) # --- cdef inline bint comm_is_intra(MPI_Comm comm) except -1 nogil: cdef int inter = 0 CHKERR( MPI_Comm_test_inter(comm, &inter) ) if inter: return 0 else: return 1 cdef object PyMPI_reduce(object sendobj, object op, int root, MPI_Comm comm): if not options.fast_reduce: return PyMPI_reduce_naive(sendobj, op, root, comm) elif comm_is_intra(comm): return PyMPI_reduce_intra(sendobj, op, root, comm) else: return PyMPI_reduce_inter(sendobj, op, root, comm) cdef object PyMPI_allreduce(object sendobj, object op, MPI_Comm comm): if not options.fast_reduce: return PyMPI_allreduce_naive(sendobj, op, comm) elif comm_is_intra(comm): return PyMPI_allreduce_intra(sendobj, op, comm) else: return PyMPI_allreduce_inter(sendobj, op, comm) cdef object PyMPI_scan(object sendobj, object op, MPI_Comm comm): if not 
options.fast_reduce: return PyMPI_scan_naive(sendobj, op, comm) else: return PyMPI_scan_intra(sendobj, op, comm) cdef object PyMPI_exscan(object sendobj, object op, MPI_Comm comm): if not options.fast_reduce: return PyMPI_exscan_naive(sendobj, op, comm) else: return PyMPI_exscan_intra(sendobj, op, comm) # ----------------------------------------------------------------------------- mpi4py-4.0.3/src/mpi4py/MPI.src/objmodel.pxi000066400000000000000000000537541475341043600205260ustar00rootroot00000000000000# ----------------------------------------------------------------------------- cdef object Lock = None cdef object RLock = None if PY_VERSION_HEX >= 0x030900F0: from _thread import allocate_lock as Lock from _thread import RLock as RLock else: try: # ~> legacy from _thread import allocate_lock as Lock # ~> legacy from _thread import RLock as RLock # ~> legacy except ImportError: # ~> legacy from _dummy_thread import allocate_lock as Lock # ~> legacy from _dummy_thread import allocate_lock as RLock # ~> legacy # ----------------------------------------------------------------------------- cdef inline object New(type cls): return cls.__new__(cls) # ----------------------------------------------------------------------------- ctypedef fused handle_t: MPI_Datatype MPI_Request MPI_Message MPI_Op MPI_Group MPI_Info MPI_Errhandler MPI_Session MPI_Comm MPI_Win MPI_File cdef inline handle_t mpinull(handle_t _) noexcept nogil: cdef handle_t null if handle_t is MPI_Datatype : null = MPI_DATATYPE_NULL if handle_t is MPI_Request : null = MPI_REQUEST_NULL if handle_t is MPI_Message : null = MPI_MESSAGE_NULL if handle_t is MPI_Op : null = MPI_OP_NULL if handle_t is MPI_Group : null = MPI_GROUP_NULL if handle_t is MPI_Info : null = MPI_INFO_NULL if handle_t is MPI_Errhandler : null = MPI_ERRHANDLER_NULL if handle_t is MPI_Session : null = MPI_SESSION_NULL if handle_t is MPI_Comm : null = MPI_COMM_NULL if handle_t is MPI_Win : null = MPI_WIN_NULL if handle_t is MPI_File : null = MPI_FILE_NULL return null cdef inline int named_Datatype(MPI_Datatype arg) noexcept nogil: if arg == MPI_DATATYPE_NULL : return 1 if arg == MPI_PACKED : return 1 if arg == MPI_BYTE : return 1 if arg == MPI_AINT : return 1 if arg == MPI_OFFSET : return 1 if arg == MPI_COUNT : return 1 if arg == MPI_CHAR : return 1 if arg == MPI_WCHAR : return 1 if arg == MPI_SIGNED_CHAR : return 1 if arg == MPI_SHORT : return 1 if arg == MPI_INT : return 1 if arg == MPI_LONG : return 1 if arg == MPI_LONG_LONG : return 1 if arg == MPI_UNSIGNED_CHAR : return 1 if arg == MPI_UNSIGNED_SHORT : return 1 if arg == MPI_UNSIGNED : return 1 if arg == MPI_UNSIGNED_LONG : return 1 if arg == MPI_UNSIGNED_LONG_LONG : return 1 if arg == MPI_FLOAT : return 1 if arg == MPI_DOUBLE : return 1 if arg == MPI_LONG_DOUBLE : return 1 if arg == MPI_C_BOOL : return 1 if arg == MPI_INT8_T : return 1 if arg == MPI_INT16_T : return 1 if arg == MPI_INT32_T : return 1 if arg == MPI_INT64_T : return 1 if arg == MPI_UINT8_T : return 1 if arg == MPI_UINT16_T : return 1 if arg == MPI_UINT32_T : return 1 if arg == MPI_UINT64_T : return 1 if arg == MPI_C_COMPLEX : return 1 if arg == MPI_C_FLOAT_COMPLEX : return 1 if arg == MPI_C_DOUBLE_COMPLEX : return 1 if arg == MPI_C_LONG_DOUBLE_COMPLEX : return 1 if arg == MPI_CXX_BOOL : return 1 if arg == MPI_CXX_FLOAT_COMPLEX : return 1 if arg == MPI_CXX_DOUBLE_COMPLEX : return 1 if arg == MPI_CXX_LONG_DOUBLE_COMPLEX : return 1 if arg == MPI_SHORT_INT : return 1 if arg == MPI_2INT : return 1 if arg == MPI_LONG_INT : return 1 if arg == 
MPI_FLOAT_INT : return 1 if arg == MPI_DOUBLE_INT : return 1 if arg == MPI_LONG_DOUBLE_INT : return 1 if arg == MPI_CHARACTER : return 1 if arg == MPI_LOGICAL : return 1 if arg == MPI_INTEGER : return 1 if arg == MPI_REAL : return 1 if arg == MPI_DOUBLE_PRECISION : return 1 if arg == MPI_COMPLEX : return 1 if arg == MPI_DOUBLE_COMPLEX : return 1 if arg == MPI_LOGICAL1 : return 1 if arg == MPI_LOGICAL2 : return 1 if arg == MPI_LOGICAL4 : return 1 if arg == MPI_LOGICAL8 : return 1 if arg == MPI_INTEGER1 : return 1 if arg == MPI_INTEGER2 : return 1 if arg == MPI_INTEGER4 : return 1 if arg == MPI_INTEGER8 : return 1 if arg == MPI_INTEGER16 : return 1 if arg == MPI_REAL2 : return 1 if arg == MPI_REAL4 : return 1 if arg == MPI_REAL8 : return 1 if arg == MPI_REAL16 : return 1 if arg == MPI_COMPLEX4 : return 1 if arg == MPI_COMPLEX8 : return 1 if arg == MPI_COMPLEX16 : return 1 if arg == MPI_COMPLEX32 : return 1 return 0 cdef inline int predef_Datatype(MPI_Datatype arg) noexcept nogil: if named_Datatype(arg): return 1 cdef MPI_Count ni = 0, na = 0, nc = 0, nd = 0 cdef int combiner = MPI_UNDEFINED cdef int ierr = MPI_Type_get_envelope_c( arg, &ni, &na, &nc, &nd, &combiner) if ierr != MPI_SUCCESS: return 0 # XXX Error? return ( combiner == MPI_COMBINER_NAMED or combiner == MPI_COMBINER_VALUE_INDEX or combiner == MPI_COMBINER_F90_INTEGER or combiner == MPI_COMBINER_F90_REAL or combiner == MPI_COMBINER_F90_COMPLEX ) cdef inline int predef_Request(MPI_Request arg) noexcept nogil: if arg == MPI_REQUEST_NULL : return 1 return 0 cdef inline int predef_Message(MPI_Message arg) noexcept nogil: if arg == MPI_MESSAGE_NULL : return 1 if arg == MPI_MESSAGE_NO_PROC : return 1 return 0 cdef inline int predef_Op(MPI_Op arg) noexcept nogil: if arg == MPI_OP_NULL : return 1 if arg == MPI_MAX : return 1 if arg == MPI_MIN : return 1 if arg == MPI_SUM : return 1 if arg == MPI_PROD : return 1 if arg == MPI_LAND : return 1 if arg == MPI_BAND : return 1 if arg == MPI_LOR : return 1 if arg == MPI_BOR : return 1 if arg == MPI_LXOR : return 1 if arg == MPI_BXOR : return 1 if arg == MPI_MAXLOC : return 1 if arg == MPI_MINLOC : return 1 if arg == MPI_REPLACE : return 1 if arg == MPI_NO_OP : return 1 return 0 cdef inline int predef_Group(MPI_Group arg) noexcept nogil: if arg == MPI_GROUP_NULL : return 1 if arg == MPI_GROUP_EMPTY : return 1 return 0 cdef inline int predef_Info(MPI_Info arg) noexcept nogil: if arg == MPI_INFO_NULL : return 1 if arg == MPI_INFO_ENV : return 1 return 0 cdef inline int predef_Errhandler(MPI_Errhandler arg) noexcept nogil: if arg == MPI_ERRHANDLER_NULL : return 1 if arg == MPI_ERRORS_RETURN : return 1 if arg == MPI_ERRORS_ABORT : return 1 if arg == MPI_ERRORS_ARE_FATAL : return 1 return 0 cdef inline int predef_Session(MPI_Session arg) noexcept nogil: if arg == MPI_SESSION_NULL : return 1 return 0 cdef inline int predef_Comm(MPI_Comm arg) noexcept nogil: if arg == MPI_COMM_NULL : return 1 if arg == MPI_COMM_SELF : return 1 if arg == MPI_COMM_WORLD : return 1 return 0 cdef inline int predef_Win(MPI_Win arg) noexcept nogil: if arg == MPI_WIN_NULL : return 1 return 0 cdef inline int predef_File(MPI_File arg) noexcept nogil: if arg == MPI_FILE_NULL : return 1 return 0 cdef inline int predefined(handle_t arg) noexcept nogil: cdef int result = 0 if handle_t is MPI_Datatype : result = predef_Datatype(arg) if handle_t is MPI_Request : result = predef_Request(arg) if handle_t is MPI_Message : result = predef_Message(arg) if handle_t is MPI_Op : result = predef_Op(arg) if handle_t is MPI_Group : result = 
predef_Group(arg) if handle_t is MPI_Info : result = predef_Info(arg) if handle_t is MPI_Errhandler : result = predef_Errhandler(arg) if handle_t is MPI_Session : result = predef_Session(arg) if handle_t is MPI_Comm : result = predef_Comm(arg) if handle_t is MPI_Win : result = predef_Win(arg) if handle_t is MPI_File : result = predef_File(arg) return result cdef inline int named(handle_t arg) noexcept nogil: if handle_t is MPI_Datatype: return named_Datatype(arg) else: return predefined(arg) # ----------------------------------------------------------------------------- ctypedef fused PyMPIClass: Datatype Request Message Op Group Info Errhandler Session Comm Win File cdef extern from * nogil: """ #define PyMPI_FLAGS_READY (1U<<0) #define PyMPI_FLAGS_CONST (1U<<1) #define PyMPI_FLAGS_TEMP (1U<<2) """ enum: PyMPI_FLAGS_READY enum: PyMPI_FLAGS_CONST enum: PyMPI_FLAGS_TEMP cdef inline int cinit(PyMPIClass self, PyMPIClass arg) except -1: self.ob_mpi = mpinull(self.ob_mpi) self.flags |= PyMPI_FLAGS_READY if arg is None: return 0 self.ob_mpi = arg.ob_mpi if PyMPIClass is Request: self.ob_buf = arg.ob_buf if PyMPIClass is Message: self.ob_buf = arg.ob_buf if PyMPIClass is Op: op_user_cpy(self, arg) if PyMPIClass is Win: self.ob_mem = arg.ob_mem return 0 cdef inline int marktemp(PyMPIClass self) except -1: if not predefined(self.ob_mpi): self.flags |= PyMPI_FLAGS_TEMP return 0 @cython.linetrace(False) cdef inline int freetemp(PyMPIClass self) except -1: if named(self.ob_mpi): return 0 if not mpi_active(): return 0 if PyMPIClass is Datatype: CHKERR( MPI_Type_free(&self.ob_mpi) ) return 0 @cython.linetrace(False) cdef inline int dealloc(PyMPIClass self) except -1: if not (self.flags & PyMPI_FLAGS_READY): return 0 if (self.flags & PyMPI_FLAGS_CONST): return 0 if (self.flags & PyMPI_FLAGS_TEMP ): return freetemp(self) if self.flags: return 0 # TODO: this always return if not mpi_active(): return 0 if predefined(self.ob_mpi): return 0 PyErr_WarnFormat( RuntimeWarning, 1, b"collecting object with %.200U handle %p", cython.typeof(self.ob_mpi), self.ob_mpi, ) cdef extern from "Python.h": enum: Py_LT enum: Py_LE enum: Py_EQ enum: Py_NE enum: Py_GT enum: Py_GE cdef inline object richcmp(PyMPIClass self, object other, int op): if op == Py_EQ: return (self.ob_mpi == (other).ob_mpi) if op == Py_NE: return (self.ob_mpi != (other).ob_mpi) cdef str mod = type(self).__module__ cdef str cls = type(self).__name__ raise TypeError(f"unorderable type '{mod}.{cls}'") cdef inline int nonnull(PyMPIClass self) noexcept nogil: return self.ob_mpi != mpinull(self.ob_mpi) cdef inline int constobj(PyMPIClass self) noexcept nogil: return self.flags & PyMPI_FLAGS_CONST # ----------------------------------------------------------------------------- cdef dict def_registry = {} cdef inline type def_class(handle_t handle): handle # unused cdef type result = None if handle_t is MPI_Datatype : result = Datatype if handle_t is MPI_Request : result = Request if handle_t is MPI_Message : result = Message if handle_t is MPI_Op : result = Op if handle_t is MPI_Group : result = Group if handle_t is MPI_Info : result = Info if handle_t is MPI_Errhandler : result = Errhandler if handle_t is MPI_Session : result = Session if handle_t is MPI_Comm : result = Comm if handle_t is MPI_Win : result = Win if handle_t is MPI_File : result = File return result cdef inline int def_register( handle_t handle, object pyobj, object name, ) except -1: cdef type cls = def_class(handle) cdef dict registry = def_registry.get(cls) cdef object key = handle if 
registry is None: registry = def_registry[cls] = {} if key not in registry: registry[key] = (pyobj, name) return 0 cdef inline object def_lookup(handle_t handle): cdef type cls = def_class(handle) cdef dict registry = def_registry[cls] cdef object key = handle return registry[key] cdef __newobj__ = None from copyreg import __newobj__ cdef inline object def_reduce(PyMPIClass self): cdef object pyobj, name pyobj, name = def_lookup(self.ob_mpi) if self is pyobj: return name return (__newobj__, (type(self), pyobj)) cdef inline object reduce_default(PyMPIClass self): if named(self.ob_mpi): return def_reduce(self) cdef str mod = type(self).__module__ cdef str cls = type(self).__name__ raise ValueError(f"cannot serialize '{mod}.{cls}' instance") # ----------------------------------------------------------------------------- cdef inline Py_uintptr_t tohandle(PyMPIClass self) noexcept nogil: return self.ob_mpi cdef inline object fromhandle(handle_t arg): cdef object obj = None if handle_t is MPI_Datatype : obj = PyMPIDatatype_New(arg) if handle_t is MPI_Request : obj = PyMPIRequest_New(arg) if handle_t is MPI_Message : obj = PyMPIMessage_New(arg) if handle_t is MPI_Op : obj = PyMPIOp_New(arg) if handle_t is MPI_Group : obj = PyMPIGroup_New(arg) if handle_t is MPI_Info : obj = PyMPIInfo_New(arg) if handle_t is MPI_Errhandler : obj = PyMPIErrhandler_New(arg) if handle_t is MPI_Session : obj = PyMPISession_New(arg) if handle_t is MPI_Comm : obj = PyMPIComm_New(arg) if handle_t is MPI_Win : obj = PyMPIWin_New(arg) if handle_t is MPI_File : obj = PyMPIFile_New(arg) return obj # ----------------------------------------------------------------------------- cdef inline int nullify(PyMPIClass self) except -1: self.ob_mpi = mpinull(self.ob_mpi) return 0 cdef inline int callfree(PyMPIClass self) except -1: if PyMPIClass is not Errhandler: if predefined(self.ob_mpi): return nullify(self) if PyMPIClass is Datatype: self.Free() if PyMPIClass is Request: self.Free() if PyMPIClass is Message: pass if PyMPIClass is Op: self.Free() if PyMPIClass is Group: self.Free() if PyMPIClass is Info: self.Free() if PyMPIClass is Errhandler: self.Free() if PyMPIClass is Session: self.Finalize() if PyMPIClass is Comm: self.Free() if PyMPIClass is Win: self.Free() if PyMPIClass is File: self.Close() return 0 cdef inline int safefree(PyMPIClass self) except -1: # skip freeing module constant objects if self.flags & PyMPI_FLAGS_CONST: return 0 # skip freeing objects with null handles if self.ob_mpi == mpinull(self.ob_mpi): return 0 # since MPI-4, some objects can be freeded # before/after the world model init/finalize if ( PyMPIClass is Info or PyMPIClass is Session or PyMPIClass is Errhandler ) and mpi_version >= 4: return callfree(self) # skip freeing before/after init/finalize if not mpi_active(): return nullify(self) # ~> uncovered # can safely free object return callfree(self) # ----------------------------------------------------------------------------- # Status cdef inline MPI_Status *arg_Status(object status) except? 
NULL: if status is None: return MPI_STATUS_IGNORE return &((status).ob_mpi) # ----------------------------------------------------------------------------- # Datatype cdef inline Datatype def_Datatype(MPI_Datatype arg, object name): cdef Datatype obj = Datatype.__new__(Datatype) obj.ob_mpi = arg obj.flags |= PyMPI_FLAGS_CONST def_register(arg, obj, name) return obj cdef inline Datatype ref_Datatype(MPI_Datatype arg): cdef Datatype obj = Datatype.__new__(Datatype) obj.ob_mpi = arg if not predefined(arg): obj.flags |= 0 # TODO return obj cdef inline object reduce_Datatype(Datatype self): # named if named(self.ob_mpi): return def_reduce(self) # predefined and user-defined cdef object basetype, combiner, params basetype, combiner, params = datatype_decode(self, True) return (_datatype_create, (basetype, combiner, params, True)) # ----------------------------------------------------------------------------- # Request cdef inline Request def_Request(MPI_Request arg, object name): cdef Request obj = Request.__new__(Request) obj.ob_mpi = arg obj.flags |= PyMPI_FLAGS_CONST def_register(arg, obj, name) return obj # ----------------------------------------------------------------------------- # Message cdef inline Message def_Message(MPI_Message arg, object name): cdef Message obj = Message.__new__(Message) obj.ob_mpi = arg obj.flags |= PyMPI_FLAGS_CONST def_register(arg, obj, name) return obj # ----------------------------------------------------------------------------- # Op cdef dict def_op = {} cdef inline Op def_Op(MPI_Op arg, object name): cdef Op obj = Op.__new__(Op) obj.ob_mpi = arg obj.flags |= PyMPI_FLAGS_CONST def_register(arg, obj, name) return obj cdef inline object reduce_Op(Op self): # predefined if named(self.ob_mpi): return def_reduce(self) # user-defined cdef int index = op_user_id_get(self) if index == 0: raise ValueError("cannot pickle user-defined reduction operation") cdef object function = op_user_registry[index] cdef object commute = self.Is_commutative() return (type(self).Create, (function, commute,)) # ----------------------------------------------------------------------------- # Group cdef inline Group def_Group(MPI_Group arg, object name): cdef Group obj = Group.__new__(Group) obj.ob_mpi = arg obj.flags |= PyMPI_FLAGS_CONST def_register(arg, obj, name) return obj # ----------------------------------------------------------------------------- # Info cdef inline Info def_Info(MPI_Info arg, object name): cdef Info obj = Info.__new__(Info) obj.ob_mpi = arg obj.flags |= PyMPI_FLAGS_CONST def_register(arg, obj, name) return obj cdef extern from *: const MPI_Info _info_null "MPI_INFO_NULL" cdef inline MPI_Info arg_Info(object obj) except? _info_null: if obj is None: return MPI_INFO_NULL return (obj).ob_mpi cdef inline object reduce_Info(Info self): # predefined if named(self.ob_mpi): return def_reduce(self) # user-defined return (type(self).Create, (self.items(),)) # ----------------------------------------------------------------------------- # Errhandler cdef inline Errhandler def_Errhandler(MPI_Errhandler arg, object name): cdef Errhandler obj = Errhandler.__new__(Errhandler) obj.ob_mpi = arg obj.flags |= PyMPI_FLAGS_CONST def_register(arg, obj, name) return obj cdef extern from *: const MPI_Errhandler _errhandler_null "MPI_ERRHANDLER_NULL" cdef inline MPI_Errhandler arg_Errhandler(object obj) except? 
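# --- [illustrative aside: not part of the original mpi4py sources] ----------
# The reduce_* helpers above define how handles pickle: named/predefined
# handles serialize as a reference to their module-level constant, a
# user-defined Info round-trips through its (key, value) items, and a
# user-defined Op through its Python function and commutativity flag.  A
# small sketch of that behavior, assuming this pickling support is available
# in the installed mpi4py version:
def _demo_handle_pickling():
    import pickle
    from mpi4py import MPI
    # predefined handles pickle by name and come back as the same constant
    assert pickle.loads(pickle.dumps(MPI.DOUBLE)) is MPI.DOUBLE
    assert pickle.loads(pickle.dumps(MPI.SUM)) is MPI.SUM
    # a user-created Info round-trips through its items
    info = MPI.Info.Create()
    info.Set("favorite", "value")
    clone = pickle.loads(pickle.dumps(info))
    items = dict(clone.items())
    info.Free(); clone.Free()
    return items
# -----------------------------------------------------------------------------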
_errhandler_null: if obj is not None: return (obj).ob_mpi cdef int opt = options.errors if opt == 0: pass elif opt == 1: return MPI_ERRORS_RETURN elif opt == 2: return MPI_ERRORS_ABORT elif opt == 3: return MPI_ERRORS_ARE_FATAL return MPI_ERRORS_ARE_FATAL # ----------------------------------------------------------------------------- # Session cdef inline Session def_Session(MPI_Session arg, object name): cdef Session obj = Session.__new__(Session) obj.ob_mpi = arg obj.flags |= PyMPI_FLAGS_CONST def_register(arg, obj, name) return obj # ----------------------------------------------------------------------------- # Comm cdef inline type CommType(MPI_Comm arg): if arg == MPI_COMM_NULL: return Comm if arg == MPI_COMM_SELF: return Intracomm if arg == MPI_COMM_WORLD: return Intracomm cdef int inter = 0 CHKERR( MPI_Comm_test_inter(arg, &inter) ) if inter: return Intercomm cdef int topo = MPI_UNDEFINED CHKERR( MPI_Topo_test(arg, &topo) ) if topo == MPI_UNDEFINED: return Intracomm if topo == MPI_CART: return Cartcomm if topo == MPI_GRAPH: return Graphcomm if topo == MPI_DIST_GRAPH: return Distgraphcomm return Comm # ~> unreachable cdef inline Comm def_Comm(MPI_Comm arg, object name): cdef Comm obj = Comm.__new__(Comm) obj.ob_mpi = arg obj.flags |= PyMPI_FLAGS_CONST def_register(arg, obj, name) return obj cdef inline Intracomm def_Intracomm(MPI_Comm arg, object name): cdef Intracomm obj = Intracomm.__new__(Intracomm) obj.ob_mpi = arg obj.flags |= PyMPI_FLAGS_CONST def_register(arg, obj, name) return obj cdef inline Intercomm def_Intercomm(MPI_Comm arg): cdef Intercomm obj = Intercomm.__new__(Intercomm) obj.ob_mpi = arg obj.flags |= PyMPI_FLAGS_CONST return obj # ----------------------------------------------------------------------------- # Win cdef inline Win def_Win(MPI_Win arg, object name): cdef Win obj = Win.__new__(Win) obj.ob_mpi = arg obj.flags |= PyMPI_FLAGS_CONST def_register(arg, obj, name) return obj # ----------------------------------------------------------------------------- # File cdef inline File def_File(MPI_File arg, object name): cdef File obj = File.__new__(File) obj.ob_mpi = arg obj.flags |= PyMPI_FLAGS_CONST def_register(arg, obj, name) return obj # ----------------------------------------------------------------------------- mpi4py-4.0.3/src/mpi4py/MPI.src/opimpl.pxi000066400000000000000000000306131475341043600202200ustar00rootroot00000000000000# ----------------------------------------------------------------------------- cdef object op_MAX(object x, object y): """maximum""" if y > x: return y else: return x cdef object op_MIN(object x, object y): """minimum""" if y < x: return y else: return x cdef object op_SUM(object x, object y): """sum""" return x + y cdef object op_PROD(object x, object y): """product""" return x * y cdef object op_BAND(object x, object y): """bit-wise and""" return x & y cdef object op_BOR(object x, object y): """bit-wise or""" return x | y cdef object op_BXOR(object x, object y): """bit-wise xor""" return x ^ y cdef object op_LAND(object x, object y): """logical and""" return bool(x) & bool(y) cdef object op_LOR(object x, object y): """logical or""" return bool(x) | bool(y) cdef object op_LXOR(object x, object y): """logical xor""" return bool(x) ^ bool(y) cdef object op_MAXLOC(object x, object y): """maximum and location""" cdef object i, j, u, v u, i = x v, j = y if u > v: return u, i elif v > u: return v, j elif j < i: return v, j else: return u, i cdef object op_MINLOC(object x, object y): """minimum and location""" cdef object i, j, u, v u, i 
= x v, j = y if u < v: return u, i elif v < u: return v, j elif j < i: return v, j else: return u, i cdef object op_REPLACE(object x, object y): """replace, (x, y) -> y""" x # unused return y cdef object op_NO_OP(object x, object y): """no-op, (x, y) -> x""" y # unused return x # ----------------------------------------------------------------------------- ctypedef fused op_count_t: int MPI_Count ctypedef fused op_usrfn_t: MPI_User_function MPI_User_function_c cdef object op_user_lock = Lock() cdef list op_user_registry = [None]*(1+32) cdef inline object op_user_call_py(int index, object x, object y, object dt): return op_user_registry[index](x, y, dt) cdef inline void op_user_call_mpi( int index, void *a, void *b, MPI_Count n, MPI_Datatype t, ) noexcept with gil: cdef Datatype datatype # errors in user-defined reduction operations are unrecoverable try: datatype = New(Datatype) datatype.ob_mpi = t try: op_user_call_py(index, mpibuf(a, n), mpibuf(b, n), datatype) finally: datatype.ob_mpi = MPI_DATATYPE_NULL except BaseException as exc: # ~> uncovered PyErr_DisplayException(exc) # ~> uncovered PySys_WriteStderr( # ~> uncovered b"Fatal Python error: %s\n", # ~> uncovered b"exception in user-defined reduction operation", # ~> uncovered ) # ~> uncovered MPI_Abort(MPI_COMM_WORLD, 1) # ~> uncovered cdef inline void op_user_call( int index, void *a, void *b, MPI_Count count, MPI_Datatype t, ) noexcept nogil: # make it abort if Python has finalized if not Py_IsInitialized(): MPI_Abort(MPI_COMM_WORLD, 1) # make it abort if module cleanup has been done if not py_module_alive(): MPI_Abort(MPI_COMM_WORLD, 1) # compute the byte-size of memory buffers cdef MPI_Count lb=0, extent=0 MPI_Type_get_extent_c(t, &lb, &extent) cdef MPI_Count n = count * extent # make the actual GIL-safe Python call op_user_call_mpi(index, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_01(void *a, void *b, op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call( 1, a, b, n[0], t[0]) @cython.callspec("MPIAPI") cdef void op_user_02(void *a, void *b, op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call( 2, a, b, n[0], t[0]) @cython.callspec("MPIAPI") cdef void op_user_03(void *a, void *b, op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call( 3, a, b, n[0], t[0]) @cython.callspec("MPIAPI") cdef void op_user_04(void *a, void *b, op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call( 4, a, b, n[0], t[0]) @cython.callspec("MPIAPI") cdef void op_user_05(void *a, void *b, op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call( 5, a, b, n[0], t[0]) @cython.callspec("MPIAPI") cdef void op_user_06(void *a, void *b, op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call( 6, a, b, n[0], t[0]) @cython.callspec("MPIAPI") cdef void op_user_07(void *a, void *b, op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call( 7, a, b, n[0], t[0]) @cython.callspec("MPIAPI") cdef void op_user_08(void *a, void *b, op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call( 8, a, b, n[0], t[0]) @cython.callspec("MPIAPI") cdef void op_user_09(void *a, void *b, op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call( 9, a, b, n[0], t[0]) @cython.callspec("MPIAPI") cdef void op_user_10(void *a, void *b, op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call(10, a, b, n[0], t[0]) @cython.callspec("MPIAPI") cdef void op_user_11(void *a, void *b, op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call(11, a, b, n[0], t[0]) @cython.callspec("MPIAPI") cdef void op_user_12(void *a, void *b, 
op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call(12, a, b, n[0], t[0]) @cython.callspec("MPIAPI") cdef void op_user_13(void *a, void *b, op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call(13, a, b, n[0], t[0]) @cython.callspec("MPIAPI") cdef void op_user_14(void *a, void *b, op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call(14, a, b, n[0], t[0]) @cython.callspec("MPIAPI") cdef void op_user_15(void *a, void *b, op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call(15, a, b, n[0], t[0]) @cython.callspec("MPIAPI") cdef void op_user_16(void *a, void *b, op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call(16, a, b, n[0], t[0]) @cython.callspec("MPIAPI") cdef void op_user_17(void *a, void *b, op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call(17, a, b, n[0], t[0]) @cython.callspec("MPIAPI") cdef void op_user_18(void *a, void *b, op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call(18, a, b, n[0], t[0]) @cython.callspec("MPIAPI") cdef void op_user_19(void *a, void *b, op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call(19, a, b, n[0], t[0]) @cython.callspec("MPIAPI") cdef void op_user_20(void *a, void *b, op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call(20, a, b, n[0], t[0]) @cython.callspec("MPIAPI") cdef void op_user_21(void *a, void *b, op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call(21, a, b, n[0], t[0]) @cython.callspec("MPIAPI") cdef void op_user_22(void *a, void *b, op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call(22, a, b, n[0], t[0]) @cython.callspec("MPIAPI") cdef void op_user_23(void *a, void *b, op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call(23, a, b, n[0], t[0]) @cython.callspec("MPIAPI") cdef void op_user_24(void *a, void *b, op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call(24, a, b, n[0], t[0]) @cython.callspec("MPIAPI") cdef void op_user_25(void *a, void *b, op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call(25, a, b, n[0], t[0]) @cython.callspec("MPIAPI") cdef void op_user_26(void *a, void *b, op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call(26, a, b, n[0], t[0]) @cython.callspec("MPIAPI") cdef void op_user_27(void *a, void *b, op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call(27, a, b, n[0], t[0]) @cython.callspec("MPIAPI") cdef void op_user_28(void *a, void *b, op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call(28, a, b, n[0], t[0]) @cython.callspec("MPIAPI") cdef void op_user_29(void *a, void *b, op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call(29, a, b, n[0], t[0]) @cython.callspec("MPIAPI") cdef void op_user_30(void *a, void *b, op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call(30, a, b, n[0], t[0]) @cython.callspec("MPIAPI") cdef void op_user_31(void *a, void *b, op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call(31, a, b, n[0], t[0]) @cython.callspec("MPIAPI") cdef void op_user_32(void *a, void *b, op_count_t *n, MPI_Datatype *t) noexcept nogil: op_user_call(32, a, b, n[0], t[0]) cdef inline void op_user_map(int index, op_usrfn_t **fn) noexcept nogil: if index == 1: fn[0] = op_user_01 elif index == 2: fn[0] = op_user_02 elif index == 3: fn[0] = op_user_03 elif index == 4: fn[0] = op_user_04 elif index == 5: fn[0] = op_user_05 elif index == 6: fn[0] = op_user_06 elif index == 7: fn[0] = op_user_07 elif index == 8: fn[0] = op_user_08 elif index == 9: fn[0] = op_user_09 elif index == 10: fn[0] = op_user_10 elif index == 11: fn[0] = op_user_11 elif index == 12: 
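# --- [illustrative aside: not part of the original mpi4py sources] ----------
# The machinery above keeps a fixed registry of user-defined reduction
# operations: each registered Python callable occupies one of 32 slots, and
# each slot is wired to one of the static C callbacks op_user_01..op_user_32.
# Creating a 33rd concurrent operation raises a RuntimeError, and freeing the
# Op releases its slot for reuse.  A minimal usage sketch (helper name made
# up for illustration):
def _demo_user_op():
    from mpi4py import MPI
    def takemax(a, b, datatype=None):    # called as function(x, y, datatype)
        return max(a, b)
    op = MPI.Op.Create(takemax, commute=True)
    try:
        comm = MPI.COMM_WORLD
        winner = comm.allreduce(comm.Get_rank(), op=op)
    finally:
        op.Free()                        # frees the registry slot for reuse
    return winner
# -----------------------------------------------------------------------------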
fn[0] = op_user_12 elif index == 13: fn[0] = op_user_13 elif index == 14: fn[0] = op_user_14 elif index == 15: fn[0] = op_user_15 elif index == 16: fn[0] = op_user_16 elif index == 17: fn[0] = op_user_17 elif index == 18: fn[0] = op_user_18 elif index == 19: fn[0] = op_user_19 elif index == 20: fn[0] = op_user_20 elif index == 21: fn[0] = op_user_21 elif index == 22: fn[0] = op_user_22 elif index == 23: fn[0] = op_user_23 elif index == 24: fn[0] = op_user_24 elif index == 25: fn[0] = op_user_25 elif index == 26: fn[0] = op_user_26 elif index == 27: fn[0] = op_user_27 elif index == 28: fn[0] = op_user_28 elif index == 29: fn[0] = op_user_29 elif index == 30: fn[0] = op_user_30 elif index == 31: fn[0] = op_user_31 elif index == 32: fn[0] = op_user_32 cdef inline void op_user_id_set(Op self, int index) noexcept nogil: self.flags &= (1U << 24U) - 1U self.flags |= index << 24U cdef inline int op_user_id_get(Op self) noexcept nogil: return (self.flags >> 24U) cdef inline int op_user_id_pop(Op self) noexcept nogil: cdef int index = op_user_id_get(self) self.flags &= (1U << 24U) - 1U return index cdef inline int op_user_new( Op self, object function, MPI_User_function **fn_i, MPI_User_function_c **fn_c, ) except -1: # check whether the function is callable function.__call__ # find a free slot in the registry # and register the Python function cdef int index = 0 try: with op_user_lock: index = op_user_registry.index(None, 1) op_user_registry[index] = function except ValueError: raise RuntimeError( "cannot create too many user-defined reduction operations", ) # map slot index to the associated C callback, # and return the slot index in the registry op_user_map(index, fn_i) op_user_map(index, fn_c) op_user_id_set(self, index) return 0 cdef inline int op_user_cpy( Op self, Op other, ) except -1: cdef int index = op_user_id_get(other) op_user_id_set(self, index) return 0 cdef inline int op_user_del( Op self, ) except -1: # free slot in the registry cdef int index = op_user_id_pop(self) if index > 0: with op_user_lock: op_user_registry[index] = None return 0 # ----------------------------------------------------------------------------- cdef inline object op_call(MPI_Op op, int index, object x, object y): if op == MPI_MAX : return op_MAX(x, y) if op == MPI_MIN : return op_MIN(x, y) if op == MPI_SUM : return op_SUM(x, y) if op == MPI_PROD : return op_PROD(x, y) if op == MPI_LAND : return op_LAND(x, y) if op == MPI_BAND : return op_BAND(x, y) if op == MPI_LOR : return op_LOR(x, y) if op == MPI_BOR : return op_BOR(x, y) if op == MPI_LXOR : return op_LXOR(x, y) if op == MPI_BXOR : return op_BXOR(x, y) if op == MPI_MAXLOC : return op_MAXLOC(x, y) if op == MPI_MINLOC : return op_MINLOC(x, y) if op == MPI_REPLACE : return op_REPLACE(x, y) if op == MPI_NO_OP : return op_NO_OP(x, y) if index > 0 : return op_user_call_py(index, x, y, None) raise ValueError("cannot call user-defined operation") # ----------------------------------------------------------------------------- mpi4py-4.0.3/src/mpi4py/MPI.src/reqimpl.pxi000066400000000000000000000177101475341043600203740ustar00rootroot00000000000000# ----------------------------------------------------------------------------- @cython.final @cython.internal cdef class _p_rs: cdef int count cdef MPI_Request *requests cdef MPI_Status *status, _status[1] cdef MPI_Status *statuses cdef int outcount cdef int *indices cdef object arg_req cdef object buf_req cdef object buf_sts cdef object buf_ids cdef MPI_Status tmp_sts def __cinit__(self): self.count = 0 self.requests = 
NULL self.status = MPI_STATUS_IGNORE self.statuses = MPI_STATUSES_IGNORE self.outcount = MPI_UNDEFINED self.indices = NULL self.arg_req = None self.buf_req = None self.buf_sts = None self.buf_ids = None cdef int set_request(self, Request request) except -1: self.arg_req = request return 0 cdef int set_status(self, Status status) except -1: if status is not None: self.status = &status.ob_mpi else: self.status = &self.tmp_sts MPI_Status_set_source ( self.status, MPI_ANY_SOURCE ) MPI_Status_set_tag ( self.status, MPI_ANY_TAG ) MPI_Status_set_error ( self.status, MPI_SUCCESS ) return 0 cdef int set_requests(self, requests) except -1: self.arg_req = requests cdef Py_ssize_t count = len(requests) self.count = count self.outcount = count self.buf_req = allocate(self.count, sizeof(MPI_Request), &self.requests) for i in range(self.count): self.requests[i] = (requests[i]).ob_mpi return 0 cdef int add_statuses(self) except -1: cdef MPI_Status *status = &self.tmp_sts MPI_Status_set_source ( status, MPI_ANY_SOURCE ) MPI_Status_set_tag ( status, MPI_ANY_TAG ) MPI_Status_set_error ( status, MPI_SUCCESS ) self.buf_sts = allocate(self.count, sizeof(MPI_Status), &self.statuses) for i in range(self.count): self.statuses[i] = status[0] return 0 cdef int add_indices(self) except -1: self.outcount = MPI_UNDEFINED self.buf_ids = newarray(self.count, &self.indices) return 0 cdef int acquire(self, requests, statuses=None) except -1: self.set_requests(requests) if statuses is not None: self.add_statuses() return 0 cdef int release(self, statuses=None) except -1: cdef Request request cdef Py_ssize_t nr = self.count for i in range(nr): request = self.arg_req[i] request.ob_mpi = self.requests[i] if request.ob_mpi == MPI_REQUEST_NULL: if request.ob_buf is not None: request.ob_buf = None # if statuses is None: return 0 if self.outcount == MPI_UNDEFINED: return 0 cdef Py_ssize_t outcount = self.outcount cdef Py_ssize_t ns = len(statuses) if outcount > ns: if isinstance(statuses, list): statuses += [ New(Status) for _ in range (ns, outcount) ] ns = outcount for i in range(min(nr, ns)): (statuses[i]).ob_mpi = self.statuses[i] return 0 cdef object get_buffer(self, Py_ssize_t index): cdef Request request if index >= 0: if self.indices != NULL: index = self.indices[index] request = self.arg_req[index] else: request = self.arg_req cdef object buf = request.ob_buf if request.ob_mpi == MPI_REQUEST_NULL: if request.ob_buf is not None: request.ob_buf = None return buf cdef object get_result(self): return self.get_object(-1) cdef object get_object(self, Py_ssize_t index): return PyMPI_load(self.get_buffer(index), self.status) cdef object get_objects(self): if self.outcount == MPI_UNDEFINED: return None return [ PyMPI_load(self.get_buffer(i), &self.statuses[i]) for i in range(self.outcount) ] cdef object get_indices(self): if self.outcount == MPI_UNDEFINED: return None return [self.indices[i] for i in range(self.outcount)] # ----------------------------------------------------------------------------- @cython.final @cython.internal cdef class _p_greq: cdef object query_fn cdef object free_fn cdef object cancel_fn cdef tuple args cdef dict kwargs def __cinit__(self, query_fn, free_fn, cancel_fn, args, kwargs): self.query_fn = query_fn self.free_fn = free_fn self.cancel_fn = cancel_fn self.args = tuple(args) if args is not None else () self.kwargs = dict(kwargs) if kwargs is not None else {} cdef int query(self, MPI_Status *status) except -1: MPI_Status_set_source(status, MPI_ANY_SOURCE) MPI_Status_set_tag(status, MPI_ANY_TAG) 
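# --- [illustrative aside: not part of the original mpi4py sources] ----------
# The _p_greq state object above carries the Python callables behind MPI
# generalized requests; the greq_* callbacks that follow forward into them.
# At the Python level this is driven (as far as I can tell, the exact
# signature is an assumption inferred from the state object) through
# Grequest.Start with optional query/free/cancel callables, and Complete()
# marks the user-defined operation as done:
def _demo_generalized_request():
    from mpi4py import MPI
    log = []
    def query_fn(status):
        log.append("queried")
    def free_fn():
        log.append("freed")
    def cancel_fn(completed):
        log.append(("cancelled", completed))
    greq = MPI.Grequest.Start(query_fn, free_fn, cancel_fn)
    greq.Complete()          # user code signals the operation finished
    greq.Wait()              # runs query_fn, then free_fn on release
    return log
# -----------------------------------------------------------------------------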
MPI_Status_set_error(status, MPI_SUCCESS) MPI_Status_set_elements_c(status, MPI_BYTE, 0) MPI_Status_set_cancelled(status, 0) cdef Status sts if self.query_fn is not None: sts = New(Status) sts.ob_mpi = status[0] self.query_fn(sts, *self.args, **self.kwargs) status[0] = sts.ob_mpi if self.cancel_fn is None: MPI_Status_set_cancelled(status, 0) return MPI_SUCCESS cdef int free(self) except -1: if self.free_fn is not None: self.free_fn(*self.args, **self.kwargs) return MPI_SUCCESS cdef int cancel(self, bint completed) except -1: if self.cancel_fn is not None: self.cancel_fn(completed, *self.args, **self.kwargs) return MPI_SUCCESS cdef int greq_query( void *extra_state, MPI_Status *status, ) noexcept with gil: cdef _p_greq state = <_p_greq>extra_state cdef int ierr = MPI_SUCCESS cdef object exc try: state.query(status) except BaseException as exc: ierr = PyMPI_HandleException(exc) return ierr cdef int greq_free( void *extra_state, ) noexcept with gil: cdef _p_greq state = <_p_greq>extra_state cdef int ierr = MPI_SUCCESS cdef object exc try: state.free() except BaseException as exc: ierr = PyMPI_HandleException(exc) finally: Py_DECREF(extra_state) return ierr cdef int greq_cancel( void *extra_state, int completed, ) noexcept with gil: cdef _p_greq state = <_p_greq>extra_state cdef int ierr = MPI_SUCCESS cdef object exc try: state.cancel(completed) except BaseException as exc: ierr = PyMPI_HandleException(exc) return ierr @cython.callspec("MPIAPI") cdef int greq_query_fn( void *extra_state, MPI_Status *status, ) noexcept nogil: if extra_state == NULL: return MPI_ERR_INTERN if status == NULL: return MPI_ERR_INTERN if not Py_IsInitialized(): return MPI_ERR_INTERN if not py_module_alive(): return MPI_ERR_INTERN return greq_query(extra_state, status) @cython.callspec("MPIAPI") cdef int greq_free_fn( void *extra_state, ) noexcept nogil: if extra_state == NULL: return MPI_ERR_INTERN if not Py_IsInitialized(): return MPI_ERR_INTERN if not py_module_alive(): return MPI_ERR_INTERN return greq_free(extra_state) @cython.callspec("MPIAPI") cdef int greq_cancel_fn( void *extra_state, int completed, ) noexcept nogil: if extra_state == NULL: return MPI_ERR_INTERN if not Py_IsInitialized(): return MPI_ERR_INTERN if not py_module_alive(): return MPI_ERR_INTERN return greq_cancel(extra_state, completed) # ----------------------------------------------------------------------------- mpi4py-4.0.3/src/mpi4py/MPI.src/stdlib.pxi000066400000000000000000000015021475341043600201740ustar00rootroot00000000000000# ----------------------------------------------------------------------------- cdef extern from * nogil: # "stddef.h" ctypedef unsigned int wchar_t cdef extern from * nogil: # "stdlib.h" const char *getenv(const char *) cdef extern from * nogil: # "string.h" int strcmp(const char *, const char *) int strncmp(const char *, const char *, size_t) cdef extern from * nogil: # "string.h" int memcmp(const void *, const void *, size_t) void *memset(void *, int, size_t) void *memcpy(void *, const void *, size_t) void *memmove(void *, const void *, size_t) cdef extern from * nogil: # "stdio.h" ctypedef struct FILE FILE *stdin, *stdout, *stderr int fprintf(FILE *, char *, ...) 
int fflush(FILE *) # ----------------------------------------------------------------------------- mpi4py-4.0.3/src/mpi4py/MPI.src/typedec.pxi000066400000000000000000000247221475341043600203610ustar00rootroot00000000000000# ----------------------------------------------------------------------------- cdef inline str combinername(int combiner): if combiner == MPI_COMBINER_NAMED : return 'NAMED' if combiner == MPI_COMBINER_DUP : return 'DUP' if combiner == MPI_COMBINER_CONTIGUOUS : return 'CONTIGUOUS' if combiner == MPI_COMBINER_VECTOR : return 'VECTOR' if combiner == MPI_COMBINER_HVECTOR : return 'HVECTOR' if combiner == MPI_COMBINER_INDEXED : return 'INDEXED' if combiner == MPI_COMBINER_HINDEXED : return 'HINDEXED' if combiner == MPI_COMBINER_INDEXED_BLOCK : return 'INDEXED_BLOCK' if combiner == MPI_COMBINER_HINDEXED_BLOCK : return 'HINDEXED_BLOCK' if combiner == MPI_COMBINER_STRUCT : return 'STRUCT' if combiner == MPI_COMBINER_SUBARRAY : return 'SUBARRAY' if combiner == MPI_COMBINER_DARRAY : return 'DARRAY' if combiner == MPI_COMBINER_RESIZED : return 'RESIZED' if combiner == MPI_COMBINER_VALUE_INDEX : return 'VALUE_INDEX' if combiner == MPI_COMBINER_F90_INTEGER : return 'F90_INTEGER' if combiner == MPI_COMBINER_F90_REAL : return 'F90_REAL' if combiner == MPI_COMBINER_F90_COMPLEX : return 'F90_COMPLEX' raise ValueError(f"unknown combiner value {combiner}") # ~> unreachable cdef inline list makelist(integral_t *p, MPI_Count start, MPI_Count last): return [p[i] for i from start <= i <= last] cdef inline tuple datatype_decode( Datatype self, bint mark, ): # get the datatype envelope cdef int combiner = MPI_UNDEFINED cdef MPI_Count ni = 0, na = 0, nc = 0, nd = 0 CHKERR( MPI_Type_get_envelope_c( self.ob_mpi, &ni, &na, &nc, &nd, &combiner) ) # return self immediately for named datatypes if combiner == MPI_COMBINER_NAMED: return (self, combinername(combiner), {}) # get the datatype contents cdef int *i = NULL cdef MPI_Aint *a = NULL cdef MPI_Count *c = NULL cdef MPI_Datatype *d = NULL cdef unused1 = allocate(ni, sizeof(int), &i) cdef unused2 = allocate(na, sizeof(MPI_Aint), &a) cdef unused3 = allocate(nc, sizeof(MPI_Count), &c) cdef unused4 = allocate(nd, sizeof(MPI_Datatype), &d) CHKERR( MPI_Type_get_contents_c( self.ob_mpi, ni, na, nc, nd, i, a, c, d) ) # process datatypes in advance cdef Datatype oldtype = __DATATYPE_NULL__ cdef dict params = {} cdef list datatypes = [] if nd == 1: oldtype = ref_Datatype(d[0]) datatypes = [oldtype] elif nd > 1: datatypes = [ref_Datatype(d[k]) for k in range(nd)] # dispatch depending on the combiner value cdef int use_count = 1 if (nc > 0) else 0 cdef MPI_Count s1, e1, s2, e2, s3, e3, s4, e4 cdef object count, blklen, stride, displs cdef object sizes, subsizes, starts, order cdef object lbound, extent if combiner == MPI_COMBINER_DUP: params = {} elif combiner == MPI_COMBINER_CONTIGUOUS: count = c[0] if use_count else i[0] params = { ('count') : count, } elif combiner == MPI_COMBINER_VECTOR: count = c[0] if use_count else i[0] blklen = c[1] if use_count else i[1] stride = c[2] if use_count else i[2] params = { ('count') : count, ('blocklength') : blklen, ('stride') : stride, } elif combiner == MPI_COMBINER_HVECTOR: count = c[0] if use_count else i[0] blklen = c[1] if use_count else i[1] stride = c[2] if use_count else a[0] params = { ('count') : count , ('blocklength') : blklen, ('stride') : stride, } elif combiner == MPI_COMBINER_INDEXED: if use_count: s1, e1 = 1, c[0] s2, e2 = c[0]+1, 2*c[0] blklen = makelist(c, s1, e1) displs = makelist(c, s2, e2) else: s1, 
e1 = 1, i[0] # ~> uncovered s2, e2 = i[0]+1, 2*i[0] # ~> uncovered blklen = makelist(i, s1, e1) # ~> uncovered displs = makelist(i, s2, e2) # ~> uncovered params = { ('blocklengths') : blklen, ('displacements') : displs, } elif combiner == MPI_COMBINER_HINDEXED: if use_count: s1, e1 = 1, c[0] s2, e2 = c[0]+1, 2*c[0] blklen = makelist(c, s1, e1) displs = makelist(c, s2, e2) else: s1, e1 = 1, i[0] # ~> uncovered s2, e2 = 0, i[0]-1 # ~> uncovered blklen = makelist(i, s1, e1) # ~> uncovered displs = makelist(a, s2, e2) # ~> uncovered params = { ('blocklengths') : blklen, ('displacements') : displs, } elif combiner == MPI_COMBINER_INDEXED_BLOCK: if use_count: s2, e2 = 2, c[0]+1 blklen = c[1] displs = makelist(c, s2, e2) else: s2, e2 = 2, i[0]+1 # ~> uncovered blklen = i[1] # ~> uncovered displs = makelist(i, s2, e2) # ~> uncovered params = { ('blocklength') : blklen, ('displacements') : displs, } elif combiner == MPI_COMBINER_HINDEXED_BLOCK: if use_count: s2, e2 = 2, c[0]+1 blklen = c[1] displs = makelist(c, s2, e2) else: s2, e2 = 0, i[0]-1 # ~> uncovered blklen = i[1] # ~> uncovered displs = makelist(a, s2, e2) # ~> uncovered params = { ('blocklength') : blklen, ('displacements') : displs, } elif combiner == MPI_COMBINER_STRUCT: if use_count: s1, e1 = 1, c[0] s2, e2 = c[0]+1, 2*c[0] blklen = makelist(c, s1, e1) displs = makelist(c, s2, e2) else: s1, e1 = 1, i[0] # ~> uncovered s2, e2 = 0, i[0]-1 # ~> uncovered blklen = makelist(i, s1, e1) # ~> uncovered displs = makelist(a, s2, e2) # ~> uncovered params = { ('blocklengths') : blklen, ('displacements') : displs, ('datatypes') : datatypes, } elif combiner == MPI_COMBINER_SUBARRAY: if use_count: s1, e1 = 0*i[0], 1*i[0]-1 s2, e2 = 1*i[0], 2*i[0]-1 s3, e3 = 2*i[0], 3*i[0]-1 sizes = makelist(c, s1, e1) subsizes = makelist(c, s2, e2) starts = makelist(c, s3, e3) order = i[1] else: s1, e1 = 0*i[0]+1, 1*i[0] # ~> uncovered s2, e2 = 1*i[0]+1, 2*i[0] # ~> uncovered s3, e3 = 2*i[0]+1, 3*i[0] # ~> uncovered sizes = makelist(i, s1, e1) # ~> uncovered subsizes = makelist(i, s2, e2) # ~> uncovered starts = makelist(i, s3, e3) # ~> uncovered order = i[3*i[0]+1] # ~> uncovered params = { ('sizes') : sizes, ('subsizes') : subsizes, ('starts') : starts, ('order') : order, } elif combiner == MPI_COMBINER_DARRAY: if use_count: s1, e1 = 0*i[2]+0, 1*i[2]+0-1 s2, e2 = 0*i[2]+3, 1*i[2]+3-1 s3, e3 = 1*i[2]+3, 2*i[2]+3-1 s4, e4 = 2*i[2]+3, 3*i[2]+3-1 sizes = makelist(c, s1, e1) order = i[3*i[2]+3] else: s1, e1 = 0*i[2]+3, 1*i[2]+3-1 # ~> uncovered s2, e2 = 1*i[2]+3, 2*i[2]+3-1 # ~> uncovered s3, e3 = 2*i[2]+3, 3*i[2]+3-1 # ~> uncovered s4, e4 = 3*i[2]+3, 4*i[2]+3-1 # ~> uncovered sizes = makelist(i, s1, e1) # ~> uncovered order = i[4*i[2]+3] # ~> uncovered params = { ('size') : i[0], ('rank') : i[1], ('gsizes') : sizes, ('distribs') : makelist(i, s2, e2), ('dargs') : makelist(i, s3, e3), ('psizes') : makelist(i, s4, e4), ('order') : order, } elif combiner == MPI_COMBINER_RESIZED: lbound = c[0] if use_count else a[0] extent = c[1] if use_count else a[1] params = { ('lb') : lbound, ('extent') : extent, } elif combiner == MPI_COMBINER_VALUE_INDEX: params = { ('value') : datatypes[0], # ~> MPI-4.1 ('index') : datatypes[1], # ~> MPI-4.1 } elif combiner == MPI_COMBINER_F90_INTEGER: params = { ('r') : i[0], } elif combiner == MPI_COMBINER_F90_REAL: params = { ('p') : i[0], ('r') : i[1], } elif combiner == MPI_COMBINER_F90_COMPLEX: params = { ('p') : i[0], ('r') : i[1], } if mark: datatype_visit(marktemp, datatypes) return (oldtype, combinername(combiner), params) cdef inline 
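# --- [illustrative aside: not part of the original mpi4py sources] ----------
# datatype_decode above unpacks a derived datatype's envelope and contents
# into (base datatype, combiner name, keyword parameters); datatype_create
# below rebuilds an equivalent datatype by dispatching to the matching
# Create_<combiner> factory.  Publicly this surfaces as Datatype.decode();
# a round-trip sketch (helper name made up for illustration):
def _demo_datatype_decode():
    from mpi4py import MPI
    vec = MPI.DOUBLE.Create_vector(3, 1, 4).Commit()
    basetype, combiner, params = vec.decode()
    # e.g. basetype == MPI.DOUBLE, combiner == "VECTOR",
    # params == {"count": 3, "blocklength": 1, "stride": 4}
    rebuilt = getattr(basetype, "Create_" + combiner.lower())(**params).Commit()
    vec.Free(); rebuilt.Free()
    return combiner, params
# -----------------------------------------------------------------------------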
Datatype datatype_create( Datatype datatype, str combiner, dict params, bint free, ): cdef object factory cdef Datatype newtype cdef list datatypes if 'datatypes' in params: datatypes = params['datatypes'] elif 'value' in params and 'index' in params: datatypes = list(params.values()) # ~> MPI-4.1 else: datatypes = [datatype] try: combiner = combiner.lower() factory = getattr(datatype, f'Create_{combiner}') newtype = factory(**params).Commit() finally: if free: datatype_visit(freetemp, datatypes) return newtype cdef inline int datatype_visit( int (*visit)(Datatype) except -1, list datatypes, ) except -1: cdef Datatype datatype for datatype in datatypes: visit(datatype) def _datatype_create( Datatype datatype: Datatype, str combiner: str, dict params: dict[str, Any], bint free: bool = False, ) -> Datatype: """ Create datatype from base datatype, combiner name, and parameters. """ return datatype_create(datatype, combiner, params, free) def _datatype_decode( Datatype datatype: Datatype, bint mark: bool = False, ) -> tuple[Datatype, str, dict[str, Any]]: """ Decode datatype to base datatype, combiner name, and parameters. """ return datatype_decode(datatype, mark) # ~> TODO # ----------------------------------------------------------------------------- mpi4py-4.0.3/src/mpi4py/MPI.src/typemap.pxi000066400000000000000000000263451475341043600204060ustar00rootroot00000000000000# ----------------------------------------------------------------------------- cdef inline int AddTypeMap(dict TD, const char tc[], Datatype dt) except -1: if dt.ob_mpi != MPI_DATATYPE_NULL: TD[pystr(tc)] = dt return 1 return 0 # ----------------------------------------------------------------------------- cdef dict TypeDict = { } _typedict = TypeDict # boolean (C++) AddTypeMap(TypeDict, "?" , __CXX_BOOL__ ) # PEP-3118 & NumPy # boolean (C99) AddTypeMap(TypeDict, "?" 
, __C_BOOL__ ) # PEP-3118 & NumPy # (signed) integer AddTypeMap(TypeDict, "b" , __SIGNED_CHAR__ ) # MPI-2 AddTypeMap(TypeDict, "h" , __SHORT__ ) AddTypeMap(TypeDict, "i" , __INT__ ) AddTypeMap(TypeDict, "l" , __LONG__ ) AddTypeMap(TypeDict, "q" , __LONG_LONG__ ) # unsigned integer AddTypeMap(TypeDict, "B" , __UNSIGNED_CHAR__ ) AddTypeMap(TypeDict, "H" , __UNSIGNED_SHORT__ ) AddTypeMap(TypeDict, "I" , __UNSIGNED__ ) AddTypeMap(TypeDict, "L" , __UNSIGNED_LONG__ ) AddTypeMap(TypeDict, "Q" , __UNSIGNED_LONG_LONG__ ) # (real) floating AddTypeMap(TypeDict, "f" , __FLOAT__ ) AddTypeMap(TypeDict, "d" , __DOUBLE__ ) AddTypeMap(TypeDict, "g" , __LONG_DOUBLE__ ) # PEP-3118 & NumPy # complex floating (F77) AddTypeMap(TypeDict, "Zf" , __COMPLEX__ ) # PEP-3118 AddTypeMap(TypeDict, "Zd" , __DOUBLE_COMPLEX__ ) # PEP-3118 AddTypeMap(TypeDict, "F" , __COMPLEX__ ) # NumPy AddTypeMap(TypeDict, "D" , __DOUBLE_COMPLEX__ ) # NumPy # complex floating (F90) AddTypeMap(TypeDict, "Zf" , __COMPLEX8__ ) # PEP-3118 AddTypeMap(TypeDict, "Zd" , __COMPLEX16__ ) # PEP-3118 AddTypeMap(TypeDict, "F" , __COMPLEX8__ ) # NumPy AddTypeMap(TypeDict, "D" , __COMPLEX16__ ) # NumPy # complex floating (C++) AddTypeMap(TypeDict, "Zf" , __CXX_FLOAT_COMPLEX__ ) # PEP-3118 AddTypeMap(TypeDict, "Zd" , __CXX_DOUBLE_COMPLEX__ ) # PEP-3118 AddTypeMap(TypeDict, "Zg" , __CXX_LONG_DOUBLE_COMPLEX__ ) # PEP-3118 AddTypeMap(TypeDict, "F" , __CXX_FLOAT_COMPLEX__ ) # NumPy AddTypeMap(TypeDict, "D" , __CXX_DOUBLE_COMPLEX__ ) # NumPy AddTypeMap(TypeDict, "G" , __CXX_LONG_DOUBLE_COMPLEX__ ) # NumPy # complex floating (C99) AddTypeMap(TypeDict, "Zf" , __C_FLOAT_COMPLEX__ ) # PEP-3118 AddTypeMap(TypeDict, "Zd" , __C_DOUBLE_COMPLEX__ ) # PEP-3118 AddTypeMap(TypeDict, "Zg" , __C_LONG_DOUBLE_COMPLEX__ ) # PEP-3118 AddTypeMap(TypeDict, "F" , __C_FLOAT_COMPLEX__ ) # NumPy AddTypeMap(TypeDict, "D" , __C_DOUBLE_COMPLEX__ ) # NumPy AddTypeMap(TypeDict, "G" , __C_LONG_DOUBLE_COMPLEX__ ) # NumPy # boolean (C99/C++) AddTypeMap(TypeDict, "b1" , __CXX_BOOL__ ) # NumPy AddTypeMap(TypeDict, "b1" , __C_BOOL__ ) # NumPy # signed and unsigned integer (C) if sizeof(char) == 1: AddTypeMap(TypeDict, "i1" , __SIGNED_CHAR__ ) # NumPy AddTypeMap(TypeDict, "u1" , __UNSIGNED_CHAR__ ) # NumPy if sizeof(short) == 2: AddTypeMap(TypeDict, "i2" , __SHORT__ ) # NumPy AddTypeMap(TypeDict, "u2" , __UNSIGNED_SHORT__ ) # NumPy if sizeof(long) == 4: AddTypeMap(TypeDict, "i4" , __LONG__ ) # NumPy # ~> 32bit AddTypeMap(TypeDict, "u4" , __UNSIGNED_LONG__ ) # NumPy # ~> 32bit if sizeof(int) == 4: AddTypeMap(TypeDict, "i4" , __INT__ ) # NumPy AddTypeMap(TypeDict, "u4" , __UNSIGNED__ ) # NumPy if sizeof(long long) == 8: AddTypeMap(TypeDict, "i8" , __LONG_LONG__ ) # NumPy AddTypeMap(TypeDict, "u8" , __UNSIGNED_LONG_LONG__ ) # NumPy if sizeof(long) == 8: AddTypeMap(TypeDict, "i8" , __LONG__ ) # NumPy AddTypeMap(TypeDict, "u8" , __UNSIGNED_LONG__ ) # NumPy # signed integer (C99) AddTypeMap(TypeDict, "i1" , __INT8_T__ ) # NumPy AddTypeMap(TypeDict, "i2" , __INT16_T__ ) # NumPy AddTypeMap(TypeDict, "i4" , __INT32_T__ ) # NumPy AddTypeMap(TypeDict, "i8" , __INT64_T__ ) # NumPy # unsigned integer (C99) AddTypeMap(TypeDict, "u1" , __UINT8_T__ ) # NumPy AddTypeMap(TypeDict, "u2" , __UINT16_T__ ) # NumPy AddTypeMap(TypeDict, "u4" , __UINT32_T__ ) # NumPy AddTypeMap(TypeDict, "u8" , __UINT64_T__ ) # NumPy # real (C) and complex (C99/C++) floating if sizeof(float) == 4: AddTypeMap(TypeDict, "f4" , __FLOAT__ ) # NumPy AddTypeMap(TypeDict, "c8" , __CXX_FLOAT_COMPLEX__ ) # NumPy AddTypeMap(TypeDict, "c8" , 
__C_FLOAT_COMPLEX__ ) # NumPy if sizeof(double) == 8: AddTypeMap(TypeDict, "f8" , __DOUBLE__ ) # NumPy AddTypeMap(TypeDict, "c16" , __CXX_DOUBLE_COMPLEX__ ) # NumPy AddTypeMap(TypeDict, "c16" , __C_DOUBLE_COMPLEX__ ) # NumPy if sizeof(long double) == 12: AddTypeMap(TypeDict, "f12" , __LONG_DOUBLE__ ) # ~> NumPy i386 AddTypeMap(TypeDict, "c24" , __CXX_LONG_DOUBLE_COMPLEX__ ) # ~> NumPy i386 AddTypeMap(TypeDict, "c24" , __C_LONG_DOUBLE_COMPLEX__ ) # ~> NumPy i386 if sizeof(long double) == 16: AddTypeMap(TypeDict, "f16" , __LONG_DOUBLE__ ) # NumPy AddTypeMap(TypeDict, "c32" , __CXX_LONG_DOUBLE_COMPLEX__ ) # NumPy AddTypeMap(TypeDict, "c32" , __C_LONG_DOUBLE_COMPLEX__ ) # NumPy # ssize_t and size_t (C) if sizeof(size_t) == sizeof(long long): AddTypeMap(TypeDict, "n" , __LONG_LONG__ ) AddTypeMap(TypeDict, "N" , __UNSIGNED_LONG_LONG__ ) if sizeof(size_t) == sizeof(long): AddTypeMap(TypeDict, "n" , __LONG__ ) AddTypeMap(TypeDict, "N" , __UNSIGNED_LONG__ ) if sizeof(size_t) == sizeof(int): AddTypeMap(TypeDict, "n" , __INT__ ) # ~> 32bit AddTypeMap(TypeDict, "N" , __UNSIGNED__ ) # ~> 32bit if sizeof(size_t) == sizeof(MPI_Count): AddTypeMap(TypeDict, "n" , __COUNT__ ) # intptr_t and uintptr_t (C99) if sizeof(Py_intptr_t) == sizeof(long long): AddTypeMap(TypeDict, "p" , __LONG_LONG__ ) # NumPy AddTypeMap(TypeDict, "P" , __UNSIGNED_LONG_LONG__ ) # NumPy if sizeof(Py_intptr_t) == sizeof(long): AddTypeMap(TypeDict, "p" , __LONG__ ) # NumPy AddTypeMap(TypeDict, "P" , __UNSIGNED_LONG__ ) # NumPy if sizeof(Py_intptr_t) == sizeof(int): AddTypeMap(TypeDict, "p" , __INT__ ) # ~> NumPy 32bit AddTypeMap(TypeDict, "P" , __UNSIGNED__ ) # ~> NumPy 32bit if sizeof(Py_intptr_t) == sizeof(MPI_Aint): AddTypeMap(TypeDict, "p" , __AINT__ ) # NumPy # character AddTypeMap(TypeDict, "c" , __CHAR__ ) # PEP-3118 & NumPy AddTypeMap(TypeDict, "a" , __CHAR__ ) # NumPy AddTypeMap(TypeDict, "S" , __CHAR__ ) # NumPy AddTypeMap(TypeDict, "S1", __CHAR__ ) # NumPy AddTypeMap(TypeDict, "s", __CHAR__ ) # PEP-3118 AddTypeMap(TypeDict, "1s", __CHAR__ ) # PEP-3118 # wide character if sizeof(wchar_t) == 4: AddTypeMap(TypeDict, "U" , __WCHAR__ ) # NumPy AddTypeMap(TypeDict, "U1", __WCHAR__ ) # NumPy # UTF-16/UCS-2 if sizeof(short) == 2: AddTypeMap(TypeDict, "u" , __UNSIGNED_SHORT__ ) # PEP-3118 AddTypeMap(TypeDict, "1u", __UNSIGNED_SHORT__ ) # PEP-3118 if 2 == 2: AddTypeMap(TypeDict, "u" , __UINT16_T__ ) # PEP-3118 AddTypeMap(TypeDict, "1u", __UINT16_T__ ) # PEP-3118 if sizeof(wchar_t) == 2: AddTypeMap(TypeDict, "u" , __WCHAR__ ) # PEP-3118 # ~> win32 AddTypeMap(TypeDict, "1u", __WCHAR__ ) # PEP-3118 # ~> win32 # UTF-32/UCS-4 if sizeof(int) == 4: AddTypeMap(TypeDict, "w" , __UNSIGNED__ ) # PEP-3118 AddTypeMap(TypeDict, "1w", __UNSIGNED__ ) # PEP-3118 if 4 == 4: AddTypeMap(TypeDict, "w" , __UINT32_T__ ) # PEP-3118 AddTypeMap(TypeDict, "1w", __UINT32_T__ ) # PEP-3118 if sizeof(wchar_t) == 4: AddTypeMap(TypeDict, "w" , __WCHAR__ ) # PEP-3118 AddTypeMap(TypeDict, "1w", __WCHAR__ ) # PEP-3118 # datetime AddTypeMap(TypeDict, "M" , __INT64_T__ ) # NumPy AddTypeMap(TypeDict, "M8", __INT64_T__ ) # NumPy # timedelta AddTypeMap(TypeDict, "m" , __INT64_T__ ) # NumPy AddTypeMap(TypeDict, "m8", __INT64_T__ ) # NumPy # ----------------------------------------------------------------------------- cdef dict CTypeDict = { } _typedict_c = CTypeDict AddTypeMap(CTypeDict, "?" 
, __C_BOOL__ ) AddTypeMap(CTypeDict, "b" , __SIGNED_CHAR__ ) AddTypeMap(CTypeDict, "h" , __SHORT__ ) AddTypeMap(CTypeDict, "i" , __INT__ ) AddTypeMap(CTypeDict, "l" , __LONG__ ) AddTypeMap(CTypeDict, "q" , __LONG_LONG__ ) AddTypeMap(CTypeDict, "B" , __UNSIGNED_CHAR__ ) AddTypeMap(CTypeDict, "H" , __UNSIGNED_SHORT__ ) AddTypeMap(CTypeDict, "I" , __UNSIGNED__ ) AddTypeMap(CTypeDict, "L" , __UNSIGNED_LONG__ ) AddTypeMap(CTypeDict, "Q" , __UNSIGNED_LONG_LONG__ ) AddTypeMap(CTypeDict, "f" , __FLOAT__ ) AddTypeMap(CTypeDict, "d" , __DOUBLE__ ) AddTypeMap(CTypeDict, "g" , __LONG_DOUBLE__ ) AddTypeMap(CTypeDict, "F" , __C_FLOAT_COMPLEX__ ) AddTypeMap(CTypeDict, "D" , __C_DOUBLE_COMPLEX__ ) AddTypeMap(CTypeDict, "G" , __C_LONG_DOUBLE_COMPLEX__ ) AddTypeMap(CTypeDict, "b1" , __C_BOOL__ ) AddTypeMap(CTypeDict, "i1" , __INT8_T__ ) AddTypeMap(CTypeDict, "i2" , __INT16_T__ ) AddTypeMap(CTypeDict, "i4" , __INT32_T__ ) AddTypeMap(CTypeDict, "i8" , __INT64_T__ ) AddTypeMap(CTypeDict, "u1" , __UINT8_T__ ) AddTypeMap(CTypeDict, "u2" , __UINT16_T__ ) AddTypeMap(CTypeDict, "u4" , __UINT32_T__ ) AddTypeMap(CTypeDict, "u8" , __UINT64_T__ ) if sizeof(float) == 4: AddTypeMap(CTypeDict, "f4" , __FLOAT__ ) AddTypeMap(CTypeDict, "c8" , __C_FLOAT_COMPLEX__ ) if sizeof(double) == 8: AddTypeMap(CTypeDict, "f8" , __DOUBLE__ ) AddTypeMap(CTypeDict, "c16" , __C_DOUBLE_COMPLEX__ ) if sizeof(long double) == 12: AddTypeMap(CTypeDict, "f12" , __LONG_DOUBLE__ ) # ~> i386 AddTypeMap(CTypeDict, "c24" , __C_LONG_DOUBLE_COMPLEX__ ) # ~> i386 if sizeof(long double) == 16: AddTypeMap(CTypeDict, "f16" , __LONG_DOUBLE__ ) AddTypeMap(CTypeDict, "c32" , __C_LONG_DOUBLE_COMPLEX__ ) # ----------------------------------------------------------------------------- cdef dict FTypeDict = { } _typedict_f = FTypeDict AddTypeMap(FTypeDict, "l" , __LOGICAL__ ) AddTypeMap(FTypeDict, "i" , __INTEGER__ ) AddTypeMap(FTypeDict, "r" , __REAL__ ) AddTypeMap(FTypeDict, "s" , __REAL__ ) AddTypeMap(FTypeDict, "d" , __DOUBLE_PRECISION__ ) AddTypeMap(FTypeDict, "c" , __COMPLEX__ ) AddTypeMap(FTypeDict, "z" , __DOUBLE_COMPLEX__ ) AddTypeMap(FTypeDict, "l1" , __LOGICAL1__ ) AddTypeMap(FTypeDict, "l2" , __LOGICAL2__ ) AddTypeMap(FTypeDict, "l4" , __LOGICAL4__ ) AddTypeMap(FTypeDict, "l8" , __LOGICAL8__ ) AddTypeMap(FTypeDict, "i1" , __INTEGER1__ ) AddTypeMap(FTypeDict, "i2" , __INTEGER2__ ) AddTypeMap(FTypeDict, "i4" , __INTEGER4__ ) AddTypeMap(FTypeDict, "i8" , __INTEGER8__ ) AddTypeMap(FTypeDict, "i16" , __INTEGER16__ ) AddTypeMap(FTypeDict, "r2" , __REAL2__ ) AddTypeMap(FTypeDict, "r4" , __REAL4__ ) AddTypeMap(FTypeDict, "r8" , __REAL8__ ) AddTypeMap(FTypeDict, "r16" , __REAL16__ ) AddTypeMap(FTypeDict, "c4" , __COMPLEX4__ ) AddTypeMap(FTypeDict, "c8" , __COMPLEX8__ ) AddTypeMap(FTypeDict, "c16" , __COMPLEX16__ ) AddTypeMap(FTypeDict, "c32" , __COMPLEX32__ ) # ----------------------------------------------------------------------------- mpi4py-4.0.3/src/mpi4py/MPI.src/typestr.pxi000066400000000000000000000446411475341043600204400ustar00rootroot00000000000000# ----------------------------------------------------------------------------- def _typecode(Datatype datatype: Datatype) -> str | None: """ Map MPI datatype to character code or type string. """ cdef const char *tc = DatatypeCode(datatype.ob_mpi) return pystr(tc) if tc != NULL else None def _typealign(Datatype datatype: Datatype) -> int | None: """ Return MPI datatype alignment. 
""" cdef size_t align = DatatypeAlign(datatype.ob_mpi) return align if align > 0 else None # ----------------------------------------------------------------------------- cdef inline const char* typechr(const char kind[], size_t size) noexcept nogil: cdef char k = kind[0] if k == c'b': # boolean if size == 1: return "?" if size >= 2: return typechr('i', size) return NULL # ~> unreachable if k == c'i': # signed integral if size == sizeof(char) : return "b" if size == sizeof(short) : return "h" if size == sizeof(int) : return "i" if size == sizeof(long) : return "l" if size == sizeof(long long) : return "q" # ~> uncovered return NULL # ~> unreachable if k == c'u': # unsigned integral if size == sizeof(char) : return "B" if size == sizeof(short) : return "H" if size == sizeof(int) : return "I" if size == sizeof(long) : return "L" if size == sizeof(long long) : return "Q" # ~> uncovered return NULL # ~> unreachable if k == c'f': # real floating if size == sizeof(float)//2 : return "e" if size == sizeof(float) : return "f" if size == sizeof(double) : return "d" if size == sizeof(long double) : return "g" return NULL # ~> unreachable if k == c'c': # complex floating if size == 2*sizeof(float)//2 : return "E" if size == 2*sizeof(float) : return "F" if size == 2*sizeof(double) : return "D" if size == 2*sizeof(long double) : return "G" return NULL # ~> unreachable return NULL # ~> unreachable cdef inline const char* typestr(const char kind[], size_t size) noexcept nogil: cdef char k = kind[0] if k == c'b': # boolean if size == 1: return "b1" if size >= 2: return typestr('i', size) # ~> uncovered return NULL # ~> unreachable if k == c'i': # signed integral if size == 1: return "i1" if size == 2: return "i2" if size == 4: return "i4" if size == 8: return "i8" if size == 16: return "i16" # ~> uncovered return NULL # ~> unreachable if k == c'u': # unsigned integral if size == 1: return "u1" if size == 2: return "u2" if size == 4: return "u4" if size == 8: return "u8" if size == 16: return "u16" # ~> uncovered return NULL # ~> unreachable if k == c'f': # real floating if size == 2: return "f2" if size == 4: return "f4" if size == 8: return "f8" if size == 12: return "f12" if size == 16: return "f16" return NULL # ~> unreachable if k == c'c': # complex floating if size == 4: return "c4" if size == 8: return "c8" if size == 16: return "c16" if size == 24: return "c24" if size == 32: return "c32" return NULL # ~> unreachable return NULL # ~> unreachable cdef inline const char* typechr_to_typestr(const char tchr[]) noexcept nogil: if tchr == NULL: return NULL cdef char c = tchr[0] # boolean if c == c'?': return typestr('b', 1) # signed integral if c == c'b': return typestr('i', sizeof(char)) if c == c'h': return typestr('i', sizeof(short)) if c == c'i': return typestr('i', sizeof(int)) if c == c'l': return typestr('i', sizeof(long)) if c == c'q': return typestr('i', sizeof(long long)) if c == c'p': return typestr('i', sizeof(MPI_Aint)) # unsigned integral if c == c'B': return typestr('u', sizeof(char)) if c == c'H': return typestr('u', sizeof(short)) if c == c'I': return typestr('u', sizeof(int)) if c == c'L': return typestr('u', sizeof(long)) if c == c'Q': return typestr('u', sizeof(long long)) if c == c'P': return typestr('u', sizeof(MPI_Aint)) # floating real if c == c'e': return typestr('f', sizeof(float)//2) if c == c'f': return typestr('f', sizeof(float)) if c == c'd': return typestr('f', sizeof(double)) if c == c'g': return typestr('f', sizeof(long double)) # floating complex if c == c'E': return 
typestr('c', 2*sizeof(float)//2) if c == c'F': return typestr('c', 2*sizeof(float)) if c == c'D': return typestr('c', 2*sizeof(double)) if c == c'G': return typestr('c', 2*sizeof(long double)) # character if c == c'S': return "S1" # NumPy if c == c'U': return "U1" # NumPy if c == c'c': return "S1" # PEP 3118 if c == c'u': return "u2" # PEP 3118 # ~> uncovered if c == c'w': return "U1" # PEP 3118 # ~> uncovered return NULL # ~> uncovered cdef inline const char* mpiaddrchr(size_t size) noexcept nogil: if size == sizeof(MPI_Aint) : return "p" if size == sizeof(long long) : return "q" # ~> uncovered if size == sizeof(long) : return "l" # ~> uncovered if size == sizeof(int) : return "i" # ~> uncovered return NULL # ~> uncovered cdef inline int mpicombiner(MPI_Datatype datatype) noexcept nogil: if not mpi_active(): return MPI_COMBINER_NAMED cdef int combiner = MPI_COMBINER_NAMED cdef MPI_Count ni = 0, na = 0, nc = 0, nd = 0 MPI_Type_get_envelope_c(datatype, &ni, &na, &nc, &nd, &combiner) return combiner cdef inline MPI_Count mpiextent(MPI_Datatype datatype) noexcept nogil: if not mpi_active(): return 0 cdef MPI_Count lb = 0, extent = 0 MPI_Type_get_extent_c(datatype, &lb, &extent) return extent cdef inline const char* mpifortchr( const char kind[], MPI_Datatype datatype, ) noexcept nogil: return typechr(kind, mpiextent(datatype)) cdef inline const char* mpifortstr( const char kind[], MPI_Datatype datatype, ) noexcept nogil: return typestr(kind, mpiextent(datatype)) cdef inline const char* typeDUP( const char *(*convert)(MPI_Datatype) noexcept nogil, MPI_Datatype datatype, ) noexcept nogil: cdef MPI_Datatype basetype = MPI_DATATYPE_NULL MPI_Type_get_contents_c( datatype, 0, 0, 0, 1, NULL, NULL, NULL, &basetype) cdef const char *result = convert(basetype) if not predefined(basetype): MPI_Type_free(&basetype) return result cdef extern from * nogil: """ #include #if defined(__cplusplus) template struct pympi_alignof_struct {char c; T member;}; #define pympi_alignof(type) offsetof(pympi_alignof_struct, member) #else #define pympi_alignof(type) offsetof(struct _{char c; type member;}, member) #endif """ const size_t alignof_bool "pympi_alignof(char)" const size_t alignof_short "pympi_alignof(short)" const size_t alignof_int "pympi_alignof(int)" const size_t alignof_long "pympi_alignof(long)" const size_t alignof_longlong "pympi_alignof(long long)" const size_t alignof_float "pympi_alignof(float)" const size_t alignof_double "pympi_alignof(double)" const size_t alignof_longdouble "pympi_alignof(long double)" const size_t alignof_char "pympi_alignof(char)" const size_t alignof_wchar "pympi_alignof(wchar_t)" const size_t alignof_voidp "pympi_alignof(void*)" cdef inline size_t typealign(const char tchr[]) noexcept nogil: if tchr == NULL: return 0 cdef char c = tchr[0] # bool if c == c'?': return alignof_bool # signed integral if c == c'b': return alignof_char if c == c'h': return alignof_short if c == c'i': return alignof_int if c == c'l': return alignof_long if c == c'q': return alignof_longlong # unsigned integral if c == c'B': return alignof_char if c == c'H': return alignof_short if c == c'I': return alignof_int if c == c'L': return alignof_long if c == c'Q': return alignof_longlong # floating real if c == c'e': return alignof_float//2 if c == c'f': return alignof_float if c == c'd': return alignof_double if c == c'g': return alignof_longdouble # floating complex if c == c'E': return alignof_float//2 if c == c'F': return alignof_float if c == c'D': return alignof_double if c == c'G': return 
alignof_longdouble # character if c == c'c': return alignof_char if c == c'S': return alignof_char if c == c'U': return alignof_wchar # pointer if c == c'p': return alignof_voidp if c == c'P': return alignof_voidp # ~> uncovered return 0 # ~> unreachable cdef inline size_t typealignpair( const char tc_a[], const char tc_b[], ) noexcept nogil: cdef size_t align_a = typealign(tc_a) cdef size_t align_b = typealign(tc_b) return align_a if align_a > align_b else align_b # ----------------------------------------------------------------------------- cdef inline const char* DatatypeChar(MPI_Datatype datatype) noexcept nogil: if datatype == MPI_DATATYPE_NULL: return NULL # MPI if datatype == MPI_PACKED : return "B" if datatype == MPI_BYTE : return "B" if datatype == MPI_AINT : return mpiaddrchr(sizeof(MPI_Aint)) if datatype == MPI_OFFSET : return mpiaddrchr(sizeof(MPI_Offset)) if datatype == MPI_COUNT : return mpiaddrchr(sizeof(MPI_Count)) # C - character if datatype == MPI_CHAR : return "c" if datatype == MPI_WCHAR and sizeof(wchar_t) == 2: return typechr('u', 2) if datatype == MPI_WCHAR and sizeof(wchar_t) == 4: return "U" # C - (signed) integral if datatype == MPI_SIGNED_CHAR : return "b" if datatype == MPI_SHORT : return "h" if datatype == MPI_INT : return "i" if datatype == MPI_LONG : return "l" if datatype == MPI_LONG_LONG : return "q" # C - unsigned integral if datatype == MPI_UNSIGNED_CHAR : return "B" if datatype == MPI_UNSIGNED_SHORT : return "H" if datatype == MPI_UNSIGNED : return "I" if datatype == MPI_UNSIGNED_LONG : return "L" if datatype == MPI_UNSIGNED_LONG_LONG : return "Q" # C - (real) floating if datatype == MPI_FLOAT : return "f" if datatype == MPI_DOUBLE : return "d" if datatype == MPI_LONG_DOUBLE : return "g" # C99 - boolean if datatype == MPI_C_BOOL : return "?" # C99 - integral if datatype == MPI_INT8_T : return typechr('i', 1) if datatype == MPI_INT16_T : return typechr('i', 2) if datatype == MPI_INT32_T : return typechr('i', 4) if datatype == MPI_INT64_T : return typechr('i', 8) if datatype == MPI_UINT8_T : return typechr('u', 1) if datatype == MPI_UINT16_T : return typechr('u', 2) if datatype == MPI_UINT32_T : return typechr('u', 4) if datatype == MPI_UINT64_T : return typechr('u', 8) # C99 - complex floating if datatype == MPI_C_COMPLEX : return "F" if datatype == MPI_C_FLOAT_COMPLEX : return "F" if datatype == MPI_C_DOUBLE_COMPLEX : return "D" if datatype == MPI_C_LONG_DOUBLE_COMPLEX : return "G" # C++ - boolean if datatype == MPI_CXX_BOOL : return "?" 
# C++ - complex floating if datatype == MPI_CXX_FLOAT_COMPLEX : return "F" if datatype == MPI_CXX_DOUBLE_COMPLEX : return "D" if datatype == MPI_CXX_LONG_DOUBLE_COMPLEX : return "G" # Fortran 77 if datatype == MPI_CHARACTER : return "c" if datatype == MPI_LOGICAL : return mpifortchr('b', datatype) if datatype == MPI_INTEGER : return mpifortchr('i', datatype) if datatype == MPI_REAL : return mpifortchr('f', datatype) if datatype == MPI_DOUBLE_PRECISION : return mpifortchr('f', datatype) if datatype == MPI_COMPLEX : return mpifortchr('c', datatype) if datatype == MPI_DOUBLE_COMPLEX : return mpifortchr('c', datatype) # Fortran 90 if datatype == MPI_LOGICAL1 : return typechr('b', 1) if datatype == MPI_LOGICAL2 : return typechr('b', 2) if datatype == MPI_LOGICAL4 : return typechr('b', 4) if datatype == MPI_LOGICAL8 : return typechr('b', 8) if datatype == MPI_INTEGER1 : return typechr('i', 1) if datatype == MPI_INTEGER2 : return typechr('i', 2) if datatype == MPI_INTEGER4 : return typechr('i', 4) if datatype == MPI_INTEGER8 : return typechr('i', 8) if datatype == MPI_INTEGER16 : return typechr('i', 16) if datatype == MPI_REAL2 : return typechr('f', 2) if datatype == MPI_REAL4 : return typechr('f', 4) if datatype == MPI_REAL8 : return typechr('f', 8) if datatype == MPI_REAL16 : return typechr('f', 16) if datatype == MPI_COMPLEX4 : return typechr('c', 4) if datatype == MPI_COMPLEX8 : return typechr('c', 8) if datatype == MPI_COMPLEX16 : return typechr('c', 16) if datatype == MPI_COMPLEX32 : return typechr('c', 32) cdef int combiner = mpicombiner(datatype) if combiner == MPI_COMBINER_F90_INTEGER : return mpifortchr('i', datatype) if combiner == MPI_COMBINER_F90_REAL : return mpifortchr('f', datatype) if combiner == MPI_COMBINER_F90_COMPLEX : return mpifortchr('c', datatype) # duplicate if combiner == MPI_COMBINER_DUP: return typeDUP(DatatypeChar, datatype) # fallback return NULL # ----------------------------------------------------------------------------- cdef inline const char* DatatypeStr(MPI_Datatype datatype) noexcept nogil: if datatype == MPI_DATATYPE_NULL : return NULL # C99 if datatype == MPI_C_BOOL : return typestr('b', 1) if datatype == MPI_INT8_T : return typestr('i', 1) if datatype == MPI_INT16_T : return typestr('i', 2) if datatype == MPI_INT32_T : return typestr('i', 4) if datatype == MPI_INT64_T : return typestr('i', 8) if datatype == MPI_UINT8_T : return typestr('u', 1) if datatype == MPI_UINT16_T : return typestr('u', 2) if datatype == MPI_UINT32_T : return typestr('u', 4) if datatype == MPI_UINT64_T : return typestr('u', 8) # Fortran 90 if datatype == MPI_LOGICAL1 : return typestr('b', 1) if datatype == MPI_LOGICAL2 : return typestr('b', 2) if datatype == MPI_LOGICAL4 : return typestr('b', 4) if datatype == MPI_LOGICAL8 : return typestr('b', 8) if datatype == MPI_INTEGER1 : return typestr('i', 1) if datatype == MPI_INTEGER2 : return typestr('i', 2) if datatype == MPI_INTEGER4 : return typestr('i', 4) if datatype == MPI_INTEGER8 : return typestr('i', 8) if datatype == MPI_INTEGER16 : return typestr('i', 16) if datatype == MPI_REAL2 : return typestr('f', 2) if datatype == MPI_REAL4 : return typestr('f', 4) if datatype == MPI_REAL8 : return typestr('f', 8) if datatype == MPI_REAL16 : return typestr('f', 16) if datatype == MPI_COMPLEX4 : return typestr('c', 4) if datatype == MPI_COMPLEX8 : return typestr('c', 8) if datatype == MPI_COMPLEX16 : return typestr('c', 16) if datatype == MPI_COMPLEX32 : return typestr('c', 32) cdef int combiner = mpicombiner(datatype) if combiner == 
MPI_COMBINER_F90_INTEGER : return mpifortstr('i', datatype) if combiner == MPI_COMBINER_F90_REAL : return mpifortstr('f', datatype) if combiner == MPI_COMBINER_F90_COMPLEX : return mpifortstr('c', datatype) # duplicate if combiner == MPI_COMBINER_DUP: return typeDUP(DatatypeStr, datatype) # fallback return typechr_to_typestr(DatatypeChar(datatype)) # ----------------------------------------------------------------------------- cdef inline const char* DatatypeCode(MPI_Datatype datatype) noexcept nogil: if datatype == MPI_DATATYPE_NULL : return NULL # C99 if datatype == MPI_C_BOOL : return typestr('b', 1) if datatype == MPI_INT8_T : return typestr('i', 1) if datatype == MPI_INT16_T : return typestr('i', 2) if datatype == MPI_INT32_T : return typestr('i', 4) if datatype == MPI_INT64_T : return typestr('i', 8) if datatype == MPI_UINT8_T : return typestr('u', 1) if datatype == MPI_UINT16_T : return typestr('u', 2) if datatype == MPI_UINT32_T : return typestr('u', 4) if datatype == MPI_UINT64_T : return typestr('u', 8) # Fortran 90 if datatype == MPI_LOGICAL1 : return typestr('b', 1) if datatype == MPI_LOGICAL2 : return typestr('b', 2) if datatype == MPI_LOGICAL4 : return typestr('b', 4) if datatype == MPI_LOGICAL8 : return typestr('b', 8) if datatype == MPI_INTEGER1 : return typestr('i', 1) if datatype == MPI_INTEGER2 : return typestr('i', 2) if datatype == MPI_INTEGER4 : return typestr('i', 4) if datatype == MPI_INTEGER8 : return typestr('i', 8) if datatype == MPI_INTEGER16 : return typestr('i', 16) if datatype == MPI_REAL2 : return typestr('f', 2) if datatype == MPI_REAL4 : return typestr('f', 4) if datatype == MPI_REAL8 : return typestr('f', 8) if datatype == MPI_REAL16 : return typestr('f', 16) if datatype == MPI_COMPLEX4 : return typestr('c', 4) if datatype == MPI_COMPLEX8 : return typestr('c', 8) if datatype == MPI_COMPLEX16 : return typestr('c', 16) if datatype == MPI_COMPLEX32 : return typestr('c', 32) cdef int combiner = mpicombiner(datatype) if combiner == MPI_COMBINER_F90_INTEGER : return mpifortstr('i', datatype) if combiner == MPI_COMBINER_F90_REAL : return mpifortstr('f', datatype) if combiner == MPI_COMBINER_F90_COMPLEX : return mpifortstr('c', datatype) # duplicate if combiner == MPI_COMBINER_DUP: return typeDUP(DatatypeCode, datatype) # fallback return DatatypeChar(datatype) # ----------------------------------------------------------------------------- cdef inline size_t DatatypeAlign(MPI_Datatype datatype) noexcept nogil: cdef size_t align = typealign(DatatypeChar(datatype)) if align > 0: return align if datatype == MPI_SHORT_INT : return typealignpair("h", "i") if datatype == MPI_2INT : return typealignpair("i", "i") if datatype == MPI_LONG_INT : return typealignpair("l", "i") if datatype == MPI_FLOAT_INT : return typealignpair("f", "i") if datatype == MPI_DOUBLE_INT : return typealignpair("d", "i") if datatype == MPI_LONG_DOUBLE_INT : return typealignpair("g", "i") return 0 # ----------------------------------------------------------------------------- mpi4py-4.0.3/src/mpi4py/MPI.src/winimpl.pxi000066400000000000000000000016321475341043600203760ustar00rootroot00000000000000# ----------------------------------------------------------------------------- cdef inline int win_get_base(MPI_Win win, void **base) except -1: cdef int flag = 0 cdef void *attr = NULL CHKERR( MPI_Win_get_attr(win, MPI_WIN_BASE, &attr, &flag) ) base[0] = attr if flag and attr != NULL else NULL return 0 cdef inline int win_get_size(MPI_Win win, MPI_Aint *size) except -1: cdef int flag = 0 cdef MPI_Aint 
*attr = NULL CHKERR( MPI_Win_get_attr(win, MPI_WIN_SIZE, &attr, &flag) ) size[0] = attr[0] if flag and attr != NULL else 0 return 0 cdef inline int win_get_unit(MPI_Win win, int *disp_unit) except -1: cdef int flag = 0 cdef int *attr = NULL CHKERR( MPI_Win_get_attr(win, MPI_WIN_DISP_UNIT, &attr, &flag) ) disp_unit[0] = attr[0] if flag and attr != NULL else 1 return 0 # ----------------------------------------------------------------------------- mpi4py-4.0.3/src/mpi4py/__init__.pxd000066400000000000000000000000701475341043600172310ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com mpi4py-4.0.3/src/mpi4py/__init__.py000066400000000000000000000134761475341043600171040ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com """The **MPI for Python** package. The *Message Passing Interface* (MPI) is a standardized and portable message-passing system designed to function on a wide variety of parallel computers. The MPI standard defines the syntax and semantics of library routines and allows users to write portable programs in the main scientific programming languages (Fortran, C, or C++). Since its release, the MPI specification has become the leading standard for message-passing libraries for parallel computers. *MPI for Python* provides MPI bindings for the Python programming language, allowing any Python program to exploit multiple processors. This package build on the MPI specification and provides an object oriented interface which closely follows MPI-2 C++ bindings. """ __version__ = '4.0.3' __author__ = 'Lisandro Dalcin' __credits__ = 'MPI Forum, MPICH Team, Open MPI Team' __all__ = ['MPI'] class Rc: """Runtime configuration options. Attributes ---------- initialize : bool Automatic MPI initialization at import (default: True). threads : bool Request initialization with thread support (default: True). thread_level : {"multiple", "serialized", "funneled", "single"} Level of thread support to request (default: "multiple"). finalize : None or bool Automatic MPI finalization at exit (default: None). fast_reduce : bool Use tree-based reductions for objects (default: True). recv_mprobe : bool Use matched probes to receive objects (default: True). irecv_bufsz : int Default buffer size in bytes for ``irecv()`` (default = 32768). errors : {"exception", "default", "abort", "fatal"} Error handling policy (default: "exception"). """ initialize = True threads = True thread_level = 'multiple' finalize = None fast_reduce = True recv_mprobe = True irecv_bufsz = 32768 errors = 'exception' def __init__(self, **kwargs): """Initialize options.""" self(**kwargs) def __setattr__(self, name, value): """Set option.""" if not hasattr(self, name): raise TypeError(f"object has no attribute {name!r}") super().__setattr__(name, value) def __call__(self, **kwargs): """Update options.""" for key in kwargs: if not hasattr(self, key): raise TypeError(f"unexpected argument {key!r}") for key, value in kwargs.items(): setattr(self, key, value) def __repr__(self): """Return repr(self).""" return f'<{__spec__.name}.rc>' rc = Rc() __import__('sys').modules[__spec__.name + '.rc'] = rc def get_include(): """Return the directory in the package that contains header files. Extension modules that need to compile against mpi4py should use this function to locate the appropriate include directory. Using Python distutils (or perhaps NumPy distutils):: import mpi4py Extension('extension_name', ... 
include_dirs=[..., mpi4py.get_include()]) """ # pylint: disable=import-outside-toplevel from os.path import join, dirname return join(dirname(__spec__.origin), 'include') def get_config(): """Return a dictionary with information about MPI. .. versionchanged:: 4.0.0 By default, this function returns an empty dictionary. However, downstream packagers and distributors may alter such behavior. To that end, MPI information must be provided under an ``mpi`` section within a UTF-8 encoded INI-style configuration file :file:`mpi.cfg` located at the top-level package directory. The configuration file is read and parsed using the `configparser` module. """ # pylint: disable=import-outside-toplevel from configparser import ConfigParser from os.path import join, dirname parser = ConfigParser() parser.add_section('mpi') mpicfg = join(dirname(__spec__.origin), 'mpi.cfg') parser.read(mpicfg, encoding='utf-8') return dict(parser.items('mpi')) def profile(name, *, path=None): """Support for the MPI profiling interface. Parameters ---------- name : str Name of the profiler library to load. path : `sequence` of str, optional Additional paths to search for the profiler. """ # pylint: disable=import-outside-toplevel import os import sys import warnings try: from _ctypes import dlopen from os import RTLD_NOW, RTLD_GLOBAL except ImportError as exc: # pragma: no cover warnings.warn(exc.args[0], stacklevel=2) return def find_library(name, path): pattern = [('', '')] if sys.platform == 'darwin': # pragma: no cover pattern.append(('lib', '.dylib')) elif os.name == 'posix': # pragma: no cover pattern.append(('lib', '.so')) for pth in path: for (lib, dso) in pattern: filename = os.path.join(pth, lib + name + dso) if os.path.isfile(filename): return os.path.abspath(filename) return None if path is None: path = [''] elif isinstance(path, os.PathLike): path = [path] elif isinstance(path, str): path = path.split(os.pathsep) elif isinstance(path, bytes): path = path.split(os.fsencode(os.pathsep)) name = os.fsdecode(name) path = list(map(os.fsdecode, path)) filename = find_library(name, path) if filename is None: raise ValueError(f"profiler {name!r} not found") try: handle = dlopen(filename, RTLD_NOW | RTLD_GLOBAL) except OSError as exc: warnings.warn(exc.args[0], stacklevel=2) else: registry = vars(profile).setdefault('registry', []) registry.append((name, (handle, filename))) mpi4py-4.0.3/src/mpi4py/__init__.pyi000066400000000000000000000013651475341043600172470ustar00rootroot00000000000000from typing import Sequence __version__: str = ... __author__: str = ... __credits__: str = ... from . import MPI __all__: list[str] = ['MPI'] class Rc: initialize: bool = True threads: bool = True thread_level: str = 'multiple' finalize: bool | None = None fast_reduce: bool = True recv_mprobe: bool = True irecv_bufsz: int = 32768 errors: str = 'exception' def __init__(self, **kwargs: bool | str) -> None: ... def __setattr__(self, name: str, value: bool | str) -> None: ... def __call__(self, **kwargs: bool | str) -> None: ... rc: Rc = ... def get_include() -> str: ... def get_config() -> dict[str, str]: ... def profile( name: str, *, path: str | Sequence[str] | None = None, ) -> None: ... 
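A minimal usage sketch of the package-level helpers documented above (rc, get_include, get_config, profile); the profiler name and search path below are hypothetical assumptions, not values shipped with mpi4py:

# Sketch: tune runtime options before importing MPI and query build helpers.
# The 'vt' profiler name and '/opt/vampirtrace/lib' path are illustrative only.
import mpi4py
mpi4py.rc(initialize=True, thread_level='serialized')  # set options prior to importing mpi4py.MPI
print(mpi4py.get_include())   # directory with headers for building extensions against mpi4py
print(mpi4py.get_config())    # empty dict unless a distributor provided an mpi.cfg file
try:
    mpi4py.profile('vt', path=['/opt/vampirtrace/lib'])  # hypothetical PMPI profiler library
except ValueError:
    pass  # profiler library not found on this system
from mpi4py import MPI
print(MPI.Get_processor_name())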
mpi4py-4.0.3/src/mpi4py/__main__.py000066400000000000000000000002451475341043600170530ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com """Entry-point for ``python -m mpi4py ...``.""" from .run import main if __name__ == '__main__': main() mpi4py-4.0.3/src/mpi4py/__main__.pyi000066400000000000000000000000361475341043600172220ustar00rootroot00000000000000from .run import main as main mpi4py-4.0.3/src/mpi4py/bench.py000066400000000000000000000470641475341043600164240ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com """Run MPI benchmarks and tests.""" import os as _os import sys as _sys def _prog(cmd=""): pyexe = _os.path.basename(_sys.executable) return f"{pyexe} -m {__spec__.name} {cmd}".strip() def helloworld(comm, args=None, verbose=True): """Hello, World! using MPI.""" # pylint: disable=import-outside-toplevel from argparse import ArgumentParser parser = ArgumentParser(prog=_prog("helloworld")) parser.add_argument("-q", "--quiet", action="store_false", dest="verbose", default=verbose, help="quiet output") options = parser.parse_args(args) from . import MPI size = comm.Get_size() rank = comm.Get_rank() name = MPI.Get_processor_name() message = ( f"Hello, World! I am process " f"{rank:{len(str(size - 1))}d} " f"of {size} on {name}.\n" ) comm.Barrier() if rank > 0: comm.Recv([None, 'B'], rank - 1) if options.verbose: _sys.stdout.write(message) _sys.stdout.flush() if rank < size - 1: comm.Send([None, 'B'], rank + 1) comm.Barrier() return message def ringtest(comm, args=None, verbose=True): """Time a message going around the ring of processes.""" # pylint: disable=too-many-locals # pylint: disable=too-many-statements # pylint: disable=import-outside-toplevel from argparse import ArgumentParser parser = ArgumentParser(prog=_prog("ringtest")) parser.add_argument("-q", "--quiet", action="store_false", dest="verbose", default=verbose, help="quiet output") parser.add_argument("-n", "--size", type=int, dest="size", default=1, help="message size") parser.add_argument("-s", "--skip", type=int, dest="skip", default=0, help="number of warm-up iterations") parser.add_argument("-l", "--loop", type=int, dest="loop", default=1, help="number of iterations") options = parser.parse_args(args) def ring(comm, n=1, loop=1, skip=0): # pylint: disable=invalid-name from array import array from . 
import MPI iterations = list(range(loop + skip)) size = comm.Get_size() rank = comm.Get_rank() source = (rank - 1) % size dest = (rank + 1) % size Sendrecv = comm.Sendrecv Send = comm.Send Recv = comm.Recv Wtime = MPI.Wtime sendmsg = array('B', [+42]) * n recvmsg = array('B', [0x0]) * n tic = Wtime() if size == 1: for i in iterations: if i == skip: tic = Wtime() Sendrecv(sendmsg, dest, 0, recvmsg, source, 0) else: if rank == 0: for i in iterations: if i == skip: tic = Wtime() Send(sendmsg, dest, 0) Recv(recvmsg, source, 0) else: sendmsg = recvmsg for i in iterations: if i == skip: tic = Wtime() Recv(recvmsg, source, 0) Send(sendmsg, dest, 0) toc = Wtime() if comm.rank == 0 and sendmsg != recvmsg: # pragma: no cover from warnings import warn import traceback try: warn("received message does not match!", stacklevel=1) except UserWarning: traceback.print_exc() comm.Abort(2) return toc - tic size = getattr(options, 'size', 1) loop = getattr(options, 'loop', 1) skip = getattr(options, 'skip', 0) comm.Barrier() elapsed = ring(comm, size, loop, skip) if options.verbose and comm.rank == 0: _sys.stdout.write( f"time for {loop} loops = {elapsed:g} seconds " f"({comm.size} processes, {size} bytes)\n" ) _sys.stdout.flush() return elapsed def pingpong(comm, args=None, verbose=True): """Time messages between processes.""" # pylint: disable=too-many-locals # pylint: disable=too-many-branches # pylint: disable=too-many-statements # pylint: disable=import-outside-toplevel from argparse import ArgumentParser parser = ArgumentParser(prog=_prog("pingpong")) parser.add_argument("-q", "--quiet", action="store_false", dest="verbose", default=verbose, help="quiet output") parser.add_argument("-m", "--min-size", type=int, dest="min_size", default=1, help="minimum message size") parser.add_argument("-n", "--max-size", type=int, dest="max_size", default=1 << 30, help="maximum message size") parser.add_argument("-s", "--skip", type=int, dest="skip", default=100, help="number of warm-up iterations") parser.add_argument("-l", "--loop", type=int, dest="loop", default=10000, help="number of iterations") parser.add_argument("-a", "--array", action="store", dest="array", default="numpy", choices=["numpy", "cupy", "numba", "none"], help="use NumPy/CuPy/Numba arrays") parser.add_argument("-p", "--pickle", action="store_true", dest="pickle", default=False, help="use pickle-based send and receive") parser.add_argument("--protocol", type=int, dest="protocol", default=None, help="pickle protocol version") parser.add_argument("-o", "--outband", action="store_true", dest="outband", default=False, help="use out-of-band pickle-based send and receive") parser.add_argument("--threshold", type=int, dest="threshold", default=None, help="size threshold for out-of-band pickle buffers") parser.add_argument("--skip-large", type=int, dest="skip_large", default=10) parser.add_argument("--loop-large", type=int, dest="loop_large", default=1000) parser.add_argument("--large-size", type=int, dest="large_size", default=1 << 14) parser.add_argument("--skip-huge", type=int, dest="skip_huge", default=1) parser.add_argument("--loop-huge", type=int, dest="loop_huge", default=10) parser.add_argument("--huge-size", type=int, dest="huge_size", default=1 << 20) parser.add_argument("--no-header", action="store_false", dest="print_header", default=True) parser.add_argument("--no-stats", action="store_false", dest="print_stats", default=True) options = parser.parse_args(args) import statistics from . 
import MPI from .util import pkl5 # pylint: disable=import-error numpy = cupy = numba = None if options.array == 'numpy': try: import numpy except ImportError: # pragma: no cover pass elif options.array == 'cupy': # pragma: no cover import cupy elif options.array == 'numba': # pragma: no cover import numba.cuda skip = options.skip loop = options.loop min_size = options.min_size max_size = options.max_size skip_large = options.skip_large loop_large = options.loop_large large_size = options.large_size skip_huge = options.skip_huge loop_huge = options.loop_huge huge_size = options.huge_size use_pickle = options.pickle or options.outband use_outband = options.outband protocol = options.protocol if use_pickle else None threshold = options.threshold if use_outband else None if use_outband: comm = pkl5.Intracomm(comm) if protocol is not None: MPI.pickle.PROTOCOL = protocol if threshold is not None: pkl5.pickle.THRESHOLD = threshold buf_sizes = [1 << i for i in range(33)] buf_sizes = [n for n in buf_sizes if min_size <= n <= max_size] wtime = MPI.Wtime if use_pickle: send = comm.send recv = comm.recv sendrecv = comm.sendrecv else: send = comm.Send recv = comm.Recv sendrecv = comm.Sendrecv s_msg = r_msg = None def allocate(nbytes): # pragma: no cover if numpy: return numpy.empty(nbytes, 'B') elif cupy: return cupy.empty(nbytes, 'B') elif numba: return numba.cuda.device_array(nbytes, 'B') else: return bytearray(nbytes) def run_pingpong(): rank = comm.Get_rank() size = comm.Get_size() t_start = wtime() if size == 1: sendrecv(s_msg, 0, 0, r_msg, 0, 0) sendrecv(s_msg, 0, 0, r_msg, 0, 0) elif rank == 0: send(s_msg, 1, 0) recv(r_msg, 1, 0) elif rank == 1: recv(r_msg, 0, 0) send(s_msg, 0, 0) t_end = wtime() return (t_end - t_start) / 2 result = [] for nbytes in buf_sizes: if nbytes > large_size: skip = min(skip, skip_large) loop = min(loop, loop_large) if nbytes > huge_size: skip = min(skip, skip_huge) loop = min(loop, loop_huge) iterations = list(range(loop + skip)) if use_pickle: s_msg = allocate(nbytes) else: s_msg = [allocate(nbytes), nbytes, MPI.BYTE] r_msg = [allocate(nbytes), nbytes, MPI.BYTE] t_list = [] comm.Barrier() for i in iterations: elapsed = run_pingpong() if i >= skip: t_list.append(elapsed) s_msg = r_msg = None t_mean = statistics.mean(t_list) if t_list else float('nan') t_stdev = statistics.stdev(t_list) if len(t_list) > 1 else 0.0 result.append((nbytes, t_mean, t_stdev)) if options.verbose and comm.rank == 0: if options.print_header: options.print_header = False print("# MPI PingPong Test") header = "# Size [B] Bandwidth [MB/s]" if options.print_stats: header += " | Time Mean [s] \u00b1 StdDev [s] Samples" print(header, flush=True) bandwidth = nbytes / t_mean message = f"{nbytes:10d}{bandwidth / 1e6:18.2f}" if options.print_stats: message += f" | {t_mean:.7e} \u00b1 {t_stdev:.4e} {loop:8d}" print(message, flush=True) return result def _fn_identity(arg): # pragma: no cover return arg def futures(comm, args=None, verbose=True): """Measure mpi4py.futures task throughput.""" # pylint: disable=too-many-locals # pylint: disable=too-many-statements # pylint: disable=import-outside-toplevel from argparse import ArgumentParser parser = ArgumentParser(prog=_prog("futures")) parser.add_argument( "-q", "--quiet", help="quiet output", action="store_false", dest="verbose", default=verbose, ) parser.add_argument( "-e", "--executor", help="executor backend", action="store", dest="executor", default="mpi", choices=["mpi", "process", "thread"], ) parser.add_argument( "-w", "--num-workers", help="number 
or workers", type=int, dest="workers", default=None, ) parser.add_argument( "-t", "--num-tasks", help="number of tasks per worker", type=int, dest="tasks", default=50, ) parser.add_argument( "-m", "--min-size", help="minimum task data size", type=int, dest="min_size", default=0, ) parser.add_argument( "-n", "--max-size", help="maximum task data size", type=int, dest="max_size", default=1 << 20, ) parser.add_argument( "-a", "--allocator", help="task data allocator", action="store", dest="allocator", default="numpy", choices=["numpy", "array", "bytes"], ) parser.add_argument( "-c", "--chunksize", help="chunksize parameter", type=int, dest="chunksize", default=1, ) parser.add_argument( "-b", "--backoff", help="backoff parameter", type=float, dest="backoff", default=0.0, ) parser.add_argument( "-o", "--outband", help="use out-of-band pickle", action="store_true", dest="outband", default=False, ) parser.add_argument( "-s", "--skip", help="number of warm-up iterations", type=int, dest="skip", default=1, ) parser.add_argument( "-l", "--loop", help="number of sample iterations", type=int, dest="loop", default=10, ) parser.add_argument( "--skip-large", type=int, dest="skip_large", default=1, ) parser.add_argument( "--loop-large", type=int, dest="loop_large", default=5, ) parser.add_argument( "--large-size", type=int, dest="large_size", default=1 << 16, ) parser.add_argument( "--skip-huge", type=int, dest="skip_huge", default=1, ) parser.add_argument( "--loop-huge", type=int, dest="loop_huge", default=3, ) parser.add_argument( "--huge-size", type=int, dest="huge_size", default=1 << 18, ) parser.add_argument( "--no-header", action="store_false", dest="print_header", default=True, ) parser.add_argument( "--no-stats", action="store_false", dest="print_stats", default=True, ) options = parser.parse_args(args) import time import statistics import concurrent.futures from .futures import MPIPoolExecutor executor_type = options.executor workers = options.workers tasks = options.tasks allocator = options.allocator backoff = options.backoff use_pkl5 = options.outband chunksize = options.chunksize skip = options.skip loop = options.loop min_size = options.min_size max_size = options.max_size skip_large = options.skip_large loop_large = options.loop_large large_size = options.large_size skip_huge = options.skip_huge loop_huge = options.loop_huge huge_size = options.huge_size buf_sizes = [1 << i for i in range(33)] buf_sizes = [n for n in buf_sizes if min_size <= n <= max_size] wtime = time.perf_counter numpy = array = None if allocator == 'numpy': try: import numpy except ImportError: # pragma: no cover pass elif allocator == 'array': import array def allocate(nbytes): if numpy: return numpy.zeros(nbytes, 'B') if array: buf = array.array('B', []) buf.frombytes(bytes(nbytes)) return buf return bytes(nbytes) def create_executor(): if executor_type == "process": return concurrent.futures.ProcessPoolExecutor( max_workers=workers, ) if executor_type == "thread": return concurrent.futures.ThreadPoolExecutor( max_workers=workers, ) assert executor_type == "mpi" # noqa: S101 return MPIPoolExecutor( max_workers=workers, backoff=backoff, use_pkl5=use_pkl5, ) def get_num_workers(): return executor._max_workers # pylint: disable=protected-access def prime_executor(): executor_map(time.sleep, [0.001] * get_num_workers()) def executor_map(task, data): iterator = executor.map( task, data, chunksize=chunksize, ) for _ in iterator: pass def run_futures(): t_start = wtime() executor_map(_fn_identity, data) t_end = wtime() return 
t_end - t_start executor = create_executor() num_workers = get_num_workers() num_tasks = num_workers * tasks result = [] prime_executor() for nbytes in buf_sizes: if nbytes > large_size: skip = min(skip, skip_large) loop = min(loop, loop_large) if nbytes > huge_size: skip = min(skip, skip_huge) loop = min(loop, loop_huge) iterations = list(range(loop + skip)) data = [allocate(nbytes) for _ in range(num_tasks)] t_list = [] for i in iterations: elapsed = run_futures() if i >= skip: t_list.append(elapsed) data = None t_mean = statistics.mean(t_list) if t_list else float('nan') t_stdev = statistics.stdev(t_list) if len(t_list) > 1 else 0.0 result.append((nbytes, t_mean, t_stdev)) if options.verbose and comm.rank == 0: if options.print_header: options.print_header = False print( f"# {type(executor).__name__} - " f"{num_workers} workers, " f"{tasks} tasks/worker" ) header = "# Size [B] Tasks/s" if options.print_stats: header += " | Time Mean [s] \u00b1 StdDev [s] Samples" print(header, flush=True) throughput = num_tasks / t_mean message = f"{nbytes:10d}{throughput:9.0f}" if options.print_stats: message += f" | {t_mean:.7e} \u00b1 {t_stdev:.4e} {loop:8d}" print(message, flush=True) executor.shutdown() return result def main(args=None): """Entry-point for ``python -m mpi4py.bench``.""" # pylint: disable=import-outside-toplevel from argparse import ArgumentParser, REMAINDER parser = ArgumentParser(prog=_prog(), usage="%(prog)s [options] [args]") parser.add_argument("--threads", action="store_true", dest="threads", default=None, help="initialize MPI with thread support") parser.add_argument("--no-threads", action="store_false", dest="threads", default=None, help="initialize MPI without thread support") parser.add_argument("--thread-level", dest="thread_level", default=None, action="store", metavar="LEVEL", choices="single funneled serialized multiple".split(), help="initialize MPI with required thread level") parser.add_argument("command", action="store", metavar="", help="benchmark command to run") parser.add_argument("args", nargs=REMAINDER, metavar="[args]", help="arguments for benchmark command") options = parser.parse_args(args) from . import rc if options.threads is not None: rc.threads = options.threads if options.thread_level is not None: rc.thread_level = options.thread_level from . import MPI comm = MPI.COMM_WORLD if options.command not in main.commands: if comm.rank == 0: parser.error(f"unknown command {options.command!r}") parser.exit(2) command = main.commands[options.command] command(comm, options.args) parser.exit() main.commands = { # type: ignore[attr-defined] 'helloworld': helloworld, 'ringtest': ringtest, 'pingpong': pingpong, 'futures': futures, } if __name__ == '__main__': main() mpi4py-4.0.3/src/mpi4py/bench.pyi000066400000000000000000000010651475341043600165640ustar00rootroot00000000000000from typing import Sequence from .MPI import Intracomm def helloworld(comm: Intracomm, args: Sequence[str] | None = None, verbose: bool = True) -> str: ... def ringtest(comm: Intracomm, args: Sequence[str] | None = None, verbose: bool = True) -> float: ... def pingpong(comm: Intracomm, args: Sequence[str] | None = None, verbose: bool = True) -> list[tuple[int, float, float]]: ... def futures(comm: Intracomm, args: Sequence[str] | None = None, verbose: bool = True) -> list[tuple[int, float, float]]: ... def main(args: Sequence[str] | None = ...) -> None: ... 
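A small sketch of driving the benchmark helpers above programmatically, as an alternative to running them through "mpiexec -n 2 python -m mpi4py.bench <command>"; the message size and loop count are illustrative:

# Sketch: call the mpi4py.bench entry points directly with an explicit communicator.
from mpi4py import MPI
from mpi4py import bench

comm = MPI.COMM_WORLD
bench.helloworld(comm)  # prints one "Hello, World!" line per rank
elapsed = bench.ringtest(comm, args=['-n', '1024', '-l', '10'], verbose=False)
if comm.rank == 0:
    print(f"ring time for 10 loops of 1024-byte messages: {elapsed:g} s")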
mpi4py-4.0.3/src/mpi4py/futures/000077500000000000000000000000001475341043600164555ustar00rootroot00000000000000mpi4py-4.0.3/src/mpi4py/futures/__init__.py000066400000000000000000000016761475341043600206000ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com """Execute computations asynchronously using MPI processes.""" # pylint: disable=redefined-builtin from ._base import ( Future, Executor, wait, FIRST_COMPLETED, FIRST_EXCEPTION, ALL_COMPLETED, as_completed, CancelledError, TimeoutError, # noqa: A004 InvalidStateError, BrokenExecutor, ) from .pool import ( MPIPoolExecutor, MPICommExecutor, ThreadPoolExecutor, ProcessPoolExecutor, get_comm_workers, ) from .util import ( collect, compose, ) __all__ = [ 'Future', 'Executor', 'wait', 'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED', 'as_completed', 'CancelledError', 'TimeoutError', 'InvalidStateError', 'BrokenExecutor', 'MPIPoolExecutor', 'MPICommExecutor', 'ThreadPoolExecutor', 'ProcessPoolExecutor', 'get_comm_workers', 'collect', 'compose', ] mpi4py-4.0.3/src/mpi4py/futures/__init__.pyi000066400000000000000000000021311475341043600207340ustar00rootroot00000000000000from ._base import ( Future as Future, Executor as Executor, wait as wait, FIRST_COMPLETED as FIRST_COMPLETED, FIRST_EXCEPTION as FIRST_EXCEPTION, ALL_COMPLETED as ALL_COMPLETED, as_completed as as_completed, CancelledError as CancelledError, TimeoutError as TimeoutError, # noqa: A004 InvalidStateError as InvalidStateError, BrokenExecutor as BrokenExecutor, ) from .pool import ( MPIPoolExecutor as MPIPoolExecutor, MPICommExecutor as MPICommExecutor, ThreadPoolExecutor as ThreadPoolExecutor, ProcessPoolExecutor as ProcessPoolExecutor, get_comm_workers as get_comm_workers, ) from .util import ( collect as collect, compose as compose, ) __all__: list[str] = [ 'Future', 'Executor', 'wait', 'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED', 'as_completed', 'CancelledError', 'TimeoutError', 'InvalidStateError', 'BrokenExecutor', 'MPIPoolExecutor', 'MPICommExecutor', 'ThreadPoolExecutor', 'ProcessPoolExecutor', 'get_comm_workers', 'collect', 'compose', ] mpi4py-4.0.3/src/mpi4py/futures/__main__.py000066400000000000000000000044231475341043600205520ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com """Run Python code using ``mpi4py.futures``. Python code (scripts, modules, zip files) is run in the process with rank 0 in ``MPI.COMM_WORLD`` and creates `MPIPoolExecutor` instances to submit tasks. The other processes team-up in a static-size shared pool of workers executing tasks submitted from the master process. """ def main(): """Entry point for ``python -m mpi4py.futures ...``.""" # pylint: disable=import-outside-toplevel import os import sys from ..run import run_command_line from ..run import set_abort_status from ._core import SharedPoolCtx class UsageExit(SystemExit): # pylint: disable=missing-class-docstring pass def usage(error=None): from textwrap import dedent python = os.path.basename(sys.executable) program = __spec__.parent usage = dedent(f""" usage: {python} -m {program} [arg] ... or: {python} -m {program} -m [arg] ... or: {python} -m {program} -c [arg] ... 
""").strip() if error: print(error, file=sys.stderr) print(usage, file=sys.stderr) else: print(usage, file=sys.stdout) raise UsageExit(1 if error else 0) def chk_command_line(): args = sys.argv[1:] if not args: usage("No path specified for execution") elif args[0] == '-': pass elif args[0] in ('-h', '--help'): usage() elif args[0] in ('-m', '-c'): if len(args) < 2: usage(f"Argument expected for option: {args[0]}") elif args[0].startswith('-'): usage(f"Unknown option: {args[0]}") elif not os.path.exists(args[0]): usage(f"Path does not exist: {args[0]}") try: with SharedPoolCtx() as context: if context is not None: chk_command_line() run_command_line() except UsageExit: raise except SystemExit as exc: set_abort_status(exc) raise except KeyboardInterrupt as exc: set_abort_status(exc) raise except BaseException: set_abort_status(1) raise if __name__ == '__main__': main() mpi4py-4.0.3/src/mpi4py/futures/__main__.pyi000066400000000000000000000000301475341043600207110ustar00rootroot00000000000000def main() -> None: ... mpi4py-4.0.3/src/mpi4py/futures/_base.py000066400000000000000000000016141475341043600201020ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com # pylint: disable=unused-import # pylint: disable=redefined-builtin # pylint: disable=missing-module-docstring from concurrent.futures import ( # noqa: F401 FIRST_COMPLETED, FIRST_EXCEPTION, ALL_COMPLETED, CancelledError, TimeoutError, # noqa: A004 Future, Executor, wait, as_completed, ) try: # Python 3.7 from concurrent.futures import BrokenExecutor except ImportError: # pragma: no cover class BrokenExecutor(RuntimeError): """The executor has become non-functional.""" try: # Python 3.8 from concurrent.futures import InvalidStateError except ImportError: # pragma: no cover # pylint: disable=too-few-public-methods # pylint: disable=useless-object-inheritance class InvalidStateError(CancelledError.__base__): """The operation is not allowed in this state.""" mpi4py-4.0.3/src/mpi4py/futures/_base.pyi000066400000000000000000000006431475341043600202540ustar00rootroot00000000000000from concurrent.futures import ( FIRST_COMPLETED as FIRST_COMPLETED, FIRST_EXCEPTION as FIRST_EXCEPTION, ALL_COMPLETED as ALL_COMPLETED, CancelledError as CancelledError, TimeoutError as TimeoutError, # noqa: A004 BrokenExecutor as BrokenExecutor, InvalidStateError as InvalidStateError, Future as Future, Executor as Executor, wait as wait, as_completed as as_completed, ) mpi4py-4.0.3/src/mpi4py/futures/_core.py000066400000000000000000001002761475341043600201240ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com """Management of MPI worker processes.""" # pylint: disable=broad-except # pylint: disable=too-many-lines # pylint: disable=protected-access # pylint: disable=import-outside-toplevel # pylint: disable=missing-class-docstring # pylint: disable=missing-function-docstring import os import sys import time import atexit import weakref import warnings import itertools import threading import traceback import collections from .. 
import MPI from ..util import pkl5 from ._base import BrokenExecutor # --- _tls = threading.local() def serialized(function): def wrapper(*args, **kwargs): with serialized.lock: return function(*args, **kwargs) if serialized.lock is None: return function return wrapper serialized.lock = None # type: ignore[attr-defined] def setup_mpi_threads(): with setup_mpi_threads.lock: thread_level = setup_mpi_threads.thread_level if thread_level is None: thread_level = MPI.Query_thread() setup_mpi_threads.thread_level = thread_level if thread_level < MPI.THREAD_MULTIPLE: serialized.lock = threading.Lock() if thread_level < MPI.THREAD_SERIALIZED: warnings.warn( "the level of thread support in MPI " "should be at least MPI_THREAD_SERIALIZED", RuntimeWarning, stacklevel=2, ) setup_mpi_threads.lock = threading.Lock() # type: ignore[attr-defined] setup_mpi_threads.thread_level = None # type: ignore[attr-defined] # --- class RemoteTraceback(Exception): pass def _unwrap_exc(exc, tb): exc.__cause__ = RemoteTraceback(tb) return exc class _ExceptionWrapper(Exception): def __reduce__(self): return _unwrap_exc, self.args def _wrap_exc(exc, tb): exc.__cause__ = None exc.__context__ = None exc.__traceback__ = None return _ExceptionWrapper(exc, tb) def _format_exc(exc, comm): exc_info = (type(exc), exc, exc.__traceback__) tb_lines = traceback.format_exception(*exc_info) body = "".join(tb_lines) host = MPI.Get_processor_name() rank = comm.Get_rank() size = comm.Get_size() info = f"### Worker {rank} of {size} on {host}\n" return f'\n{info}"""\n{body}"""' if sys.version_info >= (3, 11): def sys_exception(): return sys.exception() else: # pragma: no cover def sys_exception(): return sys.exc_info()[1] def os_environ_get(name, default=None): varname = f'MPI4PY_FUTURES_{name}' if varname not in os.environ: oldname = f'MPI4PY_{name}' if oldname in os.environ: # pragma: no cover warnings.warn( f"environment variable {oldname} is deprecated, use {varname}", DeprecationWarning, stacklevel=1, ) return os.environ[oldname] return os.environ.get(varname, default) # --- BACKOFF = 0.001 def _getopt_backoff(options): backoff = options.get('backoff') if backoff is None: backoff = os_environ_get('BACKOFF', BACKOFF) return float(backoff) class Backoff: def __init__(self, seconds=BACKOFF): self.tval = 0.0 self.tmax = max(float(seconds), 0.0) self.tmin = self.tmax / (1 << 10) def reset(self): self.tval = 0.0 def sleep(self): time.sleep(self.tval) self.tval = min(self.tmax, max(self.tmin, self.tval * 2)) class TaskQueue(collections.deque): put = collections.deque.append pop = collections.deque.popleft add = collections.deque.appendleft class WorkerSet(collections.deque): add = collections.deque.append pop = collections.deque.popleft THREADS_QUEUES = weakref.WeakKeyDictionary() # type: weakref.WeakKeyDictionary def join_threads(threads_queues=THREADS_QUEUES): items = list(threads_queues.items()) for _, queue in items: # pragma: no cover queue.put(None) for thread, _ in items: # pragma: no cover thread.join() if hasattr(threading, '_register_atexit'): threading._register_atexit(join_threads) else: # pragma: no cover atexit.register(join_threads) class Pool: def __init__(self, executor, manager, *args): self.size = None self.event = threading.Event() self.queue = queue = TaskQueue() self.exref = weakref.ref(executor, lambda _, q=queue: q.put(None)) args = (self, executor._options, *args) thread = threading.Thread(target=manager, args=args) self.thread = thread setup_mpi_threads() thread.daemon = not hasattr(threading, '_register_atexit') 
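# Manager threads are daemonized only when threading._register_atexit is
# unavailable (older CPython); otherwise join_threads(), registered above,
# joins them at interpreter shutdown.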
thread.start() THREADS_QUEUES[thread] = queue def wait(self): self.event.wait() def push(self, item): self.queue.put(item) def done(self): self.queue.put(None) def join(self): self.thread.join() def setup(self, size): self.size = size self.event.set() return self.queue def cancel(self, handler=None): queue = self.queue while True: try: item = queue.pop() except LookupError: break if item is None: queue.put(None) break future, task = item if handler: handler(future) else: future.cancel() future.set_running_or_notify_cancel() del future, task, item def broken(self, message): lock = None executor = self.exref() if executor is not None: executor._broken = message if not executor._shutdown: lock = executor._lock def handler(future): if future.set_running_or_notify_cancel(): exception = BrokenExecutor(message) future.set_exception(exception) self.event.set() if lock: lock.acquire() try: self.cancel(handler) finally: if lock: lock.release() def initialize(options): initializer = options.pop('initializer', None) initargs = options.pop('initargs', ()) initkwargs = options.pop('initkwargs', {}) if initializer is not None: try: initializer(*initargs, **initkwargs) except BaseException: return False else: return True return True def _manager_thread(pool, options): size = options.pop('max_workers', 1) queue = pool.setup(size) threads = collections.deque() max_threads = size - 1 def init(): if not initialize(options): pool.broken("initializer failed") return False return True def adjust(): if len(threads) < max_threads: thread = threading.Thread(target=worker) thread.start() threads.append(thread) def execute(future, task): func, args, kwargs = task result = exception = None try: result = func(*args, **kwargs) future.set_result(result) except BaseException: exception = sys_exception() future.set_exception(exception) del result, exception del func, args, kwargs del future, task def finalize(): for thread in threads: thread.join() queue.pop() def worker(): backoff = Backoff(_getopt_backoff(options)) if not init(): queue.put(None) return while True: try: item = queue.pop() backoff.reset() except LookupError: backoff.sleep() continue if item is None: queue.put(None) break future, task = item if future.set_running_or_notify_cancel(): if queue: adjust() execute(future, task) del future, task, item worker() finalize() def _manager_comm(pool, options, comm, sync=True): assert comm != MPI.COMM_NULL # noqa: S101 assert comm.Is_inter() # noqa: S101 assert comm.Get_size() == 1 # noqa: S101 comm = client_sync(comm, options, sync) if not client_init(comm, options): pool.broken("initializer failed") client_stop(comm) return size = comm.Get_remote_size() queue = pool.setup(size) workers = WorkerSet(range(size)) client_exec(comm, options, 0, workers, queue) client_stop(comm) def _manager_spawn(pool, options): pyexe = options.pop('python_exe', None) pyargs = options.pop('python_args', None) nprocs = options.pop('max_workers', None) info = options.pop('mpi_info', None) comm = serialized(client_spawn)(pyexe, pyargs, nprocs, info) _manager_comm(pool, options, comm) def _manager_service(pool, options): service = options.pop('service', None) info = options.pop('mpi_info', None) comm = serialized(client_connect)(service, info) _manager_comm(pool, options, comm) def ThreadPool(executor): # pylint: disable=invalid-name return Pool(executor, _manager_thread) def SpawnPool(executor): # pylint: disable=invalid-name return Pool(executor, _manager_spawn) def ServicePool(executor): # pylint: disable=invalid-name return Pool(executor, 
_manager_service) def WorkerPool(executor): # pylint: disable=invalid-name if SharedPool is not None: return SharedPool(executor) if 'service' in executor._options: return ServicePool(executor) return SpawnPool(executor) # --- SharedPool = None # pylint: disable=invalid-name def _set_shared_pool(obj): # pylint: disable=global-statement global SharedPool SharedPool = obj class SharedPoolCtx: def __init__(self): self.lock = threading.Lock() self.comm = MPI.COMM_NULL self.intracomm = MPI.COMM_NULL self.on_root = None self.counter = None self.workers = None self.threads = weakref.WeakKeyDictionary() def __reduce__(self): return 'SharedPool' def _initialize_remote(self): barrier(self.intracomm) server_init(self.comm) def _initialize(self, options, tag): if tag == 0: self.comm = client_sync(self.comm, options) return client_init(self.comm, options) if options.get('initializer') is None: return True task = (self._initialize_remote, (), {}) reqs = isendtoall(self.comm, task, tag) waitall(self.comm, reqs, poll=True) success = client_init(self.comm, options) recvfromall(self.comm, tag) return success def _manager(self, pool, options): if self.counter is None: options['max_workers'] = 1 set_comm_server(MPI.COMM_SELF) _manager_thread(pool, options) return with self.lock: tag = next(self.counter) if not self._initialize(options, tag): pool.broken("initializer failed") return comm = self.comm size = comm.Get_remote_size() queue = pool.setup(size) client_exec(comm, options, tag, self.workers, queue) def __call__(self, executor): assert SharedPool is self # noqa: S101 with self.lock: pool = Pool(executor, self._manager) del THREADS_QUEUES[pool.thread] self.threads[pool.thread] = pool.queue return pool def __enter__(self): assert SharedPool is None # noqa: S101 comm, root = MPI.COMM_WORLD, 0 self.on_root = comm.Get_rank() == root self.comm, self.intracomm = comm_split(comm, root) if self.comm != MPI.COMM_NULL and self.on_root: size = self.comm.Get_remote_size() self.counter = itertools.count(0) self.workers = WorkerSet(range(size)) _set_shared_pool(self) return self if self.on_root else None def __exit__(self, *args): assert SharedPool is self # noqa: S101 if self.on_root: join_threads(self.threads) if self.comm != MPI.COMM_NULL: comm = self.comm if self.on_root: if next(self.counter) == 0: options = {'main': False} self._initialize(options, 0) client_stop(comm) else: intracomm = self.intracomm set_comm_server(intracomm) server_main_comm(comm) intracomm.Free() if not self.on_root: join_threads(self.threads) _set_shared_pool(None) self.comm = MPI.COMM_NULL self.intracomm = MPI.COMM_NULL self.on_root = None self.counter = None self.workers = None self.threads.clear() return False # --- def comm_split(comm, root): if comm.Get_size() == 1: comm = MPI.Intercomm(MPI.COMM_NULL) intracomm = MPI.Intracomm(MPI.COMM_NULL) return comm, intracomm rank = comm.Get_rank() if MPI.Get_version() >= (2, 2): allgroup = comm.Get_group() if rank == root: group = allgroup.Incl([root]) else: group = allgroup.Excl([root]) allgroup.Free() intracomm = comm.Create(group) group.Free() else: # pragma: no cover color = 0 if rank == root else 1 intracomm = comm.Split(color, key=0) if rank == root: local_leader = 0 remote_leader = 0 if root else 1 else: local_leader = 0 remote_leader = root intercomm = intracomm.Create_intercomm( local_leader, comm, remote_leader, tag=0) if rank == root: intracomm.Free() return intercomm, intracomm # --- def _comm_executor_helper(executor, comm, root): def _manager(pool, options, comm, root): if 
comm.Get_size() == 1: options['max_workers'] = 1 set_comm_server(MPI.COMM_SELF) _manager_thread(pool, options) return comm, _ = serialized(comm_split)(comm, root) _manager_comm(pool, options, comm, sync=False) if comm.Get_rank() == root: if SharedPool is not None: pool = SharedPool(executor) else: pool = Pool(executor, _manager, comm, root) executor._pool = pool else: comm, intracomm = comm_split(comm, root) set_comm_server(intracomm) server_main_comm(comm, sync=False) intracomm.Free() # --- def _getenv_use_pkl5(): value = os_environ_get('USE_PKL5') if value is None: return None if value.lower() in ('false', 'no', 'off', 'n', '0'): return False if value.lower() in ('true', 'yes', 'on', 'y', '1'): return True warnings.warn( f"environment variable MPI4PY_FUTURES_USE_PKL5: " f"unexpected value {value!r}", RuntimeWarning, stacklevel=1, ) return False def _setopt_use_pkl5(options): use_pkl5 = options.get('use_pkl5') if use_pkl5 is None: use_pkl5 = _getenv_use_pkl5() if use_pkl5 is not None: options['use_pkl5'] = use_pkl5 def _get_comm(comm, options): use_pkl5 = options.pop('use_pkl5', None) if use_pkl5: return pkl5.Intercomm(comm) return comm def _get_request(comm): use_pkl5 = isinstance(comm, pkl5.Comm) if use_pkl5: return pkl5.Request return MPI.Request # --- def barrier(comm): try: request = comm.Ibarrier() backoff = Backoff() while not request.Test(): backoff.sleep() except (NotImplementedError, MPI.Exception): # pragma: no cover buf = [None, 0, MPI.BYTE] tag = MPI.COMM_WORLD.Get_attr(MPI.TAG_UB) sendreqs, recvreqs = [], [] if comm.Is_inter(): size = comm.Get_remote_size() else: size = comm.Get_size() for pid in range(size): recvreqs.append(comm.Irecv(buf, pid, tag)) sendreqs.append(comm.Issend(buf, pid, tag)) backoff = Backoff() while not MPI.Request.Testall(recvreqs): backoff.sleep() MPI.Request.Waitall(sendreqs) def bcast_send(comm, data): if MPI.VERSION >= 2: comm.bcast(data, MPI.ROOT) else: # pragma: no cover tag = MPI.COMM_WORLD.Get_attr(MPI.TAG_UB) sendtoall(comm, data, tag) def bcast_recv(comm): if MPI.VERSION >= 2: data = comm.bcast(None, 0) else: # pragma: no cover tag = MPI.COMM_WORLD.Get_attr(MPI.TAG_UB) data = comm.recv(None, 0, tag) return data def isendtoall(comm, data, tag=0): size = comm.Get_remote_size() return [comm.issend(data, pid, tag) for pid in range(size)] def waitall(comm, requests, poll=False): if poll: request_testall = _get_request(comm).testall backoff = Backoff() while True: done, objs = request_testall(requests) if done: return objs backoff.sleep() else: request_waitall = _get_request(comm).waitall return request_waitall(requests) def sendtoall(comm, data, tag=0): requests = isendtoall(comm, data, tag) waitall(comm, requests) def recvfromall(comm, tag=0): size = comm.Get_remote_size() return [comm.recv(None, pid, tag) for pid in range(size)] def disconnect(comm): try: comm.Disconnect() except NotImplementedError: # pragma: no cover comm.Free() # --- def client_sync(comm, options, sync=True): serialized(barrier)(comm) _setopt_use_pkl5(options) if sync: options = _sync_get_data(options) serialized(bcast_send)(comm, options) comm = _get_comm(comm, options) return comm def client_init(comm, options): serialized(bcast_send)(comm, _init_get_data(options)) sbuf = bytearray([False]) rbuf = bytearray([False]) serialized(comm.Allreduce)(sbuf, rbuf, op=MPI.LAND) success = bool(rbuf[0]) return success def client_exec(comm, options, tag, worker_set, task_queue): # pylint: disable=too-many-locals # pylint: disable=too-many-statements backoff = 
Backoff(_getopt_backoff(options)) status = MPI.Status() comm_recv = serialized(comm.recv) comm_isend = serialized(comm.issend) comm_iprobe = serialized(comm.iprobe) request_free = serialized(_get_request(comm).Free) pending = {} def iprobe(): pid = MPI.ANY_SOURCE return comm_iprobe(pid, tag, status) def probe(): pid = MPI.ANY_SOURCE backoff.reset() while not comm_iprobe(pid, tag, status): backoff.sleep() def recv(): pid = MPI.ANY_SOURCE try: task = comm_recv(None, pid, tag, status) except BaseException: task = (None, sys_exception()) pid = status.source worker_set.add(pid) future, request = pending.pop(pid) request_free(request) result, exception = task if exception is None: future.set_result(result) else: future.set_exception(exception) del result, exception del future, task def send(): try: pid = worker_set.pop() except LookupError: # pragma: no cover return False try: item = task_queue.pop() except LookupError: # pragma: no cover worker_set.add(pid) return False if item is None: worker_set.add(pid) return True future, task = item if not future.set_running_or_notify_cancel(): worker_set.add(pid) return False try: request = comm_isend(task, pid, tag) pending[pid] = (future, request) except BaseException: worker_set.add(pid) future.set_exception(sys_exception()) del future, task, item return None while True: if task_queue and worker_set: backoff.reset() stop = send() if stop: break if pending and iprobe(): backoff.reset() recv() backoff.sleep() while pending: probe() recv() def client_stop(comm): serialized(sendtoall)(comm, None) serialized(disconnect)(comm) def server_sync(comm, sync=True): barrier(comm) options = bcast_recv(comm) if sync: options = _sync_set_data(options) comm = _get_comm(comm, options) return comm, options def server_init(comm): options = bcast_recv(comm) success = initialize(options) sbuf = bytearray([success]) rbuf = bytearray([True]) comm.Allreduce(sbuf, rbuf, op=MPI.LAND) return success def server_exec(comm, options): backoff = Backoff(_getopt_backoff(options)) status = MPI.Status() comm_recv = comm.recv comm_isend = comm.issend comm_iprobe = comm.iprobe request_test = _get_request(comm).test def exception(): exc = sys_exception() tb = _format_exc(exc, comm) return _wrap_exc(exc, tb) def recv(): pid, tag = MPI.ANY_SOURCE, MPI.ANY_TAG backoff.reset() while not comm_iprobe(pid, tag, status): backoff.sleep() pid, tag = status.source, status.tag try: task = comm_recv(None, pid, tag, status) except BaseException: task = exception() return task def call(task): if isinstance(task, BaseException): return (None, task) func, args, kwargs = task try: result = func(*args, **kwargs) except BaseException: return (None, exception()) else: return (result, None) def send(task): pid, tag = status.source, status.tag try: request = comm_isend(task, pid, tag) except BaseException: task = (None, exception()) request = comm_isend(task, pid, tag) backoff.reset() while not request_test(request)[0]: backoff.sleep() while True: task = recv() if task is None: break task = call(task) send(task) def server_stop(comm): disconnect(comm) # --- MAIN_RUN_NAME = '__worker__' def import_main(mod_name, mod_path, init_globals, run_name): import types import runpy module = types.ModuleType(run_name) if init_globals is not None: module.__dict__.update(init_globals) module.__name__ = run_name class TempModulePatch(runpy._TempModule): # pylint: disable=too-few-public-methods def __init__(self, mod_name): super().__init__(mod_name) self.module = module TempModule = runpy._TempModule # pylint: 
disable=invalid-name runpy._TempModule = TempModulePatch import_main.sentinel = (mod_name, mod_path) main_module = sys.modules['__main__'] try: sys.modules['__main__'] = sys.modules[run_name] = module if mod_name: # pragma: no cover runpy.run_module(mod_name, run_name=run_name, alter_sys=True) elif mod_path: # pragma: no branch safe_path = getattr(sys.flags, 'safe_path', sys.flags.isolated) if not safe_path: # pragma: no branch sys.path[0] = os.path.realpath(os.path.dirname(mod_path)) runpy.run_path(mod_path, run_name=run_name) sys.modules['__main__'] = sys.modules[run_name] = module except BaseException: # pragma: no cover sys.modules['__main__'] = main_module raise finally: del import_main.sentinel runpy._TempModule = TempModule def _sync_get_data(options): main = sys.modules['__main__'] sys.modules.setdefault(MAIN_RUN_NAME, main) import_main_module = options.pop('main', True) data = options.copy() data.pop('initializer', None) data.pop('initargs', None) data.pop('initkwargs', None) if import_main_module: spec = getattr(main, '__spec__', None) name = getattr(spec, 'name', None) path = getattr(main, '__file__', None) if name is not None: # pragma: no cover data['@main:mod_name'] = name if path is not None: # pragma: no branch data['@main:mod_path'] = path return data def _sync_set_data(data): if 'path' in data: sys.path.extend(data.pop('path')) if 'wdir' in data: os.chdir(data.pop('wdir')) if 'env' in data: os.environ.update(data.pop('env')) mod_name = data.pop('@main:mod_name', None) mod_path = data.pop('@main:mod_path', None) mod_glbs = data.pop('globals', None) import_main(mod_name, mod_path, mod_glbs, MAIN_RUN_NAME) return data def _init_get_data(options): keys = ('initializer', 'initargs', 'initkwargs') vals = (None, (), {}) data = {k: options.pop(k, v) for k, v in zip(keys, vals)} return data # --- def _check_recursive_spawn(): # pragma: no cover if not hasattr(import_main, 'sentinel'): return main_name, main_path = import_main.sentinel main_info = "\n" if main_name is not None: main_info += f" main name: {main_name!r}\n" if main_path is not None: main_info += f" main path: {main_path!r}\n" main_info += "\n" sys.stderr.write(""" The main script or module attempted to spawn new MPI worker processes. This probably means that you have forgotten to use the proper idiom in your main script or module: if __name__ == '__main__': ... This error is unrecoverable. The MPI execution environment had to be aborted. 
The name/path of the offending main script/module follows: """ + main_info) sys.stderr.flush() time.sleep(1) MPI.COMM_WORLD.Abort(1) FLAG_OPT_MAP = { 'debug': 'd', 'inspect': 'i', 'interactive': 'i', 'optimize': 'O', 'dont_write_bytecode': 'B', 'no_user_site': 's', 'no_site': 'S', 'ignore_environment': 'E', 'verbose': 'v', 'bytes_warning': 'b', 'quiet': 'q', 'hash_randomization': 'R', 'isolated': 'I', # 'dev_mode': 'Xdev', # 'utf8_mode': 'Xutf8', # 'warn_default_encoding': 'Xwarn_default_encoding', 'safe_path': 'P', # 'int_max_str_digits': 'Xint_max_str_digits=0' } def get_python_flags(): args = [] for flag, opt in FLAG_OPT_MAP.items(): val = getattr(sys.flags, flag, 0) val = val if opt[0] != 'i' else 0 val = val if opt[0] != 'Q' else min(val, 1) if val > 0: args.append('-' + opt * val) for opt in sys.warnoptions: # pragma: no cover args.append('-W' + opt) sys_xoptions = getattr(sys, '_xoptions', {}) for opt, val in sys_xoptions.items(): # pragma: no cover args.append('-X' + opt if val is True else '-X' + opt + '=' + val) return args def get_max_workers(): max_workers = os_environ_get('MAX_WORKERS') if max_workers is not None: return int(max_workers) if MPI.UNIVERSE_SIZE != MPI.KEYVAL_INVALID: # pragma: no branch universe_size = MPI.COMM_WORLD.Get_attr(MPI.UNIVERSE_SIZE) if universe_size is not None: # pragma: no cover world_size = MPI.COMM_WORLD.Get_size() return max(universe_size - world_size, 1) return 1 def get_spawn_module(): return __spec__.parent + '.server' def client_spawn( python_exe=None, python_args=None, max_workers=None, mpi_info=None, ): _check_recursive_spawn() if python_exe is None: python_exe = sys.executable if python_args is None: python_args = [] if max_workers is None: max_workers = get_max_workers() if mpi_info is None: mpi_info = {'soft': f'1:{max_workers}'} args = get_python_flags() + list(python_args) args.extend(['-m', get_spawn_module()]) info = MPI.Info.Create() info.update(mpi_info) comm = MPI.COMM_SELF.Spawn(python_exe, args, max_workers, info) info.Free() return comm # --- SERVICE = __spec__.parent SERVER_HOST = 'localhost' SERVER_BIND = '' SERVER_PORT = 31415 def get_service(): return os_environ_get('SERVICE', SERVICE) def get_server_host(): return os_environ_get('SERVER_HOST', SERVER_HOST) def get_server_bind(): return os_environ_get('SERVER_BIND', SERVER_BIND) def get_server_port(): return int(os_environ_get('SERVER_PORT', SERVER_PORT)) def client_lookup(address): from socket import socket host, port = address host = host or get_server_host() port = port or get_server_port() address = (host, int(port)) sock = socket() sock.connect(address) try: fdes = sock.fileno() peer = MPI.Comm.Join(fdes) finally: sock.close() mpi_port = peer.recv(None, 0) peer.Disconnect() return mpi_port def server_publish(address, mpi_port): from socket import socket from socket import SOL_SOCKET, SO_REUSEADDR host, port = address host = host or get_server_bind() port = port or get_server_port() address = (host, int(port)) serversock = socket() serversock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) serversock.bind(address) serversock.listen(0) try: sock = serversock.accept()[0] finally: serversock.close() try: fdes = sock.fileno() peer = MPI.Comm.Join(fdes) finally: sock.close() peer.send(mpi_port, 0) peer.Disconnect() def client_connect(service, mpi_info=None): info = MPI.INFO_NULL if mpi_info: info = MPI.Info.Create() info.update(mpi_info) if not isinstance(service, (list, tuple)): service = service or get_service() port = MPI.Lookup_name(service, info) else: port = 
client_lookup(service) comm = MPI.COMM_SELF.Connect(port, info, root=0) if info != MPI.INFO_NULL: info.Free() return comm def server_accept( service, mpi_info=None, comm=MPI.COMM_WORLD, root=0, ): info = MPI.INFO_NULL if comm.Get_rank() == root: if mpi_info: info = MPI.Info.Create() info.update(mpi_info) port = None if comm.Get_rank() == root: port = MPI.Open_port(info) if comm.Get_rank() == root: if not isinstance(service, (list, tuple)): service = service or get_service() MPI.Publish_name(service, port, info) else: server_publish(service, port) service = None comm = comm.Accept(port, info, root) if port is not None: if service is not None: MPI.Unpublish_name(service, port, info) MPI.Close_port(port) if info != MPI.INFO_NULL: info.Free() return comm # --- def get_comm_server(): try: return _tls.comm_server except AttributeError: raise RuntimeError( "communicator is not accessible" ) from None def set_comm_server(intracomm): _tls.comm_server = intracomm def server_main_comm(comm, sync=True): assert comm != MPI.COMM_NULL # noqa: S101 assert comm.Is_inter() # noqa: S101 assert comm.Get_remote_size() == 1 # noqa: S101 comm, options = server_sync(comm, sync) server_init(comm) server_exec(comm, options) server_stop(comm) def server_main_spawn(): comm = MPI.Comm.Get_parent() set_comm_server(MPI.COMM_WORLD) server_main_comm(comm) def server_main_service(): from getopt import getopt # pylint: disable=deprecated-module longopts = ['bind=', 'port=', 'service=', 'info='] optlist, _ = getopt(sys.argv[1:], '', longopts) optdict = {opt[2:]: val for opt, val in optlist} if 'bind' in optdict or 'port' in optdict: bind = optdict.get('bind') or get_server_bind() port = optdict.get('port') or get_server_port() service = (bind, int(port)) else: service = optdict.get('service') or get_service() info = optdict.get('info', '').split(',') info = dict(k_v.split('=', 1) for k_v in info if k_v) comm = server_accept(service, info) set_comm_server(MPI.COMM_WORLD) server_main_comm(comm) def server_main(): from ..run import set_abort_status try: comm = MPI.Comm.Get_parent() if comm != MPI.COMM_NULL: server_main_spawn() else: server_main_service() except BaseException: set_abort_status(1) raise # --- mpi4py-4.0.3/src/mpi4py/futures/_core.pyi000066400000000000000000000130171475341043600202710ustar00rootroot00000000000000import sys from typing import Any, Generic from typing import Callable, Iterable, Iterator, Sequence, Mapping if sys.version_info >= (3, 10): from typing import TypeAlias else: from typing_extensions import TypeAlias if sys.version_info >= (3, 11): from typing import Self else: from typing_extensions import Self import weakref import threading from ..MPI import Info, Intracomm, Intercomm, Request from ._base import Executor, Future from ..typing import T _Task: TypeAlias = tuple[Callable[..., T], tuple[Any, ...], dict[str, Any]] _Item: TypeAlias = tuple[Future[T], _Task[T]] _Info: TypeAlias = Info | Mapping[str, str] | Iterable[tuple[str, str]] def serialized(function: Callable[..., Any]) -> Callable[..., Any]: ... def setup_mpi_threads() -> None: ... class RemoteTraceback(Exception): ... def sys_exception() -> BaseException: ... def os_environ_get(name: str, default: T | None = ...) -> str | T | None: ... BACKOFF: float = ... class Backoff: tval: float tmax: float tmin: float def __init__(self, seconds: float = BACKOFF) -> None: ... def reset(self) -> None: ... def sleep(self) -> None: ... class TaskQueue(Generic[T]): def put(self, __x: T) -> None: ... def pop(self) -> T: ... 
def add(self, __x: T) -> None: ... class WorkerSet(Generic[T]): def add(self, __x: T) -> None: ... def pop(self) -> T: ... _WeakKeyDict: TypeAlias = weakref.WeakKeyDictionary _ThreadQueueMap: TypeAlias = _WeakKeyDict[threading.Thread, TaskQueue[_Item[Any] | None]] THREADS_QUEUES: _ThreadQueueMap = ... def join_threads(threads_queues: _ThreadQueueMap = ...) -> None: ... class Pool: size: int queue: TaskQueue[_Item[Any] | None] exref: weakref.ReferenceType[Executor] event: threading.Event thread: threading.Thread def __init__( self, executor: Executor, manager: Callable[..., None], *args: Any, ) -> None: ... def wait(self) -> None: ... def push(self, item: _Item[Any]) -> None: ... def done(self) -> None: ... def join(self) -> None: ... def setup(self, size: int) -> TaskQueue[_Item[Any] | None]: ... def cancel( self, handler: Callable[[Future[Any]], None] | None = ..., ) -> None: ... def broken(self, message: str) -> None: ... def initialize(options: Mapping[str, Any]) -> bool: ... def ThreadPool(executor: Executor) -> Pool: ... def SpawnPool(executor: Executor) -> Pool: ... def ServicePool(executor: Executor) -> Pool: ... def WorkerPool(executor: Executor) -> Pool: ... SharedPool: Callable[[Executor], Pool] | None = ... class SharedPoolCtx: comm: Intercomm on_root: bool | None counter: Iterator[int] workers: WorkerSet[int] threads: _ThreadQueueMap def __init__(self) -> None: ... def __call__(self, executor: Executor) -> Pool: ... def __enter__(self) -> Self | None: ... def __exit__(self, *args: object) -> bool: ... def comm_split(comm: Intracomm, root: int) -> tuple[Intercomm, Intracomm]: ... def barrier(comm: Intercomm) -> None: ... def bcast_send(comm: Intercomm, data: Any) -> None: ... def bcast_recv(comm: Intercomm) -> Any: ... def isendtoall(comm: Intercomm, data: Any, tag: int = 0) -> list[Request]: ... def waitall(comm: Intercomm, requests: Sequence[Request], poll: bool = False) -> list[Any]: ... def sendtoall(comm: Intercomm, data: Any, tag: int = 0) -> None: ... def recvfromall(comm: Intercomm, tag: int = 0) -> list[Any]: ... def disconnect(comm: Intercomm) -> None: ... def client_sync( comm: Intercomm, options: Mapping[str, Any], sync: bool = ..., ) -> Intercomm: ... def client_init( comm: Intercomm, options: Mapping[str, Any], ) -> bool: ... def client_exec( comm: Intercomm, options: Mapping[str, Any], tag: int, worker_set: WorkerSet[int], task_queue: TaskQueue[_Item[Any] | None], ) -> None: ... def client_stop( comm: Intercomm, ) -> None: ... def server_sync( comm: Intercomm, sync: bool = ..., ) -> tuple[Intercomm, dict[str, Any]]: ... def server_init( comm: Intercomm, ) -> bool: ... def server_exec( comm: Intercomm, options: Mapping[str, Any], ) -> None: ... def server_stop( comm: Intercomm, ) -> None: ... MAIN_RUN_NAME: str = ... def import_main( mod_name: str, mod_path: str, init_globals: dict[str, Any] | None, run_name: str, ) -> None: ... FLAG_OPT_MAP: dict[str, str] def get_python_flags() -> list[str]: ... def get_max_workers() -> int: ... def get_spawn_module() -> str: ... def client_spawn( python_exe: str | None = ..., python_args: Sequence[str] | None = ..., max_workers: int | None = ..., mpi_info: _Info | None = ..., ) -> Intercomm: ... SERVICE: str = ... SERVER_HOST: str = ... SERVER_BIND: str = ... SERVER_PORT: int = ... def get_service() -> str: ... def get_server_host() -> str: ... def get_server_bind() -> str: ... def get_server_port() -> int: ... _Address: TypeAlias = tuple[str | None, int | None] def client_lookup(address: _Address) -> str: ... 
def server_publish(address: _Address, mpi_port: str) -> None: ... def client_connect( service: str | _Address, mpi_info: _Info | None = ..., ) -> Intercomm: ... def server_accept( service: str | _Address, mpi_info: _Info | None = ..., comm: Intracomm = ..., root: int = ..., ) -> Intercomm: ... def get_comm_server() -> Intracomm: ... def set_comm_server(intracomm: Intracomm) -> None: ... def server_main_comm(comm: Intercomm, sync: bool = ...) -> None: ... def server_main_spawn() -> None: ... def server_main_service() -> None: ... def server_main() -> None: ... mpi4py-4.0.3/src/mpi4py/futures/aplus.py000066400000000000000000000147661475341043600201710ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com """Support for Future chaining.""" # pylint: disable=broad-exception-caught # This implementation is heavily inspired in code written by # Daniel Dotsenko [@dvdotsenko] [dotsa at hotmail.com] # https://github.com/dvdotsenko/python-future-then import functools import threading import weakref from ._base import Future class ThenableFuture(Future): """*Thenable* `Future` subclass.""" def then(self, on_success=None, on_failure=None): """Return ``then(self, on_success, on_failure)``.""" return then(self, on_success, on_failure) def catch(self, on_failure=None): """Return ``catch(self, on_failure)``.""" return catch(self, on_failure) def then(future, on_success=None, on_failure=None): """JavaScript-like (`Promises/A+`_) support for Future chaining. Args: future: Input future instance. on_success: Function to be called once the input future completes with success. Once the input future finish running with success, its result value is the input argument for *on_success*. If *on_success* returns a `Future` instance, the result future is chained to the output future. Otherwise, the result of *on_success* is set as the result of the output future. If *on_success* is ``None``, the output future is resolved directly with the result of the input future. on_failure: Function to be called once the input future completes with failure. Once the input future finish running with failure, its exception value is the input argument for *on_failure*. If *on_failure* returns a `Future` instance, the result future is chained to the output future. Otherwise, if *on_failure* returns an `Exception` instance, it is set as the exception of the output future. Otherwise, the result of *on_failure* is set as the result of the output future. If *on_failure* is ``None``, the output future is set as failed with the exception from the input future. Returns: Output future instance to be completed once the input future is completed and either *on_success* or *on_failure* completes. .. _Promises/A+: https://promisesaplus.com/ """ return _chain(future, on_success, on_failure) def catch(future, on_failure=None): """Close equivalent to ``then(future, None, on_failure)``. Args: future: Input future instance. on_failure: Function to be called once the input future completes with failure. Once the input future finish running with failure, its exception value is the input argument for *on_failure*. If *on_failure* returns a `Future` instance, the result future is chained to the output future. Otherwise, if *on_failure* returns an `Exception` instance, it is set as the exception of the output future. Otherwise, the result of *on_failure* is set as the result of the output future. If *on_failure* is ``None``, the output future is set as failed with the exception from the input future. 
Returns: Output future instance to be completed once the input future is completed and *on_failure* completes. """ if on_failure is None: return then(future, None, lambda _: None) return then(future, None, on_failure) def _chain(future, on_success=None, on_failure=None): new_future = future.__class__() done_cb = functools.partial( _chain_resolve, [new_future], on_success=on_success, on_failure=on_failure, ) future.add_done_callback(done_cb) return new_future _chain_log_lock = threading.Lock() _chain_log_registry = weakref.WeakKeyDictionary() def _chain_check_cycle(new_future, future): if new_future is future: raise RuntimeError( f"chain cycle detected: " f"Future {future} chained with itself" ) with _chain_log_lock: registry = _chain_log_registry try: log = registry[new_future] except KeyError: log = weakref.WeakSet() registry[new_future] = log if future in log: raise RuntimeError( f"chain cycle detected: " f"Future {future} already in chain" ) log.add(future) def _chain_resolve_future(new_future, old_future): _chain_check_cycle(new_future, old_future) done_cb = functools.partial(_chain_resolve, [new_future]) old_future.add_done_callback(done_cb) def _chain_resolve_success(new_future, result, on_success=None): try: if on_success: result = on_success(result) if isinstance(result, Future): _chain_resolve_future(new_future, result) else: new_future.set_result(result) finally: del new_future, result, on_success def _chain_resolve_failure(new_future, reason, on_failure=None): try: if on_failure: try: reason = on_failure(reason) if isinstance(reason, Future): _chain_resolve_future(new_future, reason) elif isinstance(reason, BaseException): new_future.set_exception(reason) else: new_future.set_result(reason) except BaseException as exception: new_future.set_exception(exception) else: new_future.set_exception(reason) finally: del new_future, reason, on_failure def _chain_resolve(ctx, old_future, on_success=None, on_failure=None): new_future = ctx.pop() if old_future.cancelled() or new_future.cancelled(): new_future.cancel() new_future.set_running_or_notify_cancel() return try: if old_future.exception() is not None: _chain_resolve_failure( new_future, old_future.exception(), on_failure, ) else: try: _chain_resolve_success( new_future, old_future.result(), on_success, ) except BaseException as exception: _chain_resolve_failure( new_future, exception, on_failure, ) finally: del new_future, old_future mpi4py-4.0.3/src/mpi4py/futures/aplus.pyi000066400000000000000000000025331475341043600203270ustar00rootroot00000000000000from typing import Generic from typing import Callable from typing import overload from ._base import Future from ..typing import S, T, U class ThenableFuture(Future[T], Generic[T]): def then(self, on_success: Callable[[T], T | Future[T]] | None = None, on_failure: Callable[[BaseException], T | BaseException] | None = None, ) -> ThenableFuture[T]: ... def catch(self, on_failure: Callable[[BaseException], T | BaseException] | None = None, ) -> ThenableFuture[T]: ... @overload def then( future: Future[T], on_success: None = None, on_failure: None = None, ) -> Future[T]: ... @overload def then( future: Future[T], on_success: Callable[[T], S | Future[S]], on_failure: None = None, ) -> Future[S]: ... @overload def then( future: Future[T], on_success: None = None, *, on_failure: Callable[[BaseException], BaseException | U | Future[U]], ) -> Future[T | U]: ... 
@overload def then( future: Future[T], on_success: Callable[[T], S | Future[S]], on_failure: Callable[[BaseException], BaseException | U | Future[U]], ) -> Future[S | U]: ... @overload def catch( future: Future[T], on_failure: None = None, ) -> Future[T]: ... @overload def catch( future: Future[T], on_failure: Callable[[BaseException], BaseException | U | Future[U]], ) -> Future[T | U]: ... mpi4py-4.0.3/src/mpi4py/futures/pool.py000066400000000000000000000314211475341043600200010ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com """Implements MPIPoolExecutor.""" import sys import time import functools import itertools import threading from ._base import Future from ._base import Executor from ._base import as_completed from . import _core class MPIPoolExecutor(Executor): """MPI-based asynchronous executor.""" Future = Future def __init__(self, max_workers=None, initializer=None, initargs=(), **kwargs): """Initialize a new MPIPoolExecutor instance. Args: max_workers: The maximum number of MPI processes that can be used to execute the given calls. If ``None`` or not given then the number of worker processes will be determined from the MPI universe size attribute if defined, otherwise a single worker process will be spawned. initializer: An callable used to initialize workers processes. initargs: A tuple of arguments to pass to the initializer. Keyword Args: python_exe: Path to Python executable used to spawn workers. python_args: Command line arguments to pass to Python executable. mpi_info: Mapping or iterable with ``(key, value)`` pairs. globals: Mapping with global variables to set in workers. main: If ``False``, do not import ``__main__`` in workers. path: List of paths to append to ``sys.path`` in workers. wdir: Path to set current working directory in workers. env: Environment variables to update ``os.environ`` in workers. use_pkl5: If ``True``, use pickle5 out-of-band for communication. """ if max_workers is not None: max_workers = int(max_workers) if max_workers <= 0: raise ValueError("max_workers must be greater than 0") kwargs['max_workers'] = max_workers if initializer is not None: if not callable(initializer): raise TypeError("initializer must be a callable") kwargs['initializer'] = initializer kwargs['initargs'] = tuple(initargs) self._options = kwargs self._shutdown = False self._broken = None self._lock = threading.Lock() self._pool = None _make_pool = staticmethod(_core.WorkerPool) def _bootstrap(self): if self._pool is None: self._pool = self._make_pool(self) @property def _max_workers(self): return self.num_workers @property def num_workers(self): """Number or worker processes.""" with self._lock: if self._broken: return 0 if self._shutdown: return 0 self._bootstrap() self._pool.wait() return self._pool.size def bootup(self, wait=True): """Allocate executor resources eagerly. Args: wait: If ``True`` then bootup will not return until the executor resources are ready to process submissions. """ with self._lock: if self._shutdown: raise RuntimeError("cannot bootup after shutdown") self._bootstrap() if wait: self._pool.wait() return self def submit(self, fn, *args, **kwargs): """Submit a callable to be executed with the given arguments. Schedule the callable to be executed as ``fn(*args, **kwargs)`` and return a `Future` instance representing the execution of the callable. Returns: A `Future` representing the given call. 
""" # pylint: disable=arguments-differ with self._lock: if self._broken: raise _core.BrokenExecutor(self._broken) if self._shutdown: raise RuntimeError("cannot submit after shutdown") self._bootstrap() future = self.Future() task = (fn, args, kwargs) self._pool.push((future, task)) return future if sys.version_info >= (3, 8): # pragma: no branch submit.__text_signature__ = '($self, fn, /, *args, **kwargs)' def map(self, fn, *iterables, timeout=None, chunksize=1, unordered=False): """Return an iterator equivalent to ``map(fn, *iterables)``. Args: fn: A callable that will take as many arguments as there are passed iterables. iterables: Iterables yielding positional arguments to be passed to the callable. timeout: The maximum number of seconds to wait. If ``None``, then there is no limit on the wait time. chunksize: The size of the chunks the iterable will be broken into before being passed to a worker process. unordered: If ``True``, yield results out-of-order, as completed. Returns: An iterator equivalent to built-in ``map(func, *iterables)`` but the calls may be evaluated out-of-order. Raises: TimeoutError: If the entire result iterator could not be generated before the given timeout. Exception: If ``fn(*args)`` raises for any values. """ return self.starmap(fn, zip(*iterables), timeout, chunksize, unordered) def starmap(self, fn, iterable, timeout=None, chunksize=1, unordered=False): """Return an iterator equivalent to ``itertools.starmap(...)``. Args: fn: A callable that will take positional argument from iterable. iterable: An iterable yielding ``args`` tuples to be used as positional arguments to call ``fn(*args)``. timeout: The maximum number of seconds to wait. If ``None``, then there is no limit on the wait time. chunksize: The size of the chunks the iterable will be broken into before being passed to a worker process. unordered: If ``True``, yield results out-of-order, as completed. Returns: An iterator equivalent to ``itertools.starmap(fn, iterable)`` but the calls may be evaluated out-of-order. Raises: TimeoutError: If the entire result iterator could not be generated before the given timeout. Exception: If ``fn(*args)`` raises for any values. """ # pylint: disable=too-many-arguments,too-many-positional-arguments if chunksize < 1: raise ValueError("chunksize must be >= 1.") if chunksize == 1: return _starmap_helper(self.submit, fn, iterable, timeout, unordered) else: return _starmap_chunks(self.submit, fn, iterable, timeout, unordered, chunksize) def shutdown(self, wait=True, *, cancel_futures=False): """Clean-up the resources associated with the executor. It is safe to call this method several times. Otherwise, no other methods can be called after this one. Args: wait: If ``True`` then shutdown will not return until all running futures have finished executing and the resources used by the executor have been reclaimed. cancel_futures: If ``True`` then shutdown will cancel all pending futures. Futures that are completed or running will not be cancelled. 
""" with self._lock: if not self._shutdown: self._shutdown = True if self._pool is not None: self._pool.done() if cancel_futures: if self._pool is not None: self._pool.cancel() pool = None if wait: pool = self._pool self._pool = None if pool is not None: pool.join() def _starmap_helper(submit, function, iterable, timeout, unordered): timer = time.monotonic end_time = sys.float_info.max if timeout is not None: end_time = timeout + timer() futures = [submit(function, *args) for args in iterable] if unordered: futures = set(futures) def result(future, timeout=None): try: try: return future.result(timeout) finally: future.cancel() finally: del future def result_iterator(): try: if unordered: if timeout is None: iterator = as_completed(futures) else: iterator = as_completed(futures, end_time - timer()) for future in iterator: futures.remove(future) future = [future] yield result(future.pop()) else: futures.reverse() if timeout is None: while futures: yield result(futures.pop()) else: while futures: yield result(futures.pop(), end_time - timer()) finally: while futures: futures.pop().cancel() return result_iterator() def _apply_chunks(function, chunk): return [function(*args) for args in chunk] def _build_chunks(chunksize, iterable): iterable = iter(iterable) while True: chunk = tuple(itertools.islice(iterable, chunksize)) if not chunk: return yield (chunk,) def _chain_from_iterable_of_lists(iterable): for item in iterable: item.reverse() while item: yield item.pop() def _starmap_chunks(submit, function, iterable, timeout, unordered, chunksize): # pylint: disable=too-many-arguments,too-many-positional-arguments function = functools.partial(_apply_chunks, function) iterable = _build_chunks(chunksize, iterable) result = _starmap_helper(submit, function, iterable, timeout, unordered) return _chain_from_iterable_of_lists(result) class MPICommExecutor: """Context manager for `MPIPoolExecutor`. This context manager splits a MPI (intra)communicator in two disjoint sets: a single master process and the remaining worker processes. These sets are then connected through an intercommunicator. The target of the ``with`` statement is assigned either an `MPIPoolExecutor` instance (at the master) or ``None`` (at the workers). Example:: with MPICommExecutor(MPI.COMM_WORLD, root=0) as executor: if executor is not None: # master process executor.submit(...) executor.map(...) """ def __init__(self, comm=None, root=0, **kwargs): """Initialize a new MPICommExecutor instance. Args: comm: MPI (intra)communicator. root: Designated master process. Raises: ValueError: If the communicator has wrong kind or the root value is not in the expected range. 
""" if comm is None: comm = _core.MPI.COMM_WORLD if comm.Is_inter(): raise ValueError("expecting an intracommunicator") if root < 0 or root >= comm.Get_size(): raise ValueError("expecting root in range(comm.size)") if _core.SharedPool is not None: comm = _core.MPI.COMM_WORLD root = comm.Get_rank() self._comm = comm self._root = root self._options = kwargs self._executor = None def __enter__(self): """Return `MPIPoolExecutor` instance at the root.""" if self._executor is not None: raise RuntimeError("__enter__") comm = self._comm root = self._root options = self._options executor = None if comm.Get_rank() == root: executor = MPIPoolExecutor(**options) _core._comm_executor_helper(executor, comm, root) self._executor = executor return executor def __exit__(self, *args): """Shutdown `MPIPoolExecutor` instance at the root.""" executor = self._executor self._executor = None if executor is not None: executor.shutdown(wait=True) return False else: return True class ThreadPoolExecutor(MPIPoolExecutor): """`MPIPoolExecutor` subclass using a pool of threads.""" _make_pool = staticmethod(_core.ThreadPool) class ProcessPoolExecutor(MPIPoolExecutor): """`MPIPoolExecutor` subclass using a pool of processes.""" _make_pool = staticmethod(_core.SpawnPool) def get_comm_workers(): """Access an intracommunicator grouping MPI worker processes.""" return _core.get_comm_server() mpi4py-4.0.3/src/mpi4py/futures/pool.pyi000066400000000000000000000051141475341043600201520ustar00rootroot00000000000000import sys from typing import Any from typing import Callable, Iterable, Iterator, Mapping, Sequence if sys.version_info >= (3, 10): from typing import ParamSpec from typing import TypeAlias else: from typing_extensions import ParamSpec from typing_extensions import TypeAlias if sys.version_info >= (3, 11): from typing import Self else: from typing_extensions import Self from ..MPI import Intracomm, COMM_WORLD from ._base import Executor, Future from ..typing import T _P = ParamSpec("_P") class MPIPoolExecutor(Executor): Future: TypeAlias = Future def __init__( self, max_workers: int | None = None, initializer: Callable[..., object] | None = None, initargs: Iterable[Any] = (), *, python_exe: str = ..., python_args: Sequence[str] = ..., mpi_info: Mapping[str, str] | Iterable[tuple[str, str]] = ..., globals: Mapping[str, str] | Iterable[tuple[str, str]] = ..., main: bool = True, path: Sequence[str] = ..., wdir: str = ..., env: Mapping[str, str] | Iterable[tuple[str, str]] = ..., **kwargs: Any, ) -> None: ... @property def num_workers(self) -> int: ... def bootup( self, wait: bool = True, ) -> Self: ... if sys.version_info >= (3, 9): def submit( self, __fn: Callable[_P, T], *args: _P.args, **kwargs: _P.kwargs, ) -> Future[T]: ... else: def submit( self, fn: Callable[_P, T], *args: _P.args, **kwargs: _P.kwargs, ) -> Future[T]: ... def map( self, fn: Callable[..., T], *iterables: Iterable[Any], timeout: float | None = None, chunksize: int = 1, unordered: bool = False, ) -> Iterator[T]: ... def starmap( self, fn: Callable[..., T], iterable: Iterable[Any], timeout: float | None = None, chunksize: int = 1, unordered: bool = False, ) -> Iterator[T]: ... def shutdown( self, wait: bool = True, *, cancel_futures: bool = False, ) -> None: ... class MPICommExecutor: def __init__( self, comm: Intracomm | None = COMM_WORLD, root: int = 0, **kwargs: Any, ) -> None: ... def __enter__(self) -> Self | None: ... def __exit__(self, *args: object) -> bool | None: ... class ThreadPoolExecutor(MPIPoolExecutor): ... 
class ProcessPoolExecutor(MPIPoolExecutor): ... def get_comm_workers() -> Intracomm: ... mpi4py-4.0.3/src/mpi4py/futures/server.py000066400000000000000000000004331475341043600203350ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com """Entry point for MPI workers.""" def main(): """Entry point for worker processes.""" # pylint: disable=import-outside-toplevel from . import _core _core.server_main() if __name__ == '__main__': main() mpi4py-4.0.3/src/mpi4py/futures/server.pyi000066400000000000000000000000301475341043600204770ustar00rootroot00000000000000def main() -> None: ... mpi4py-4.0.3/src/mpi4py/futures/util.py000066400000000000000000000121641475341043600200100ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com """Utilities for handling futures.""" # pylint: disable=too-few-public-methods # pylint: disable=broad-exception-caught import collections as _collections import functools as _functools import threading as _threading from . import _base __all__ = [ 'collect', 'compose', ] def collect(fs): """Gather a collection of futures in a new future. Args: fs: Collection of futures. Returns: New future producing as result a list with results from *fs*. """ return _Collect()(fs) class _Collect: def __init__(self): self.lock = _threading.RLock() self.future = None self.result = None self.pending = None def __call__(self, fs): pending = _collections.defaultdict(list) for index, item in enumerate(fs): pending[item].append(index) if pending: self.future = future = next(iter(pending)).__class__() self.result = [None] * sum(map(len, pending.values())) self.pending = pending future.add_done_callback(self._done_cb) for item in list(pending): item.add_done_callback(self._item_cb) else: future = _base.Future() future.set_result([]) return future def _item_cb(self, item): with self.lock: future = self.future result = self.result pending = self.pending if future is None: return if item.cancelled(): future.cancel() return if item.exception() is not None: if future.set_running_or_notify_cancel(): # pragma: no branch future.set_exception(item.exception()) return for index in pending.pop(item): result[index] = item.result() if not pending: if future.set_running_or_notify_cancel(): # pragma: no branch future.set_result(result) def _done_cb(self, future): with self.lock: for item in self.pending: item.cancel() self.future = None self.result = None self.pending = None if future.cancelled(): future.set_running_or_notify_cancel() def compose(future, resulthook=None, excepthook=None): """Compose the completion of a future with result and exception handlers. Args: future: Input future instance. resulthook: Function to be called once the input future completes with success. Once the input future finish running with success, its result value is the input argument for *resulthook*. The result of *resulthook* is set as the result of the output future. If *resulthook* is ``None``, the output future is completed directly with the result of the input future. excepthook: Function to be called once the input future completes with failure. Once the input future finish running with failure, its exception value is the input argument for *excepthook*. If *excepthook* returns an `python:Exception` instance, it is set as the exception of the output future. Otherwise, the result of *excepthook* is set as the result of the output future. If *excepthook* is ``None``, the output future is set as failed with the exception from the input future. 
Returns: Output future instance to be completed once the input future is completed and either *resulthook* or *excepthook* finish executing. """ new_future = future.__class__() new_done_cb = _functools.partial(_compose_cancel, [future]) new_future.add_done_callback(new_done_cb) context = [(new_future, resulthook, excepthook)] done_cb = _functools.partial(_compose_complete, context) future.add_done_callback(done_cb) return new_future def _compose_cancel(context, new_future): old_future = context.pop() try: if new_future.cancelled(): new_future.set_running_or_notify_cancel() old_future.cancel() finally: new_future = None old_future = None def _compose_complete(context, old_future): new_future, resulthook, excepthook = context.pop() try: if old_future.cancelled(): new_future.cancel() elif old_future.exception() is not None: reason = old_future.exception() if excepthook: reason = excepthook(reason) if isinstance(reason, BaseException): new_future.set_exception(reason) else: new_future.set_result(reason) else: result = old_future.result() if resulthook: result = resulthook(result) new_future.set_result(result) except BaseException as exception: new_future.set_exception(exception) finally: new_future = None old_future = None result = resulthook = None reason = excepthook = None mpi4py-4.0.3/src/mpi4py/futures/util.pyi000066400000000000000000000015271475341043600201620ustar00rootroot00000000000000from typing import Collection from typing import Callable from typing import overload from ._base import Future from ..typing import T, U, V __all__: list[str] = [ 'collect', 'compose', ] def collect( fs: Collection[Future[T]], ) -> Future[list[T]]: ... @overload def compose( future: Future[T], resulthook: None = None, excepthook: None = None, ) -> Future[T]: ... @overload def compose( future: Future[T], resulthook: Callable[[T], U], excepthook: None = None, ) -> Future[U]: ... @overload def compose( future: Future[T], resulthook: None = None, *, excepthook: Callable[[BaseException], BaseException | V], ) -> Future[T | V]: ... @overload def compose( future: Future[T], resulthook: Callable[[T], U], excepthook: Callable[[BaseException], BaseException | V], ) -> Future[U | V]: ... 
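# A minimal, illustrative sketch of the mpi4py.futures API implemented above
# (hedged; not shipped with the package): it obtains MPI worker processes via
# MPIPoolExecutor (spawned, or taken from the shared pool when run through
# "python -m mpi4py.futures"), then combines results with collect() and
# compose(). The function name "square" and the launch command are assumed
# placeholders; run directly where MPI dynamic spawning works, or e.g. as
# "mpiexec -n 5 python -m mpi4py.futures thisscript.py".
from mpi4py.futures import MPIPoolExecutor, collect, compose

def square(x):
    return x * x

if __name__ == '__main__':
    with MPIPoolExecutor(max_workers=4) as executor:
        # Submit independent tasks and gather all results into one future.
        futures = [executor.submit(square, i) for i in range(8)]
        total = compose(collect(futures), resulthook=sum)
        print(total.result())                         # 140
        # map() evaluates fn over the iterables; results arrive in order here.
        print(list(executor.map(square, range(4))))   # [0, 1, 4, 9]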
mpi4py-4.0.3/src/mpi4py/include/000077500000000000000000000000001475341043600164035ustar00rootroot00000000000000mpi4py-4.0.3/src/mpi4py/include/mpi4py/000077500000000000000000000000001475341043600176255ustar00rootroot00000000000000mpi4py-4.0.3/src/mpi4py/include/mpi4py/mpi.pxi000066400000000000000000000001241475341043600211310ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com include "../../libmpi.pxd" mpi4py-4.0.3/src/mpi4py/include/mpi4py/mpi4py.h000066400000000000000000000015421475341043600212220ustar00rootroot00000000000000/* Author: Lisandro Dalcin */ /* Contact: dalcinl@gmail.com */ #ifndef MPI4PY_H #define MPI4PY_H #include #if defined(MSMPI_VER) && !defined(PyMPI_HAVE_MPI_Message) # if defined(MPI_MESSAGE_NULL) # define PyMPI_HAVE_MPI_Message 1 # endif #endif #if defined(MSMPI_VER) && !defined(PyMPI_HAVE_MPI_Session) # if defined(MPI_SESSION_NULL) # define PyMPI_HAVE_MPI_Session 1 # endif #endif #if (MPI_VERSION < 3) && !defined(PyMPI_HAVE_MPI_Message) typedef void *PyMPI_MPI_Message; #define MPI_Message PyMPI_MPI_Message #endif #if (MPI_VERSION < 4) && !defined(PyMPI_HAVE_MPI_Session) typedef void *PyMPI_MPI_Session; #define MPI_Session PyMPI_MPI_Session #endif #if defined(MPI4PY_LIMITED_API) #include "pycapi.h" #else #include "../../MPI_api.h" #endif static int import_mpi4py(void) { return import_mpi4py__MPI(); } #endif /* MPI4PY_H */ mpi4py-4.0.3/src/mpi4py/include/mpi4py/mpi4py.i000066400000000000000000000043521475341043600212250ustar00rootroot00000000000000/* Author: Lisandro Dalcin */ /* Contact: dalcinl@gmail.com */ /* ---------------------------------------------------------------- */ #if SWIG_VERSION < 0x010328 %warn "SWIG version < 1.3.28 is not supported" #endif /* ---------------------------------------------------------------- */ %header %{ #include %} %init %{ if (import_mpi4py() < 0) return NULL; %} /* ---------------------------------------------------------------- */ %define %mpi4py_fragments(PyType, Type) /* --- AsPtr --- */ %fragment(SWIG_AsPtr_frag(Type),"header") { SWIGINTERN int SWIG_AsPtr_dec(Type)(SWIG_Object input, Type **p) { if (input == Py_None) { if (p) *p = NULL; return SWIG_OK; } else if (PyObject_TypeCheck(input,&PyMPI##PyType##_Type)) { if (p) *p = PyMPI##PyType##_Get(input); return SWIG_OK; } else { void *argp = NULL; int res = SWIG_ConvertPtr(input,&argp,%descriptor(p_##Type), 0); if (!SWIG_IsOK(res)) return res; if (!argp) return SWIG_ValueError; if (p) *p = %static_cast(argp,Type*); return SWIG_OK; } } } /* --- From --- */ %fragment(SWIG_From_frag(Type),"header") { SWIGINTERN SWIG_Object SWIG_From_dec(Type)(Type v) { return PyMPI##PyType##_New(v); } } %enddef /*mpi4py_fragments*/ /* ---------------------------------------------------------------- */ %define SWIG_TYPECHECK_MPI_Comm 400 %enddef %define SWIG_TYPECHECK_MPI_Datatype 401 %enddef %define SWIG_TYPECHECK_MPI_Request 402 %enddef %define SWIG_TYPECHECK_MPI_Message 403 %enddef %define SWIG_TYPECHECK_MPI_Status 404 %enddef %define SWIG_TYPECHECK_MPI_Op 405 %enddef %define SWIG_TYPECHECK_MPI_Group 406 %enddef %define SWIG_TYPECHECK_MPI_Info 407 %enddef %define SWIG_TYPECHECK_MPI_File 408 %enddef %define SWIG_TYPECHECK_MPI_Win 409 %enddef %define SWIG_TYPECHECK_MPI_Errhandler 410 %enddef %define SWIG_TYPECHECK_MPI_Session 411 %enddef /* ---------------------------------------------------------------- */ %define %mpi4py_typemap(PyType, Type) %types(Type*); %mpi4py_fragments(PyType, Type); %typemaps_asptrfromn(%checkcode(Type), Type); %enddef /*mpi4py_typemap*/ 
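/* Typical usage sketch from a user-written SWIG interface file (hedged;
 * the module name is an assumption):
 *
 *     %module example
 *     %include mpi4py/mpi4py.i
 *     %mpi4py_typemap(Comm, MPI_Comm);
 *
 * after which wrapped C functions taking MPI_Comm accept mpi4py.MPI.Comm
 * objects; Python None is converted to a NULL pointer by the AsPtr
 * fragment defined above.
 */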
/* ---------------------------------------------------------------- */ /* * Local Variables: * mode: C * End: */ mpi4py-4.0.3/src/mpi4py/include/mpi4py/pycapi.h000066400000000000000000000157501475341043600212710ustar00rootroot00000000000000/* Author: Lisandro Dalcin */ /* Contact: dalcinl@gmail.com */ #ifndef MPI4PY_PYCAPI_H #define MPI4PY_PYCAPI_H #include <mpi.h> #include <Python.h> #define _mpi4py_declare_pycapi(Type, star) \ static PyTypeObject *_mpi4py_PyMPI##Type = NULL; \ static PyObject *(*_mpi4py_PyMPI##Type##_New)(MPI_##Type star) = NULL; \ static MPI_##Type *(*_mpi4py_PyMPI##Type##_Get)(PyObject *) = NULL; #ifndef MPI4PY_LIMITED_API_SKIP_DATATYPE _mpi4py_declare_pycapi(Datatype,) #define PyMPIDatatype_Type (*_mpi4py_PyMPIDatatype) #define PyMPIDatatype_New _mpi4py_PyMPIDatatype_New #define PyMPIDatatype_Get _mpi4py_PyMPIDatatype_Get #endif #ifndef MPI4PY_LIMITED_API_SKIP_STATUS _mpi4py_declare_pycapi(Status,*) #define PyMPIStatus_Type (*_mpi4py_PyMPIStatus) #define PyMPIStatus_New _mpi4py_PyMPIStatus_New #define PyMPIStatus_Get _mpi4py_PyMPIStatus_Get #endif #ifndef MPI4PY_LIMITED_API_SKIP_REQUEST _mpi4py_declare_pycapi(Request,) #define PyMPIRequest_Type (*_mpi4py_PyMPIRequest) #define PyMPIRequest_New _mpi4py_PyMPIRequest_New #define PyMPIRequest_Get _mpi4py_PyMPIRequest_Get #endif #ifndef MPI4PY_LIMITED_API_SKIP_MESSAGE _mpi4py_declare_pycapi(Message,) #define PyMPIMessage_Type (*_mpi4py_PyMPIMessage) #define PyMPIMessage_New _mpi4py_PyMPIMessage_New #define PyMPIMessage_Get _mpi4py_PyMPIMessage_Get #endif #ifndef MPI4PY_LIMITED_API_SKIP_OP _mpi4py_declare_pycapi(Op,) #define PyMPIOp_Type (*_mpi4py_PyMPIOp) #define PyMPIOp_New _mpi4py_PyMPIOp_New #define PyMPIOp_Get _mpi4py_PyMPIOp_Get #endif #ifndef MPI4PY_LIMITED_API_SKIP_GROUP _mpi4py_declare_pycapi(Group,) #define PyMPIGroup_Type (*_mpi4py_PyMPIGroup) #define PyMPIGroup_New _mpi4py_PyMPIGroup_New #define PyMPIGroup_Get _mpi4py_PyMPIGroup_Get #endif #ifndef MPI4PY_LIMITED_API_SKIP_INFO _mpi4py_declare_pycapi(Info,) #define PyMPIInfo_Type (*_mpi4py_PyMPIInfo) #define PyMPIInfo_New _mpi4py_PyMPIInfo_New #define PyMPIInfo_Get _mpi4py_PyMPIInfo_Get #endif #ifndef MPI4PY_LIMITED_API_SKIP_ERRHANDLER _mpi4py_declare_pycapi(Errhandler,) #define PyMPIErrhandler_Type (*_mpi4py_PyMPIErrhandler) #define PyMPIErrhandler_New _mpi4py_PyMPIErrhandler_New #define PyMPIErrhandler_Get _mpi4py_PyMPIErrhandler_Get #endif #ifndef MPI4PY_LIMITED_API_SKIP_SESSION _mpi4py_declare_pycapi(Session,) #define PyMPISession_Type (*_mpi4py_PyMPISession) #define PyMPISession_New _mpi4py_PyMPISession_New #define PyMPISession_Get _mpi4py_PyMPISession_Get #endif #ifndef MPI4PY_LIMITED_API_SKIP_COMM _mpi4py_declare_pycapi(Comm,) #define PyMPIComm_Type (*_mpi4py_PyMPIComm) #define PyMPIComm_New _mpi4py_PyMPIComm_New #define PyMPIComm_Get _mpi4py_PyMPIComm_Get #endif #ifndef MPI4PY_LIMITED_API_SKIP_WIN _mpi4py_declare_pycapi(Win,) #define PyMPIWin_Type (*_mpi4py_PyMPIWin) #define PyMPIWin_New _mpi4py_PyMPIWin_New #define PyMPIWin_Get _mpi4py_PyMPIWin_Get #endif #ifndef MPI4PY_LIMITED_API_SKIP_FILE _mpi4py_declare_pycapi(File,) #define PyMPIFile_Type (*_mpi4py_PyMPIFile) #define PyMPIFile_New _mpi4py_PyMPIFile_New #define PyMPIFile_Get _mpi4py_PyMPIFile_Get #endif #undef _mpi4py_declare_pycapi static int _mpi4py_ImportType(PyObject *module, const char *type_name, PyTypeObject **type) { PyObject *attr = NULL; attr = PyObject_GetAttrString(module, type_name); if (!attr) goto fn_fail; if (!PyType_Check(attr)) { PyErr_Format(PyExc_TypeError, "%.200s.%.200s is not a type object", 
PyModule_GetName(module), type_name); goto fn_fail; } *type = (PyTypeObject *)attr; return 0; fn_fail: Py_DecRef(attr); return -1; } static int _mpi4py_ImportFunc(PyObject *module, const char *func_name, const char *signature, void (**func)(void)) { PyObject *pyxcapi = NULL; PyObject *capsule = NULL; union { void *obj; void (*fcn)(void); } ptr; pyxcapi = PyObject_GetAttrString(module, (char *)"__pyx_capi__"); if (!pyxcapi) goto fn_fail; capsule = PyDict_GetItemString(pyxcapi, func_name); if (!capsule) { PyErr_Format(PyExc_ImportError, "%.200s does not export expected C function %.200s", PyModule_GetName(module), func_name); goto fn_fail; } if (!PyCapsule_CheckExact(capsule)) { PyErr_Format(PyExc_TypeError, "%.200s.%.200s is not a capsule", PyModule_GetName(module), func_name); goto fn_fail; } if (!signature) { signature = PyCapsule_GetName(capsule); } if (!PyCapsule_IsValid(capsule, signature)) { PyErr_Format(PyExc_TypeError, "C function %.200s.%.200s has wrong signature " "(expected %.500s, got %.500s)", PyModule_GetName(module), func_name, signature, PyCapsule_GetName(capsule)); goto fn_fail; } ptr.obj = PyCapsule_GetPointer(capsule, signature); if (!ptr.obj) goto fn_fail; *func = ptr.fcn; Py_DecRef(pyxcapi); return 0; fn_fail: Py_DecRef(pyxcapi); return -1; } static int import_mpi4py_MPI(void) { PyObject *module = PyImport_ImportModule("mpi4py.MPI"); if (!module) goto fn_fail; #define _mpi4py_import_pycapi(Type) do { \ if (_mpi4py_ImportType(module, #Type, &_mpi4py_PyMPI##Type) < 0) \ goto fn_fail; \ if (_mpi4py_ImportFunc(module, "PyMPI" #Type "_New", NULL, \ (void (**)(void))&_mpi4py_PyMPI##Type##_New) < 0) \ goto fn_fail; \ if (_mpi4py_ImportFunc(module, "PyMPI" #Type "_Get", NULL, \ (void (**)(void))&_mpi4py_PyMPI##Type##_Get) < 0) \ goto fn_fail; \ } while (0) #ifndef MPI4PY_LIMITED_API_SKIP_DATATYPE _mpi4py_import_pycapi(Datatype); #endif #ifndef MPI4PY_LIMITED_API_SKIP_STATUS _mpi4py_import_pycapi(Status); #endif #ifndef MPI4PY_LIMITED_API_SKIP_REQUEST _mpi4py_import_pycapi(Request); #endif #ifndef MPI4PY_LIMITED_API_SKIP_MESSAGE _mpi4py_import_pycapi(Message); #endif #ifndef MPI4PY_LIMITED_API_SKIP_OP _mpi4py_import_pycapi(Op); #endif #ifndef MPI4PY_LIMITED_API_SKIP_GROUP _mpi4py_import_pycapi(Group); #endif #ifndef MPI4PY_LIMITED_API_SKIP_INFO _mpi4py_import_pycapi(Info); #endif #ifndef MPI4PY_LIMITED_API_SKIP_ERRHANDLER _mpi4py_import_pycapi(Errhandler); #endif #ifndef MPI4PY_LIMITED_API_SKIP_SESSION _mpi4py_import_pycapi(Session); #endif #ifndef MPI4PY_LIMITED_API_SKIP_COMM _mpi4py_import_pycapi(Comm); #endif #ifndef MPI4PY_LIMITED_API_SKIP_WIN _mpi4py_import_pycapi(Win); #endif #ifndef MPI4PY_LIMITED_API_SKIP_FILE _mpi4py_import_pycapi(File); #endif #undef _mpi4py_import_pycapi Py_DecRef(module); return 0; fn_fail: Py_DecRef(module); return -1; } #define __PYX_HAVE_API__mpi4py__MPI #define import_mpi4py__MPI import_mpi4py_MPI #endif /* MPI4PY_PYCAPI_H */ mpi4py-4.0.3/src/mpi4py/libmpi.pxd000066400000000000000000002074401475341043600167600ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com cdef extern from "<mpi.h>" nogil: #----------------------------------------------------------------- ctypedef long MPI_Aint ctypedef long long MPI_Offset #:= long ctypedef long long MPI_Count #:= MPI_Offset ctypedef struct MPI_Status: int MPI_SOURCE int MPI_TAG int MPI_ERROR ctypedef struct _mpi_datatype_t ctypedef _mpi_datatype_t* MPI_Datatype ctypedef struct _mpi_request_t ctypedef _mpi_request_t* MPI_Request ctypedef struct _mpi_message_t ctypedef _mpi_message_t* MPI_Message 
ctypedef struct _mpi_op_t ctypedef _mpi_op_t* MPI_Op ctypedef struct _mpi_group_t ctypedef _mpi_group_t* MPI_Group ctypedef struct _mpi_info_t ctypedef _mpi_info_t* MPI_Info ctypedef struct _mpi_errhandler_t ctypedef _mpi_errhandler_t* MPI_Errhandler ctypedef struct _mpi_session_t ctypedef _mpi_session_t* MPI_Session ctypedef struct _mpi_comm_t ctypedef _mpi_comm_t* MPI_Comm ctypedef struct _mpi_win_t ctypedef _mpi_win_t* MPI_Win ctypedef struct _mpi_file_t ctypedef _mpi_file_t* MPI_File #----------------------------------------------------------------- enum: MPI_UNDEFINED #:= -32766 enum: MPI_ANY_SOURCE #:= MPI_UNDEFINED enum: MPI_ANY_TAG #:= MPI_UNDEFINED enum: MPI_PROC_NULL #:= MPI_UNDEFINED enum: MPI_ROOT #:= MPI_PROC_NULL enum: MPI_IDENT #:= 1 enum: MPI_CONGRUENT #:= 2 enum: MPI_SIMILAR #:= 3 enum: MPI_UNEQUAL #:= 4 void* MPI_BOTTOM #:= 0 void* MPI_IN_PLACE #:= 0 enum: MPI_KEYVAL_INVALID #:= 0 enum: MPI_MAX_OBJECT_NAME #:= 64 #----------------------------------------------------------------- # Null datatype MPI_Datatype MPI_DATATYPE_NULL #:= 0 # MPI datatypes MPI_Datatype MPI_PACKED #:= MPI_DATATYPE_NULL MPI_Datatype MPI_BYTE #:= MPI_DATATYPE_NULL MPI_Datatype MPI_AINT #:= MPI_DATATYPE_NULL MPI_Datatype MPI_OFFSET #:= MPI_DATATYPE_NULL MPI_Datatype MPI_COUNT #:= MPI_DATATYPE_NULL # Elementary C datatypes MPI_Datatype MPI_CHAR #:= MPI_DATATYPE_NULL MPI_Datatype MPI_WCHAR #:= MPI_DATATYPE_NULL MPI_Datatype MPI_SIGNED_CHAR #:= MPI_DATATYPE_NULL MPI_Datatype MPI_SHORT #:= MPI_DATATYPE_NULL MPI_Datatype MPI_INT #:= MPI_DATATYPE_NULL MPI_Datatype MPI_LONG #:= MPI_DATATYPE_NULL MPI_Datatype MPI_LONG_LONG #:= MPI_DATATYPE_NULL MPI_Datatype MPI_LONG_LONG_INT #:= MPI_DATATYPE_NULL MPI_Datatype MPI_UNSIGNED_CHAR #:= MPI_DATATYPE_NULL MPI_Datatype MPI_UNSIGNED_SHORT #:= MPI_DATATYPE_NULL MPI_Datatype MPI_UNSIGNED #:= MPI_DATATYPE_NULL MPI_Datatype MPI_UNSIGNED_LONG #:= MPI_DATATYPE_NULL MPI_Datatype MPI_UNSIGNED_LONG_LONG #:= MPI_DATATYPE_NULL MPI_Datatype MPI_FLOAT #:= MPI_DATATYPE_NULL MPI_Datatype MPI_DOUBLE #:= MPI_DATATYPE_NULL MPI_Datatype MPI_LONG_DOUBLE #:= MPI_DATATYPE_NULL # C99 datatypes MPI_Datatype MPI_C_BOOL #:= MPI_DATATYPE_NULL MPI_Datatype MPI_INT8_T #:= MPI_DATATYPE_NULL MPI_Datatype MPI_INT16_T #:= MPI_DATATYPE_NULL MPI_Datatype MPI_INT32_T #:= MPI_DATATYPE_NULL MPI_Datatype MPI_INT64_T #:= MPI_DATATYPE_NULL MPI_Datatype MPI_UINT8_T #:= MPI_DATATYPE_NULL MPI_Datatype MPI_UINT16_T #:= MPI_DATATYPE_NULL MPI_Datatype MPI_UINT32_T #:= MPI_DATATYPE_NULL MPI_Datatype MPI_UINT64_T #:= MPI_DATATYPE_NULL MPI_Datatype MPI_C_COMPLEX #:= MPI_DATATYPE_NULL MPI_Datatype MPI_C_FLOAT_COMPLEX #:= MPI_DATATYPE_NULL MPI_Datatype MPI_C_DOUBLE_COMPLEX #:= MPI_DATATYPE_NULL MPI_Datatype MPI_C_LONG_DOUBLE_COMPLEX #:= MPI_DATATYPE_NULL # C++ datatypes MPI_Datatype MPI_CXX_BOOL #:= MPI_DATATYPE_NULL MPI_Datatype MPI_CXX_FLOAT_COMPLEX #:= MPI_DATATYPE_NULL MPI_Datatype MPI_CXX_DOUBLE_COMPLEX #:= MPI_DATATYPE_NULL MPI_Datatype MPI_CXX_LONG_DOUBLE_COMPLEX #:= MPI_DATATYPE_NULL # C datatypes for reduction operations MPI_Datatype MPI_SHORT_INT #:= MPI_DATATYPE_NULL MPI_Datatype MPI_2INT #:= MPI_DATATYPE_NULL MPI_Datatype MPI_LONG_INT #:= MPI_DATATYPE_NULL MPI_Datatype MPI_FLOAT_INT #:= MPI_DATATYPE_NULL MPI_Datatype MPI_DOUBLE_INT #:= MPI_DATATYPE_NULL MPI_Datatype MPI_LONG_DOUBLE_INT #:= MPI_DATATYPE_NULL # Elementary Fortran datatypes MPI_Datatype MPI_CHARACTER #:= MPI_DATATYPE_NULL MPI_Datatype MPI_LOGICAL #:= MPI_DATATYPE_NULL MPI_Datatype MPI_INTEGER #:= MPI_DATATYPE_NULL MPI_Datatype MPI_REAL #:= 
MPI_DATATYPE_NULL MPI_Datatype MPI_DOUBLE_PRECISION #:= MPI_DATATYPE_NULL MPI_Datatype MPI_COMPLEX #:= MPI_DATATYPE_NULL MPI_Datatype MPI_DOUBLE_COMPLEX #:= MPI_DATATYPE_NULL # Size-specific Fortran datatypes MPI_Datatype MPI_LOGICAL1 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_LOGICAL2 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_LOGICAL4 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_LOGICAL8 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_INTEGER1 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_INTEGER2 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_INTEGER4 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_INTEGER8 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_INTEGER16 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_REAL2 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_REAL4 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_REAL8 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_REAL16 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_COMPLEX4 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_COMPLEX8 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_COMPLEX16 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_COMPLEX32 #:= MPI_DATATYPE_NULL int MPI_Get_address(void*, MPI_Aint*) #:= MPI_Address MPI_Aint MPI_Aint_add(MPI_Aint, MPI_Aint) MPI_Aint MPI_Aint_diff(MPI_Aint, MPI_Aint) int MPI_Type_dup(MPI_Datatype, MPI_Datatype*) int MPI_Type_contiguous(int, MPI_Datatype, MPI_Datatype*) int MPI_Type_vector(int, int, int, MPI_Datatype, MPI_Datatype*) int MPI_Type_indexed(int, int[], int[], MPI_Datatype, MPI_Datatype*) int MPI_Type_create_indexed_block(int, int, int[], MPI_Datatype, MPI_Datatype*) enum: MPI_ORDER_C #:= 0 enum: MPI_ORDER_FORTRAN #:= 1 int MPI_Type_create_subarray(int, int[], int[], int[], int, MPI_Datatype, MPI_Datatype*) enum: MPI_DISTRIBUTE_NONE #:= 0 enum: MPI_DISTRIBUTE_BLOCK #:= 1 enum: MPI_DISTRIBUTE_CYCLIC #:= 2 enum: MPI_DISTRIBUTE_DFLT_DARG #:= 4 int MPI_Type_create_darray(int, int, int, int[], int[], int[], int[], int, MPI_Datatype, MPI_Datatype*) int MPI_Type_create_hvector(int, int, MPI_Aint, MPI_Datatype, MPI_Datatype*) #:= MPI_Type_hvector int MPI_Type_create_hindexed(int, int[], MPI_Aint[], MPI_Datatype, MPI_Datatype*) #:= MPI_Type_hindexed int MPI_Type_create_hindexed_block(int, int, MPI_Aint[], MPI_Datatype, MPI_Datatype*) int MPI_Type_create_struct(int, int[], MPI_Aint[], MPI_Datatype[], MPI_Datatype*) #:= MPI_Type_struct int MPI_Type_create_resized(MPI_Datatype, MPI_Aint, MPI_Aint, MPI_Datatype*) int MPI_Type_size(MPI_Datatype, int*) int MPI_Type_get_extent(MPI_Datatype, MPI_Aint*, MPI_Aint*) int MPI_Type_get_true_extent(MPI_Datatype, MPI_Aint*, MPI_Aint*) int MPI_Type_size_x(MPI_Datatype, MPI_Count*) int MPI_Type_get_extent_x(MPI_Datatype, MPI_Count*, MPI_Count*) int MPI_Type_get_true_extent_x(MPI_Datatype, MPI_Count*, MPI_Count*) int MPI_Type_create_f90_integer(int, MPI_Datatype*) int MPI_Type_create_f90_real(int, int, MPI_Datatype*) int MPI_Type_create_f90_complex(int, int, MPI_Datatype*) enum: MPI_TYPECLASS_INTEGER #:= MPI_UNDEFINED enum: MPI_TYPECLASS_REAL #:= MPI_UNDEFINED enum: MPI_TYPECLASS_COMPLEX #:= MPI_UNDEFINED int MPI_Type_match_size(int, int, MPI_Datatype*) int MPI_Type_get_value_index(MPI_Datatype, MPI_Datatype, MPI_Datatype*) int MPI_Type_commit(MPI_Datatype*) int MPI_Type_free(MPI_Datatype*) enum: MPI_COMBINER_NAMED #:= MPI_UNDEFINED enum: MPI_COMBINER_DUP #:= MPI_UNDEFINED enum: MPI_COMBINER_CONTIGUOUS #:= MPI_UNDEFINED enum: MPI_COMBINER_VECTOR #:= MPI_UNDEFINED enum: MPI_COMBINER_HVECTOR #:= MPI_UNDEFINED enum: MPI_COMBINER_INDEXED #:= MPI_UNDEFINED enum: MPI_COMBINER_HINDEXED #:= MPI_UNDEFINED enum: MPI_COMBINER_INDEXED_BLOCK #:= MPI_UNDEFINED enum: MPI_COMBINER_HINDEXED_BLOCK #:= 
MPI_UNDEFINED enum: MPI_COMBINER_STRUCT #:= MPI_UNDEFINED enum: MPI_COMBINER_SUBARRAY #:= MPI_UNDEFINED enum: MPI_COMBINER_DARRAY #:= MPI_UNDEFINED enum: MPI_COMBINER_F90_REAL #:= MPI_UNDEFINED enum: MPI_COMBINER_F90_COMPLEX #:= MPI_UNDEFINED enum: MPI_COMBINER_F90_INTEGER #:= MPI_UNDEFINED enum: MPI_COMBINER_RESIZED #:= MPI_UNDEFINED enum: MPI_COMBINER_VALUE_INDEX #:= MPI_COMBINER_NAMED int MPI_Type_get_envelope(MPI_Datatype, int*, int*, int*, int*) int MPI_Type_get_contents(MPI_Datatype, int, int, int, int[], MPI_Aint[], MPI_Datatype[]) int MPI_Pack(void*, int, MPI_Datatype, void*, int, int*, MPI_Comm) int MPI_Unpack(void*, int, int*, void*, int, MPI_Datatype, MPI_Comm) int MPI_Pack_size(int, MPI_Datatype, MPI_Comm, int*) int MPI_Pack_external(char[], void*, int, MPI_Datatype, void*, MPI_Aint, MPI_Aint*) int MPI_Unpack_external(char[], void*, MPI_Aint, MPI_Aint*, void*, int, MPI_Datatype) int MPI_Pack_external_size(char[], int, MPI_Datatype, MPI_Aint*) int MPI_Type_get_name(MPI_Datatype, char[], int*) int MPI_Type_set_name(MPI_Datatype, char[]) int MPI_Type_get_attr(MPI_Datatype, int, void*, int*) int MPI_Type_set_attr(MPI_Datatype, int, void*) int MPI_Type_delete_attr(MPI_Datatype, int) ctypedef int MPI_Type_copy_attr_function(MPI_Datatype,int,void*,void*,void*,int*) ctypedef int MPI_Type_delete_attr_function(MPI_Datatype,int,void*,void*) MPI_Type_copy_attr_function* MPI_TYPE_NULL_COPY_FN #:= 0 MPI_Type_copy_attr_function* MPI_TYPE_DUP_FN #:= 0 MPI_Type_delete_attr_function* MPI_TYPE_NULL_DELETE_FN #:= 0 int MPI_Type_create_keyval(MPI_Type_copy_attr_function*, MPI_Type_delete_attr_function*, int*, void*) int MPI_Type_free_keyval(int*) # MPI-4 large count functions int MPI_Type_contiguous_c(MPI_Count, MPI_Datatype, MPI_Datatype*) int MPI_Type_vector_c(MPI_Count, MPI_Count, MPI_Count, MPI_Datatype, MPI_Datatype*) int MPI_Type_indexed_c(MPI_Count, MPI_Count[], MPI_Count[], MPI_Datatype, MPI_Datatype*) int MPI_Type_create_indexed_block_c(MPI_Count, MPI_Count, MPI_Count[], MPI_Datatype, MPI_Datatype*) int MPI_Type_create_subarray_c(int, MPI_Count[], MPI_Count[], MPI_Count[], int, MPI_Datatype, MPI_Datatype*) int MPI_Type_create_darray_c(int, int, int, MPI_Count[], int[], int[], int[], int, MPI_Datatype, MPI_Datatype*) int MPI_Type_create_hvector_c(MPI_Count, MPI_Count, MPI_Count, MPI_Datatype, MPI_Datatype*) int MPI_Type_create_hindexed_c(MPI_Count, MPI_Count[], MPI_Count[], MPI_Datatype, MPI_Datatype*) int MPI_Type_create_hindexed_block_c(MPI_Count, MPI_Count, MPI_Count[], MPI_Datatype, MPI_Datatype*) int MPI_Type_create_struct_c(MPI_Count, MPI_Count[], MPI_Count[], MPI_Datatype[], MPI_Datatype*) int MPI_Type_create_resized_c(MPI_Datatype, MPI_Count, MPI_Count, MPI_Datatype*) int MPI_Type_size_c(MPI_Datatype, MPI_Count*) #:= MPI_Type_size_x int MPI_Type_get_extent_c(MPI_Datatype, MPI_Count*, MPI_Count*) #:= MPI_Type_get_extent_x int MPI_Type_get_true_extent_c(MPI_Datatype, MPI_Count*, MPI_Count*) #:= MPI_Type_get_true_extent_x int MPI_Type_get_envelope_c(MPI_Datatype, MPI_Count*, MPI_Count*, MPI_Count*, MPI_Count*, int*) int MPI_Type_get_contents_c(MPI_Datatype, MPI_Count, MPI_Count, MPI_Count, MPI_Count, int[], MPI_Aint[], MPI_Count[], MPI_Datatype[]) int MPI_Pack_c(void*, MPI_Count, MPI_Datatype, void*, MPI_Count, MPI_Count*, MPI_Comm) int MPI_Unpack_c(void*, MPI_Count, MPI_Count*, void*, MPI_Count, MPI_Datatype, MPI_Comm) int MPI_Pack_size_c(MPI_Count, MPI_Datatype, MPI_Comm, MPI_Count*) int MPI_Pack_external_c(char[], void*, MPI_Count, MPI_Datatype, void*, MPI_Count, MPI_Count*) int 
MPI_Unpack_external_c(char[], void*, MPI_Count, MPI_Count*, void*, MPI_Count, MPI_Datatype) int MPI_Pack_external_size_c(char[], MPI_Count, MPI_Datatype, MPI_Count*) #----------------------------------------------------------------- MPI_Status* MPI_STATUS_IGNORE #:= 0 MPI_Status* MPI_STATUSES_IGNORE #:= 0 int MPI_Get_count(MPI_Status*, MPI_Datatype, int*) int MPI_Get_elements(MPI_Status*, MPI_Datatype, int*) int MPI_Status_set_elements(MPI_Status*, MPI_Datatype, int) int MPI_Get_elements_x(MPI_Status*, MPI_Datatype, MPI_Count*) int MPI_Status_set_elements_x(MPI_Status*, MPI_Datatype, MPI_Count) int MPI_Test_cancelled(MPI_Status*, int*) int MPI_Status_set_cancelled(MPI_Status*, int) # MPI-4 large count functions int MPI_Get_count_c(MPI_Status*, MPI_Datatype, MPI_Count*) int MPI_Get_elements_c(MPI_Status*, MPI_Datatype, MPI_Count*) #:= MPI_Get_elements_x int MPI_Status_set_elements_c(MPI_Status*, MPI_Datatype, MPI_Count) #:= MPI_Status_set_elements_x # MPI-4.1 getters and setters int MPI_Status_get_source(MPI_Status*, int*) int MPI_Status_set_source(MPI_Status*, int) int MPI_Status_get_tag(MPI_Status*, int*) int MPI_Status_set_tag(MPI_Status*, int) int MPI_Status_get_error(MPI_Status*, int*) int MPI_Status_set_error(MPI_Status*, int) #----------------------------------------------------------------- MPI_Request MPI_REQUEST_NULL #:= 0 int MPI_Wait(MPI_Request*, MPI_Status*) int MPI_Test(MPI_Request*, int*, MPI_Status*) int MPI_Request_get_status(MPI_Request, int*, MPI_Status*) int MPI_Waitany(int, MPI_Request[], int*, MPI_Status*) int MPI_Testany(int, MPI_Request[], int*, int*, MPI_Status*) int MPI_Request_get_status_any(int, MPI_Request[], int*, int*, MPI_Status*) int MPI_Waitall(int, MPI_Request[], MPI_Status[]) int MPI_Testall(int, MPI_Request[], int*, MPI_Status[]) int MPI_Request_get_status_all(int, MPI_Request [], int*, MPI_Status[]) int MPI_Waitsome(int, MPI_Request[], int*, int[], MPI_Status[]) int MPI_Testsome(int, MPI_Request[], int*, int[], MPI_Status[]) int MPI_Request_get_status_some(int, MPI_Request[], int*, int[], MPI_Status[]) int MPI_Cancel(MPI_Request*) int MPI_Request_free(MPI_Request*) int MPI_Start(MPI_Request*) int MPI_Startall(int, MPI_Request*) int MPI_Pready(int, MPI_Request) int MPI_Pready_range(int, int, MPI_Request) int MPI_Pready_list(int, int[], MPI_Request) int MPI_Parrived(MPI_Request, int, int*) ctypedef int MPI_Grequest_cancel_function(void*,int) ctypedef int MPI_Grequest_free_function(void*) ctypedef int MPI_Grequest_query_function(void*,MPI_Status*) int MPI_Grequest_start(MPI_Grequest_query_function*, MPI_Grequest_free_function*, MPI_Grequest_cancel_function*, void*, MPI_Request*) int MPI_Grequest_complete(MPI_Request) #----------------------------------------------------------------- MPI_Op MPI_OP_NULL #:= 0 MPI_Op MPI_MAX #:= MPI_OP_NULL MPI_Op MPI_MIN #:= MPI_OP_NULL MPI_Op MPI_SUM #:= MPI_OP_NULL MPI_Op MPI_PROD #:= MPI_OP_NULL MPI_Op MPI_LAND #:= MPI_OP_NULL MPI_Op MPI_BAND #:= MPI_OP_NULL MPI_Op MPI_LOR #:= MPI_OP_NULL MPI_Op MPI_BOR #:= MPI_OP_NULL MPI_Op MPI_LXOR #:= MPI_OP_NULL MPI_Op MPI_BXOR #:= MPI_OP_NULL MPI_Op MPI_MAXLOC #:= MPI_OP_NULL MPI_Op MPI_MINLOC #:= MPI_OP_NULL MPI_Op MPI_REPLACE #:= MPI_OP_NULL MPI_Op MPI_NO_OP #:= MPI_OP_NULL int MPI_Op_free(MPI_Op*) ctypedef void MPI_User_function(void*,void*,int*,MPI_Datatype*) int MPI_Op_create(MPI_User_function*, int, MPI_Op*) int MPI_Op_commutative(MPI_Op, int*) # MPI-4 large count functions ctypedef void MPI_User_function_c(void*,void*,MPI_Count*,MPI_Datatype*) int 
MPI_Op_create_c(MPI_User_function_c*, int, MPI_Op*) #----------------------------------------------------------------- MPI_Group MPI_GROUP_NULL #:= 0 MPI_Group MPI_GROUP_EMPTY #:= 1 int MPI_Group_free(MPI_Group*) int MPI_Group_size(MPI_Group, int*) int MPI_Group_rank(MPI_Group, int*) int MPI_Group_translate_ranks(MPI_Group, int, int[], MPI_Group, int[]) int MPI_Group_compare(MPI_Group, MPI_Group, int*) int MPI_Group_union(MPI_Group, MPI_Group, MPI_Group*) int MPI_Group_intersection(MPI_Group, MPI_Group, MPI_Group*) int MPI_Group_difference(MPI_Group, MPI_Group, MPI_Group*) int MPI_Group_incl(MPI_Group, int, int[], MPI_Group*) int MPI_Group_excl(MPI_Group, int, int[], MPI_Group*) int MPI_Group_range_incl(MPI_Group, int, int[][3], MPI_Group*) int MPI_Group_range_excl(MPI_Group, int, int[][3], MPI_Group*) #----------------------------------------------------------------- MPI_Info MPI_INFO_NULL #:= 0 MPI_Info MPI_INFO_ENV #:= MPI_INFO_NULL int MPI_Info_free(MPI_Info*) int MPI_Info_create(MPI_Info*) int MPI_Info_dup(MPI_Info, MPI_Info*) int MPI_Info_create_env(int, char*[], MPI_Info*) enum: MPI_MAX_INFO_KEY #:= 1 enum: MPI_MAX_INFO_VAL #:= 1 int MPI_Info_get_string(MPI_Info, char[], int*, char[], int*) int MPI_Info_set(MPI_Info, char[], char[]) int MPI_Info_delete(MPI_Info, char[]) int MPI_Info_get_nkeys(MPI_Info, int*) int MPI_Info_get_nthkey(MPI_Info, int, char[]) #----------------------------------------------------------------- MPI_Errhandler MPI_ERRHANDLER_NULL #:= 0 MPI_Errhandler MPI_ERRORS_RETURN #:= MPI_ERRHANDLER_NULL MPI_Errhandler MPI_ERRORS_ABORT #:= MPI_ERRHANDLER_NULL MPI_Errhandler MPI_ERRORS_ARE_FATAL #:= MPI_ERRHANDLER_NULL int MPI_Errhandler_free(MPI_Errhandler*) #----------------------------------------------------------------- MPI_Session MPI_SESSION_NULL #:= 0 enum: MPI_MAX_PSET_NAME_LEN #:= 1 int MPI_Session_init(MPI_Info, MPI_Errhandler, MPI_Session*) int MPI_Session_finalize(MPI_Session*) int MPI_Session_get_num_psets(MPI_Session, MPI_Info, int*) int MPI_Session_get_nth_pset(MPI_Session, MPI_Info, int, int*, char[]) int MPI_Session_get_info(MPI_Session, MPI_Info*) int MPI_Session_get_pset_info(MPI_Session, char[], MPI_Info*) int MPI_Group_from_session_pset(MPI_Session, char[], MPI_Group*) ctypedef void MPI_Session_errhandler_function(MPI_Session*,int*,...) 
int MPI_Session_create_errhandler(MPI_Session_errhandler_function*, MPI_Errhandler*) int MPI_Session_get_errhandler(MPI_Session, MPI_Errhandler*) int MPI_Session_set_errhandler(MPI_Session, MPI_Errhandler) int MPI_Session_call_errhandler(MPI_Session, int) #----------------------------------------------------------------- MPI_Comm MPI_COMM_NULL #:= 0 MPI_Comm MPI_COMM_SELF #:= MPI_COMM_NULL MPI_Comm MPI_COMM_WORLD #:= MPI_COMM_NULL int MPI_Comm_free(MPI_Comm*) int MPI_Comm_group(MPI_Comm, MPI_Group*) int MPI_Comm_size(MPI_Comm, int*) int MPI_Comm_rank(MPI_Comm, int*) int MPI_Comm_compare(MPI_Comm, MPI_Comm, int*) int MPI_Topo_test(MPI_Comm, int*) int MPI_Comm_test_inter(MPI_Comm, int*) int MPI_Abort(MPI_Comm, int) enum: MPI_BSEND_OVERHEAD #:= 0 void* MPI_BUFFER_AUTOMATIC #:= 0 int MPI_Buffer_attach(void*, int) int MPI_Buffer_detach(void*, int*) int MPI_Buffer_flush() int MPI_Buffer_iflush(MPI_Request*) int MPI_Comm_attach_buffer(MPI_Comm, void*, int) int MPI_Comm_detach_buffer(MPI_Comm, void*, int*) int MPI_Comm_flush_buffer(MPI_Comm) int MPI_Comm_iflush_buffer(MPI_Comm,MPI_Request*) int MPI_Session_attach_buffer(MPI_Session, void*, int) int MPI_Session_detach_buffer(MPI_Session, void*, int*) int MPI_Session_flush_buffer(MPI_Session) int MPI_Session_iflush_buffer(MPI_Session,MPI_Request*) int MPI_Send(void*, int, MPI_Datatype, int, int, MPI_Comm) int MPI_Recv(void*, int, MPI_Datatype, int, int, MPI_Comm, MPI_Status*) int MPI_Sendrecv(void*, int, MPI_Datatype, int, int, void*, int, MPI_Datatype, int, int, MPI_Comm, MPI_Status*) int MPI_Sendrecv_replace(void*, int, MPI_Datatype, int, int, int, int, MPI_Comm, MPI_Status*) int MPI_Bsend(void*, int, MPI_Datatype, int, int, MPI_Comm) int MPI_Ssend(void*, int, MPI_Datatype, int, int, MPI_Comm) int MPI_Rsend(void*, int, MPI_Datatype, int, int, MPI_Comm) int MPI_Isend(void*, int, MPI_Datatype, int, int, MPI_Comm, MPI_Request*) int MPI_Irecv(void*, int, MPI_Datatype, int, int, MPI_Comm, MPI_Request*) int MPI_Isendrecv(void*, int, MPI_Datatype, int, int, void*, int, MPI_Datatype, int, int, MPI_Comm, MPI_Request*) int MPI_Isendrecv_replace(void*, int, MPI_Datatype, int, int, int, int, MPI_Comm, MPI_Request*) int MPI_Ibsend(void*, int, MPI_Datatype, int, int, MPI_Comm, MPI_Request*) int MPI_Issend(void*, int, MPI_Datatype, int, int, MPI_Comm, MPI_Request*) int MPI_Irsend(void*, int, MPI_Datatype, int, int, MPI_Comm, MPI_Request*) int MPI_Send_init(void*, int, MPI_Datatype, int, int, MPI_Comm, MPI_Request*) int MPI_Bsend_init(void*, int, MPI_Datatype, int, int, MPI_Comm, MPI_Request*) int MPI_Ssend_init(void*, int, MPI_Datatype, int, int, MPI_Comm, MPI_Request*) int MPI_Rsend_init(void*, int, MPI_Datatype, int, int, MPI_Comm, MPI_Request*) int MPI_Recv_init(void*, int, MPI_Datatype, int, int, MPI_Comm, MPI_Request*) int MPI_Psend_init(void*, int, MPI_Count, MPI_Datatype, int, int, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Precv_init(void*, int, MPI_Count, MPI_Datatype, int, int, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Probe(int, int, MPI_Comm, MPI_Status*) int MPI_Iprobe(int, int, MPI_Comm, int*, MPI_Status*) MPI_Message MPI_MESSAGE_NULL #:= 0 MPI_Message MPI_MESSAGE_NO_PROC #:= MPI_MESSAGE_NULL int MPI_Mprobe(int, int, MPI_Comm, MPI_Message*, MPI_Status*) int MPI_Improbe(int, int, MPI_Comm, int*, MPI_Message*, MPI_Status*) int MPI_Mrecv(void*, int, MPI_Datatype, MPI_Message*, MPI_Status*) int MPI_Imrecv(void*, int, MPI_Datatype, MPI_Message*, MPI_Request*) int MPI_Barrier(MPI_Comm) int MPI_Bcast(void*, int, MPI_Datatype, int, MPI_Comm) int 
MPI_Gather(void*, int, MPI_Datatype, void*, int, MPI_Datatype, int, MPI_Comm) int MPI_Gatherv(void*, int, MPI_Datatype, void*, int[], int[], MPI_Datatype, int, MPI_Comm) int MPI_Scatter(void*, int, MPI_Datatype, void*, int, MPI_Datatype, int, MPI_Comm) int MPI_Scatterv(void*, int[], int[], MPI_Datatype, void*, int, MPI_Datatype, int, MPI_Comm) int MPI_Allgather(void*, int, MPI_Datatype, void*, int, MPI_Datatype, MPI_Comm) int MPI_Allgatherv(void*, int, MPI_Datatype, void*, int[], int[], MPI_Datatype, MPI_Comm) int MPI_Alltoall(void*, int, MPI_Datatype, void*, int, MPI_Datatype, MPI_Comm) int MPI_Alltoallv(void*, int[], int[], MPI_Datatype, void*, int[], int[], MPI_Datatype, MPI_Comm) int MPI_Alltoallw(void*, int[], int[], MPI_Datatype[], void*, int[], int[], MPI_Datatype[], MPI_Comm) int MPI_Reduce_local(void*, void*, int, MPI_Datatype, MPI_Op) int MPI_Reduce(void*, void*, int, MPI_Datatype, MPI_Op, int, MPI_Comm) int MPI_Allreduce(void*, void*, int, MPI_Datatype, MPI_Op, MPI_Comm) int MPI_Reduce_scatter_block(void*, void*, int, MPI_Datatype, MPI_Op, MPI_Comm) int MPI_Reduce_scatter(void*, void*, int[], MPI_Datatype, MPI_Op, MPI_Comm) int MPI_Scan(void*, void*, int, MPI_Datatype, MPI_Op, MPI_Comm) int MPI_Exscan(void*, void*, int, MPI_Datatype, MPI_Op, MPI_Comm) int MPI_Neighbor_allgather(void*, int, MPI_Datatype, void*, int, MPI_Datatype, MPI_Comm) int MPI_Neighbor_allgatherv(void*, int, MPI_Datatype, void*, int[], int[], MPI_Datatype, MPI_Comm) int MPI_Neighbor_alltoall(void*, int, MPI_Datatype, void*, int, MPI_Datatype, MPI_Comm) int MPI_Neighbor_alltoallv(void*, int[], int[], MPI_Datatype, void*, int[], int[], MPI_Datatype, MPI_Comm) int MPI_Neighbor_alltoallw(void*, int[], MPI_Aint[], MPI_Datatype[], void*, int[], MPI_Aint[], MPI_Datatype[], MPI_Comm) int MPI_Ibarrier(MPI_Comm, MPI_Request*) int MPI_Ibcast(void*, int, MPI_Datatype, int, MPI_Comm, MPI_Request*) int MPI_Igather(void*, int, MPI_Datatype, void*, int, MPI_Datatype, int, MPI_Comm, MPI_Request*) int MPI_Igatherv(void*, int, MPI_Datatype, void*, int[], int[], MPI_Datatype, int, MPI_Comm, MPI_Request*) int MPI_Iscatter(void*, int, MPI_Datatype, void*, int, MPI_Datatype, int, MPI_Comm, MPI_Request*) int MPI_Iscatterv(void*, int[], int[], MPI_Datatype, void*, int, MPI_Datatype, int, MPI_Comm, MPI_Request*) int MPI_Iallgather(void*, int, MPI_Datatype, void*, int, MPI_Datatype, MPI_Comm, MPI_Request*) int MPI_Iallgatherv(void*, int, MPI_Datatype, void*, int[], int[], MPI_Datatype, MPI_Comm, MPI_Request*) int MPI_Ialltoall(void*, int, MPI_Datatype, void*, int, MPI_Datatype, MPI_Comm, MPI_Request*) int MPI_Ialltoallv(void*, int[], int[], MPI_Datatype, void*, int[], int[], MPI_Datatype, MPI_Comm, MPI_Request*) int MPI_Ialltoallw(void*, int[], int[], MPI_Datatype[], void*, int[], int[], MPI_Datatype[], MPI_Comm, MPI_Request*) int MPI_Ireduce(void*, void*, int, MPI_Datatype, MPI_Op, int, MPI_Comm, MPI_Request*) int MPI_Iallreduce(void*, void*, int, MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request*) int MPI_Ireduce_scatter_block(void*, void*, int, MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request*) int MPI_Ireduce_scatter(void*, void*, int[], MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request*) int MPI_Iscan(void*, void*, int, MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request*) int MPI_Iexscan(void*, void*, int, MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request*) int MPI_Ineighbor_allgather(void*, int, MPI_Datatype, void*, int, MPI_Datatype, MPI_Comm, MPI_Request*) int MPI_Ineighbor_allgatherv(void*, int, MPI_Datatype, void*, int[], int[], MPI_Datatype, MPI_Comm, 
MPI_Request*) int MPI_Ineighbor_alltoall(void*, int, MPI_Datatype, void*, int, MPI_Datatype, MPI_Comm, MPI_Request*) int MPI_Ineighbor_alltoallv(void*, int[], int[], MPI_Datatype, void*, int[], int[], MPI_Datatype, MPI_Comm, MPI_Request*) int MPI_Ineighbor_alltoallw(void*, int[], MPI_Aint[], MPI_Datatype[], void*, int[], MPI_Aint[], MPI_Datatype[], MPI_Comm, MPI_Request*) int MPI_Barrier_init(MPI_Comm, MPI_Info, MPI_Request*) int MPI_Bcast_init(void*, int, MPI_Datatype, int, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Gather_init(void*, int, MPI_Datatype, void*, int, MPI_Datatype, int, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Gatherv_init(void*, int, MPI_Datatype, void*, int[], int[], MPI_Datatype, int, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Scatter_init(void*, int, MPI_Datatype, void*, int, MPI_Datatype, int, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Scatterv_init(void*, int[], int[], MPI_Datatype, void*, int, MPI_Datatype, int, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Allgather_init(void*, int, MPI_Datatype, void*, int, MPI_Datatype, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Allgatherv_init(void*, int, MPI_Datatype, void*, int[], int[], MPI_Datatype, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Alltoall_init(void*, int, MPI_Datatype, void*, int, MPI_Datatype, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Alltoallv_init(void*, int[], int[], MPI_Datatype, void*, int[], int[], MPI_Datatype, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Alltoallw_init(void*, int[], int[], MPI_Datatype[], void*, int[], int[], MPI_Datatype[], MPI_Comm, MPI_Info, MPI_Request*) int MPI_Reduce_init(void*, void*, int, MPI_Datatype, MPI_Op, int, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Allreduce_init(void*, void*, int, MPI_Datatype, MPI_Op, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Reduce_scatter_block_init(void*, void*, int, MPI_Datatype, MPI_Op, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Reduce_scatter_init(void*, void*, int[], MPI_Datatype, MPI_Op, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Scan_init(void*, void*, int, MPI_Datatype, MPI_Op, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Exscan_init(void*, void*, int, MPI_Datatype, MPI_Op, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Neighbor_allgather_init(void*, int, MPI_Datatype, void*, int, MPI_Datatype, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Neighbor_allgatherv_init(void*, int, MPI_Datatype, void*, int[], int[], MPI_Datatype, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Neighbor_alltoall_init(void*, int, MPI_Datatype, void*, int, MPI_Datatype, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Neighbor_alltoallv_init(void*, int[], int[], MPI_Datatype, void*, int[], int[], MPI_Datatype, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Neighbor_alltoallw_init(void*, int[], MPI_Aint[], MPI_Datatype[], void*, int[], MPI_Aint[], MPI_Datatype[], MPI_Comm, MPI_Info, MPI_Request*) int MPI_Comm_dup(MPI_Comm, MPI_Comm*) int MPI_Comm_dup_with_info(MPI_Comm, MPI_Info, MPI_Comm*) int MPI_Comm_idup(MPI_Comm, MPI_Comm*, MPI_Request*) int MPI_Comm_idup_with_info(MPI_Comm, MPI_Info, MPI_Comm*, MPI_Request*) int MPI_Comm_create(MPI_Comm, MPI_Group, MPI_Comm*) int MPI_Comm_create_group(MPI_Comm, MPI_Group, int, MPI_Comm*) enum: MPI_MAX_STRINGTAG_LEN #:= 1 int MPI_Comm_create_from_group(MPI_Group, char[], MPI_Info, MPI_Errhandler, MPI_Comm*) int MPI_Comm_split(MPI_Comm, int, int, MPI_Comm*) enum: MPI_COMM_TYPE_SHARED #:= MPI_UNDEFINED enum: MPI_COMM_TYPE_HW_GUIDED #:= MPI_UNDEFINED enum: MPI_COMM_TYPE_HW_UNGUIDED #:= MPI_UNDEFINED enum: MPI_COMM_TYPE_RESOURCE_GUIDED #:= MPI_UNDEFINED int 
MPI_Comm_split_type(MPI_Comm, int, int, MPI_Info, MPI_Comm*) int MPI_Comm_set_info(MPI_Comm, MPI_Info) int MPI_Comm_get_info(MPI_Comm, MPI_Info*) enum: MPI_CART #:= MPI_UNDEFINED int MPI_Cart_create(MPI_Comm, int, int[], int[], int, MPI_Comm*) int MPI_Cartdim_get(MPI_Comm, int*) int MPI_Cart_get(MPI_Comm, int, int[], int[], int[]) int MPI_Cart_rank(MPI_Comm, int[], int*) int MPI_Cart_coords(MPI_Comm, int, int, int[]) int MPI_Cart_shift(MPI_Comm, int, int, int[], int[]) int MPI_Cart_sub(MPI_Comm, int[], MPI_Comm*) int MPI_Cart_map(MPI_Comm, int, int[], int[], int*) int MPI_Dims_create(int, int, int[]) enum: MPI_GRAPH #:= MPI_UNDEFINED int MPI_Graph_create(MPI_Comm, int, int[], int[], int, MPI_Comm*) int MPI_Graphdims_get(MPI_Comm, int*, int*) int MPI_Graph_get(MPI_Comm, int, int, int[], int[]) int MPI_Graph_map(MPI_Comm, int, int[], int[], int*) int MPI_Graph_neighbors_count(MPI_Comm, int, int*) int MPI_Graph_neighbors(MPI_Comm, int, int, int[]) enum: MPI_DIST_GRAPH #:= MPI_UNDEFINED int* MPI_UNWEIGHTED #:= 0 int* MPI_WEIGHTS_EMPTY #:= MPI_UNWEIGHTED int MPI_Dist_graph_create_adjacent(MPI_Comm, int, int[], int[], int, int[], int[], MPI_Info, int, MPI_Comm*) int MPI_Dist_graph_create(MPI_Comm, int, int[], int[], int[], int[], MPI_Info, int, MPI_Comm*) int MPI_Dist_graph_neighbors_count(MPI_Comm, int*, int*, int*) int MPI_Dist_graph_neighbors(MPI_Comm, int, int[], int[], int, int[], int[]) int MPI_Intercomm_create(MPI_Comm, int, MPI_Comm, int, int, MPI_Comm*) int MPI_Intercomm_create_from_groups(MPI_Group, int, MPI_Group, int, char[], MPI_Info, MPI_Errhandler, MPI_Comm*) int MPI_Comm_remote_group(MPI_Comm, MPI_Group*) int MPI_Comm_remote_size(MPI_Comm, int*) int MPI_Intercomm_merge(MPI_Comm, int, MPI_Comm*) enum: MPI_MAX_PORT_NAME #:= 1 int MPI_Open_port(MPI_Info, char[]) int MPI_Close_port(char[]) int MPI_Publish_name(char[], MPI_Info, char[]) int MPI_Unpublish_name(char[], MPI_Info, char[]) int MPI_Lookup_name(char[], MPI_Info, char[]) int MPI_Comm_accept(char[], MPI_Info, int, MPI_Comm, MPI_Comm*) int MPI_Comm_connect(char[], MPI_Info, int, MPI_Comm, MPI_Comm*) int MPI_Comm_join(int, MPI_Comm*) int MPI_Comm_disconnect(MPI_Comm*) char** MPI_ARGV_NULL #:= 0 char*** MPI_ARGVS_NULL #:= 0 int* MPI_ERRCODES_IGNORE #:= 0 int MPI_Comm_spawn(char[], char*[], int, MPI_Info, int, MPI_Comm, MPI_Comm*, int[]) int MPI_Comm_spawn_multiple(int, char*[], char**[], int[], MPI_Info[], int, MPI_Comm, MPI_Comm*, int[]) int MPI_Comm_get_parent(MPI_Comm*) int MPI_Comm_get_name(MPI_Comm, char[], int*) int MPI_Comm_set_name(MPI_Comm, char[]) enum: MPI_TAG_UB #:= MPI_KEYVAL_INVALID enum: MPI_IO #:= MPI_KEYVAL_INVALID enum: MPI_WTIME_IS_GLOBAL #:= MPI_KEYVAL_INVALID enum: MPI_UNIVERSE_SIZE #:= MPI_KEYVAL_INVALID enum: MPI_APPNUM #:= MPI_KEYVAL_INVALID enum: MPI_LASTUSEDCODE #:= MPI_KEYVAL_INVALID int MPI_Comm_get_attr(MPI_Comm, int, void*, int*) #:= MPI_Attr_get int MPI_Comm_set_attr(MPI_Comm, int, void*) #:= MPI_Attr_put int MPI_Comm_delete_attr(MPI_Comm, int) #:= MPI_Attr_delete ctypedef int MPI_Comm_copy_attr_function(MPI_Comm,int,void*,void*,void*,int*) #:= MPI_Copy_function ctypedef int MPI_Comm_delete_attr_function(MPI_Comm,int,void*,void*) #:= MPI_Delete_function MPI_Comm_copy_attr_function* MPI_COMM_DUP_FN #:= MPI_DUP_FN MPI_Comm_copy_attr_function* MPI_COMM_NULL_COPY_FN #:= MPI_NULL_COPY_FN MPI_Comm_delete_attr_function* MPI_COMM_NULL_DELETE_FN #:= MPI_NULL_DELETE_FN int MPI_Comm_create_keyval(MPI_Comm_copy_attr_function*, MPI_Comm_delete_attr_function*, int*, void*) #:= MPI_Keyval_create int 
MPI_Comm_free_keyval(int*) #:= MPI_Keyval_free ctypedef void MPI_Comm_errhandler_fn(MPI_Comm*,int*,...) #:= MPI_Handler_function ctypedef void MPI_Comm_errhandler_function(MPI_Comm*,int*,...) #:= MPI_Comm_errhandler_fn int MPI_Comm_create_errhandler(MPI_Comm_errhandler_function*, MPI_Errhandler*) #:= MPI_Errhandler_create int MPI_Comm_get_errhandler(MPI_Comm, MPI_Errhandler*) #:= MPI_Errhandler_get int MPI_Comm_set_errhandler(MPI_Comm, MPI_Errhandler) #:= MPI_Errhandler_set int MPI_Comm_call_errhandler(MPI_Comm, int) # MPI-4 large count functions int MPI_Buffer_attach_c(void*, MPI_Count) int MPI_Buffer_detach_c(void*, MPI_Count*) int MPI_Comm_attach_buffer_c(MPI_Comm, void*, MPI_Count) int MPI_Comm_detach_buffer_c(MPI_Comm, void*, MPI_Count*) int MPI_Session_attach_buffer_c(MPI_Session, void*, MPI_Count) int MPI_Session_detach_buffer_c(MPI_Session, void*, MPI_Count*) int MPI_Send_c(void*, MPI_Count, MPI_Datatype, int, int, MPI_Comm) int MPI_Recv_c(void*, MPI_Count, MPI_Datatype, int, int, MPI_Comm, MPI_Status*) int MPI_Sendrecv_c(void*, MPI_Count, MPI_Datatype, int, int, void*, MPI_Count, MPI_Datatype, int, int, MPI_Comm, MPI_Status*) int MPI_Sendrecv_replace_c(void*, MPI_Count, MPI_Datatype, int, int, int, int, MPI_Comm, MPI_Status*) int MPI_Bsend_c(void*, MPI_Count, MPI_Datatype, int, int, MPI_Comm) int MPI_Ssend_c(void*, MPI_Count, MPI_Datatype, int, int, MPI_Comm) int MPI_Rsend_c(void*, MPI_Count, MPI_Datatype, int, int, MPI_Comm) int MPI_Isend_c(void*, MPI_Count, MPI_Datatype, int, int, MPI_Comm, MPI_Request*) int MPI_Irecv_c(void*, MPI_Count, MPI_Datatype, int, int, MPI_Comm, MPI_Request*) int MPI_Isendrecv_c(void*, MPI_Count, MPI_Datatype, int, int, void*, MPI_Count, MPI_Datatype, int, int, MPI_Comm, MPI_Request*) int MPI_Isendrecv_replace_c(void*, MPI_Count, MPI_Datatype, int, int, int, int, MPI_Comm, MPI_Request*) int MPI_Ibsend_c(void*, MPI_Count, MPI_Datatype, int, int, MPI_Comm, MPI_Request*) int MPI_Issend_c(void*, MPI_Count, MPI_Datatype, int, int, MPI_Comm, MPI_Request*) int MPI_Irsend_c(void*, MPI_Count, MPI_Datatype, int, int, MPI_Comm, MPI_Request*) int MPI_Send_init_c (void*, MPI_Count, MPI_Datatype, int, int, MPI_Comm, MPI_Request*) int MPI_Recv_init_c (void*, MPI_Count, MPI_Datatype, int, int, MPI_Comm, MPI_Request*) int MPI_Bsend_init_c(void*, MPI_Count, MPI_Datatype, int, int, MPI_Comm, MPI_Request*) int MPI_Ssend_init_c(void*, MPI_Count, MPI_Datatype, int, int, MPI_Comm, MPI_Request*) int MPI_Rsend_init_c(void*, MPI_Count, MPI_Datatype, int, int, MPI_Comm, MPI_Request*) int MPI_Mrecv_c (void*, MPI_Count, MPI_Datatype, MPI_Message*, MPI_Status*) int MPI_Imrecv_c(void*, MPI_Count, MPI_Datatype, MPI_Message*, MPI_Request*) int MPI_Bcast_c (void*, MPI_Count, MPI_Datatype, int, MPI_Comm) int MPI_Gather_c(void*, MPI_Count, MPI_Datatype, void*, MPI_Count, MPI_Datatype, int, MPI_Comm) int MPI_Gatherv_c(void*, MPI_Count, MPI_Datatype, void*, MPI_Count[], MPI_Aint[], MPI_Datatype, int, MPI_Comm) int MPI_Scatter_c(void*, MPI_Count, MPI_Datatype, void*, MPI_Count, MPI_Datatype, int, MPI_Comm) int MPI_Scatterv_c(void*, MPI_Count[], MPI_Aint[], MPI_Datatype, void*, MPI_Count, MPI_Datatype, int, MPI_Comm) int MPI_Allgather_c(void*, MPI_Count, MPI_Datatype, void*, MPI_Count, MPI_Datatype, MPI_Comm) int MPI_Allgatherv_c(void*, MPI_Count, MPI_Datatype, void*, MPI_Count[], MPI_Aint[], MPI_Datatype, MPI_Comm) int MPI_Alltoall_c(void*, MPI_Count, MPI_Datatype, void*, MPI_Count, MPI_Datatype, MPI_Comm) int MPI_Alltoallv_c(void*, MPI_Count[], MPI_Aint[], MPI_Datatype, void*, MPI_Count[], 
MPI_Aint[], MPI_Datatype, MPI_Comm) int MPI_Alltoallw_c(void*, MPI_Count[], MPI_Aint[], MPI_Datatype[], void*, MPI_Count[], MPI_Aint[], MPI_Datatype[], MPI_Comm) int MPI_Reduce_local_c(void*, void*, MPI_Count, MPI_Datatype, MPI_Op) int MPI_Reduce_c(void*, void*, MPI_Count, MPI_Datatype, MPI_Op, int, MPI_Comm) int MPI_Allreduce_c(void*, void*, MPI_Count, MPI_Datatype, MPI_Op, MPI_Comm) int MPI_Reduce_scatter_block_c(void*, void*, MPI_Count, MPI_Datatype, MPI_Op, MPI_Comm) int MPI_Reduce_scatter_c(void*, void*, MPI_Count[], MPI_Datatype, MPI_Op, MPI_Comm) int MPI_Scan_c(void*, void*, MPI_Count, MPI_Datatype, MPI_Op, MPI_Comm) int MPI_Exscan_c(void*, void*, MPI_Count, MPI_Datatype, MPI_Op, MPI_Comm) int MPI_Neighbor_allgather_c(void*, MPI_Count, MPI_Datatype, void*, MPI_Count, MPI_Datatype, MPI_Comm) int MPI_Neighbor_allgatherv_c(void*, MPI_Count, MPI_Datatype, void*, MPI_Count[], MPI_Aint[], MPI_Datatype, MPI_Comm) int MPI_Neighbor_alltoall_c(void*, MPI_Count, MPI_Datatype, void*, MPI_Count, MPI_Datatype, MPI_Comm) int MPI_Neighbor_alltoallv_c(void*, MPI_Count[], MPI_Aint[], MPI_Datatype, void*, MPI_Count[], MPI_Aint[], MPI_Datatype, MPI_Comm) int MPI_Neighbor_alltoallw_c(void*, MPI_Count[], MPI_Aint[], MPI_Datatype[], void*, MPI_Count[], MPI_Aint[], MPI_Datatype[], MPI_Comm) int MPI_Ibcast_c(void*, MPI_Count, MPI_Datatype, int, MPI_Comm, MPI_Request*) int MPI_Igather_c(void*, MPI_Count, MPI_Datatype, void*, MPI_Count, MPI_Datatype, int, MPI_Comm, MPI_Request*) int MPI_Igatherv_c(void*, MPI_Count, MPI_Datatype, void*, MPI_Count[], MPI_Aint[], MPI_Datatype, int, MPI_Comm, MPI_Request*) int MPI_Iscatter_c(void*, MPI_Count, MPI_Datatype, void*, MPI_Count, MPI_Datatype, int, MPI_Comm, MPI_Request*) int MPI_Iscatterv_c(void*, MPI_Count[], MPI_Aint[], MPI_Datatype, void*, MPI_Count, MPI_Datatype, int, MPI_Comm, MPI_Request*) int MPI_Iallgather_c(void*, MPI_Count, MPI_Datatype, void*, MPI_Count, MPI_Datatype, MPI_Comm, MPI_Request*) int MPI_Iallgatherv_c(void*, MPI_Count, MPI_Datatype, void*, MPI_Count[], MPI_Aint[], MPI_Datatype, MPI_Comm, MPI_Request*) int MPI_Ialltoall_c(void*, MPI_Count, MPI_Datatype, void*, MPI_Count, MPI_Datatype, MPI_Comm, MPI_Request*) int MPI_Ialltoallv_c(void*, MPI_Count[], MPI_Aint[], MPI_Datatype, void*, MPI_Count[], MPI_Aint[], MPI_Datatype, MPI_Comm, MPI_Request*) int MPI_Ialltoallw_c(void*, MPI_Count[], MPI_Aint[], MPI_Datatype[], void*, MPI_Count[], MPI_Aint[], MPI_Datatype[], MPI_Comm, MPI_Request*) int MPI_Ireduce_c(void*, void*, MPI_Count, MPI_Datatype, MPI_Op, int, MPI_Comm, MPI_Request*) int MPI_Iallreduce_c(void*, void*, MPI_Count, MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request*) int MPI_Ireduce_scatter_block_c(void*, void*, MPI_Count, MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request*) int MPI_Ireduce_scatter_c(void*, void*, MPI_Count[], MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request*) int MPI_Iscan_c(void*, void*, MPI_Count, MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request*) int MPI_Iexscan_c(void*, void*, MPI_Count, MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request*) int MPI_Ineighbor_allgather_c(void*, MPI_Count, MPI_Datatype, void*, MPI_Count, MPI_Datatype, MPI_Comm, MPI_Request*) int MPI_Ineighbor_allgatherv_c(void*, MPI_Count, MPI_Datatype, void*, MPI_Count[], MPI_Aint[], MPI_Datatype, MPI_Comm, MPI_Request*) int MPI_Ineighbor_alltoall_c(void*, MPI_Count, MPI_Datatype, void*, MPI_Count, MPI_Datatype, MPI_Comm, MPI_Request*) int MPI_Ineighbor_alltoallv_c(void*, MPI_Count[], MPI_Aint[], MPI_Datatype, void*, MPI_Count[], MPI_Aint[], MPI_Datatype, MPI_Comm, MPI_Request*) int 
MPI_Ineighbor_alltoallw_c(void*, MPI_Count[], MPI_Aint[], MPI_Datatype[], void*, MPI_Count[], MPI_Aint[], MPI_Datatype[], MPI_Comm, MPI_Request*) int MPI_Bcast_init_c(void*, MPI_Count, MPI_Datatype, int, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Gather_init_c(void*, MPI_Count, MPI_Datatype, void*, MPI_Count, MPI_Datatype, int, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Gatherv_init_c(void*, MPI_Count, MPI_Datatype, void*, MPI_Count[], MPI_Aint[], MPI_Datatype, int, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Scatter_init_c(void*, MPI_Count, MPI_Datatype, void*, MPI_Count, MPI_Datatype, int, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Scatterv_init_c(void*, MPI_Count[], MPI_Aint[], MPI_Datatype, void*, MPI_Count, MPI_Datatype, int, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Allgather_init_c(void*, MPI_Count, MPI_Datatype, void*, MPI_Count, MPI_Datatype, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Allgatherv_init_c(void*, MPI_Count, MPI_Datatype, void*, MPI_Count[], MPI_Aint[], MPI_Datatype, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Alltoall_init_c(void*, MPI_Count, MPI_Datatype, void*, MPI_Count, MPI_Datatype, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Alltoallv_init_c(void*, MPI_Count[], MPI_Aint[], MPI_Datatype, void*, MPI_Count[], MPI_Aint[], MPI_Datatype, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Alltoallw_init_c(void*, MPI_Count[], MPI_Aint[], MPI_Datatype[], void*, MPI_Count[], MPI_Aint[], MPI_Datatype[], MPI_Comm, MPI_Info, MPI_Request*) int MPI_Reduce_init_c(void*, void*, MPI_Count, MPI_Datatype, MPI_Op, int, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Allreduce_init_c(void*, void*, MPI_Count, MPI_Datatype, MPI_Op, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Reduce_scatter_block_init_c(void*, void*, MPI_Count, MPI_Datatype, MPI_Op, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Reduce_scatter_init_c(void*, void*, MPI_Count[], MPI_Datatype, MPI_Op, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Scan_init_c(void*, void*, MPI_Count, MPI_Datatype, MPI_Op, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Exscan_init_c(void*, void*, MPI_Count, MPI_Datatype, MPI_Op, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Neighbor_allgather_init_c(void*, MPI_Count, MPI_Datatype, void*, MPI_Count, MPI_Datatype, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Neighbor_allgatherv_init_c(void*, MPI_Count, MPI_Datatype, void*, MPI_Count[], MPI_Aint[], MPI_Datatype, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Neighbor_alltoall_init_c(void*, MPI_Count, MPI_Datatype, void*, MPI_Count, MPI_Datatype, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Neighbor_alltoallv_init_c(void*, MPI_Count[], MPI_Aint[], MPI_Datatype, void*, MPI_Count[], MPI_Aint[], MPI_Datatype, MPI_Comm, MPI_Info, MPI_Request*) int MPI_Neighbor_alltoallw_init_c(void*, MPI_Count[], MPI_Aint[], MPI_Datatype[], void*, MPI_Count[], MPI_Aint[], MPI_Datatype[], MPI_Comm, MPI_Info, MPI_Request*) #----------------------------------------------------------------- MPI_Win MPI_WIN_NULL #:= 0 int MPI_Win_free(MPI_Win*) int MPI_Win_create(void*, MPI_Aint, int, MPI_Info, MPI_Comm, MPI_Win*) int MPI_Win_allocate(MPI_Aint, int, MPI_Info, MPI_Comm, void*, MPI_Win*) int MPI_Win_allocate_shared(MPI_Aint, int, MPI_Info, MPI_Comm, void*, MPI_Win*) int MPI_Win_shared_query(MPI_Win, int, MPI_Aint*, int*, void*) int MPI_Win_create_dynamic(MPI_Info, MPI_Comm, MPI_Win*) int MPI_Win_attach(MPI_Win, void*, MPI_Aint) int MPI_Win_detach(MPI_Win, void*) int MPI_Win_set_info(MPI_Win, MPI_Info) int MPI_Win_get_info(MPI_Win, MPI_Info*) int MPI_Win_get_group(MPI_Win, MPI_Group*) int MPI_Get(void*, int, MPI_Datatype, int, 
MPI_Aint, int, MPI_Datatype, MPI_Win) int MPI_Put(void*, int, MPI_Datatype, int, MPI_Aint, int, MPI_Datatype, MPI_Win) int MPI_Accumulate(void*, int, MPI_Datatype, int, MPI_Aint, int, MPI_Datatype, MPI_Op, MPI_Win) int MPI_Get_accumulate(void*, int, MPI_Datatype, void*, int, MPI_Datatype, int, MPI_Aint, int, MPI_Datatype, MPI_Op, MPI_Win) int MPI_Fetch_and_op(void*, void*, MPI_Datatype, int, MPI_Aint, MPI_Op, MPI_Win) int MPI_Compare_and_swap(void*, void*, void*, MPI_Datatype, int, MPI_Aint, MPI_Win) int MPI_Rget(void*, int, MPI_Datatype, int, MPI_Aint, int, MPI_Datatype, MPI_Win, MPI_Request*) int MPI_Rput(void*, int, MPI_Datatype, int, MPI_Aint, int, MPI_Datatype, MPI_Win, MPI_Request*) int MPI_Raccumulate(void*, int, MPI_Datatype, int, MPI_Aint, int, MPI_Datatype, MPI_Op, MPI_Win, MPI_Request*) int MPI_Rget_accumulate(void*, int, MPI_Datatype, void*, int, MPI_Datatype, int, MPI_Aint, int, MPI_Datatype, MPI_Op, MPI_Win, MPI_Request*) enum: MPI_MODE_NOCHECK #:= MPI_UNDEFINED enum: MPI_MODE_NOSTORE #:= MPI_UNDEFINED enum: MPI_MODE_NOPUT #:= MPI_UNDEFINED enum: MPI_MODE_NOPRECEDE #:= MPI_UNDEFINED enum: MPI_MODE_NOSUCCEED #:= MPI_UNDEFINED int MPI_Win_fence(int, MPI_Win) int MPI_Win_post(MPI_Group, int, MPI_Win) int MPI_Win_start(MPI_Group, int, MPI_Win) int MPI_Win_complete(MPI_Win) int MPI_Win_wait(MPI_Win) int MPI_Win_test(MPI_Win, int*) enum: MPI_LOCK_EXCLUSIVE #:= MPI_UNDEFINED enum: MPI_LOCK_SHARED #:= MPI_UNDEFINED int MPI_Win_lock(int, int, int, MPI_Win) int MPI_Win_unlock(int, MPI_Win) int MPI_Win_lock_all(int, MPI_Win) int MPI_Win_unlock_all(MPI_Win) int MPI_Win_flush(int, MPI_Win) int MPI_Win_flush_all(MPI_Win) int MPI_Win_flush_local(int, MPI_Win) int MPI_Win_flush_local_all(MPI_Win) int MPI_Win_sync(MPI_Win) int MPI_Win_get_name(MPI_Win, char[], int*) int MPI_Win_set_name(MPI_Win, char[]) enum: MPI_WIN_BASE #:= MPI_KEYVAL_INVALID enum: MPI_WIN_SIZE #:= MPI_KEYVAL_INVALID enum: MPI_WIN_DISP_UNIT #:= MPI_KEYVAL_INVALID enum: MPI_WIN_CREATE_FLAVOR #:= MPI_KEYVAL_INVALID enum: MPI_WIN_MODEL #:= MPI_KEYVAL_INVALID enum: MPI_WIN_FLAVOR_CREATE #:= MPI_UNDEFINED enum: MPI_WIN_FLAVOR_ALLOCATE #:= MPI_UNDEFINED enum: MPI_WIN_FLAVOR_DYNAMIC #:= MPI_UNDEFINED enum: MPI_WIN_FLAVOR_SHARED #:= MPI_UNDEFINED enum: MPI_WIN_SEPARATE #:= MPI_UNDEFINED enum: MPI_WIN_UNIFIED #:= MPI_UNDEFINED int MPI_Win_get_attr(MPI_Win, int, void*, int*) int MPI_Win_set_attr(MPI_Win, int, void*) int MPI_Win_delete_attr(MPI_Win, int) ctypedef int MPI_Win_copy_attr_function(MPI_Win,int,void*,void*,void*,int*) ctypedef int MPI_Win_delete_attr_function(MPI_Win,int,void*,void*) MPI_Win_copy_attr_function* MPI_WIN_DUP_FN #:= 0 MPI_Win_copy_attr_function* MPI_WIN_NULL_COPY_FN #:= 0 MPI_Win_delete_attr_function* MPI_WIN_NULL_DELETE_FN #:= 0 int MPI_Win_create_keyval(MPI_Win_copy_attr_function*, MPI_Win_delete_attr_function*, int*, void*) int MPI_Win_free_keyval(int*) ctypedef void MPI_Win_errhandler_fn(MPI_Win*,int*,...) ctypedef void MPI_Win_errhandler_function(MPI_Win*,int*,...) 
#:= MPI_Win_errhandler_fn int MPI_Win_create_errhandler(MPI_Win_errhandler_function*, MPI_Errhandler*) int MPI_Win_get_errhandler(MPI_Win, MPI_Errhandler*) int MPI_Win_set_errhandler(MPI_Win, MPI_Errhandler) int MPI_Win_call_errhandler(MPI_Win, int) # MPI-4 large count functions int MPI_Win_create_c(void*, MPI_Aint, MPI_Aint, MPI_Info, MPI_Comm, MPI_Win*) int MPI_Win_allocate_c(MPI_Aint, MPI_Aint, MPI_Info, MPI_Comm, void*, MPI_Win*) int MPI_Win_allocate_shared_c(MPI_Aint, MPI_Aint, MPI_Info, MPI_Comm, void*, MPI_Win*) int MPI_Win_shared_query_c(MPI_Win, int, MPI_Aint*, MPI_Aint*, void*) int MPI_Get_c(void*, MPI_Count, MPI_Datatype, int, MPI_Aint, MPI_Count, MPI_Datatype, MPI_Win) int MPI_Put_c(void*, MPI_Count, MPI_Datatype, int, MPI_Aint, MPI_Count, MPI_Datatype, MPI_Win) int MPI_Accumulate_c(void*, MPI_Count, MPI_Datatype, int, MPI_Aint, MPI_Count, MPI_Datatype, MPI_Op, MPI_Win) int MPI_Get_accumulate_c(void*, MPI_Count, MPI_Datatype, void*, MPI_Count, MPI_Datatype, int, MPI_Aint, MPI_Count, MPI_Datatype, MPI_Op, MPI_Win) int MPI_Rget_c(void*, MPI_Count, MPI_Datatype, int, MPI_Aint, MPI_Count, MPI_Datatype, MPI_Win, MPI_Request*) int MPI_Rput_c(void*, MPI_Count, MPI_Datatype, int, MPI_Aint, MPI_Count, MPI_Datatype, MPI_Win, MPI_Request*) int MPI_Raccumulate_c(void*, MPI_Count, MPI_Datatype, int, MPI_Aint, MPI_Count, MPI_Datatype, MPI_Op, MPI_Win, MPI_Request*) int MPI_Rget_accumulate_c(void*, MPI_Count, MPI_Datatype, void*, MPI_Count, MPI_Datatype, int, MPI_Aint, MPI_Count, MPI_Datatype, MPI_Op, MPI_Win, MPI_Request*) #----------------------------------------------------------------- MPI_File MPI_FILE_NULL #:= 0 enum: MPI_MODE_RDONLY #:= 1 enum: MPI_MODE_RDWR #:= 2 enum: MPI_MODE_WRONLY #:= 4 enum: MPI_MODE_CREATE #:= 8 enum: MPI_MODE_EXCL #:= 16 enum: MPI_MODE_DELETE_ON_CLOSE #:= 32 enum: MPI_MODE_UNIQUE_OPEN #:= 64 enum: MPI_MODE_APPEND #:= 128 enum: MPI_MODE_SEQUENTIAL #:= 256 int MPI_File_open(MPI_Comm, char[], int, MPI_Info, MPI_File*) int MPI_File_close(MPI_File*) int MPI_File_delete(char[], MPI_Info) int MPI_File_set_size(MPI_File, MPI_Offset) int MPI_File_preallocate(MPI_File, MPI_Offset) int MPI_File_get_size(MPI_File, MPI_Offset*) int MPI_File_get_group(MPI_File, MPI_Group*) int MPI_File_get_amode(MPI_File, int*) int MPI_File_set_info(MPI_File, MPI_Info) int MPI_File_get_info(MPI_File, MPI_Info*) int MPI_File_get_view(MPI_File, MPI_Offset*, MPI_Datatype*, MPI_Datatype*, char[]) int MPI_File_set_view(MPI_File, MPI_Offset, MPI_Datatype, MPI_Datatype, char[], MPI_Info) int MPI_File_read_at (MPI_File, MPI_Offset, void*, int, MPI_Datatype, MPI_Status*) int MPI_File_read_at_all (MPI_File, MPI_Offset, void*, int, MPI_Datatype, MPI_Status*) int MPI_File_write_at (MPI_File, MPI_Offset, void*, int, MPI_Datatype, MPI_Status*) int MPI_File_write_at_all (MPI_File, MPI_Offset, void*, int, MPI_Datatype, MPI_Status*) int MPI_File_iread_at (MPI_File, MPI_Offset, void*, int, MPI_Datatype, MPI_Request*) int MPI_File_iread_at_all (MPI_File, MPI_Offset, void*, int, MPI_Datatype, MPI_Request*) int MPI_File_iwrite_at (MPI_File, MPI_Offset, void*, int, MPI_Datatype, MPI_Request*) int MPI_File_iwrite_at_all(MPI_File, MPI_Offset, void*, int, MPI_Datatype, MPI_Request*) enum: MPI_SEEK_SET #:= 0 enum: MPI_SEEK_CUR #:= 1 enum: MPI_SEEK_END #:= 2 enum: MPI_DISPLACEMENT_CURRENT #:= 0 int MPI_File_seek(MPI_File, MPI_Offset, int) int MPI_File_get_position(MPI_File, MPI_Offset*) int MPI_File_get_byte_offset(MPI_File, MPI_Offset, MPI_Offset*) int MPI_File_read (MPI_File, void*, int, MPI_Datatype, MPI_Status*) 
int MPI_File_read_all (MPI_File, void*, int, MPI_Datatype, MPI_Status*) int MPI_File_write (MPI_File, void*, int, MPI_Datatype, MPI_Status*) int MPI_File_write_all (MPI_File, void*, int, MPI_Datatype, MPI_Status*) int MPI_File_iread (MPI_File, void*, int, MPI_Datatype, MPI_Request*) int MPI_File_iread_all (MPI_File, void*, int, MPI_Datatype, MPI_Request*) int MPI_File_iwrite (MPI_File, void*, int, MPI_Datatype, MPI_Request*) int MPI_File_iwrite_all (MPI_File, void*, int, MPI_Datatype, MPI_Request*) int MPI_File_read_shared (MPI_File, void*, int, MPI_Datatype, MPI_Status*) int MPI_File_write_shared (MPI_File, void*, int, MPI_Datatype, MPI_Status*) int MPI_File_iread_shared (MPI_File, void*, int, MPI_Datatype, MPI_Request*) int MPI_File_iwrite_shared (MPI_File, void*, int, MPI_Datatype, MPI_Request*) int MPI_File_read_ordered (MPI_File, void*, int, MPI_Datatype, MPI_Status*) int MPI_File_write_ordered (MPI_File, void*, int, MPI_Datatype, MPI_Status*) int MPI_File_seek_shared(MPI_File, MPI_Offset, int) int MPI_File_get_position_shared(MPI_File, MPI_Offset*) int MPI_File_read_at_all_begin (MPI_File, MPI_Offset, void*, int, MPI_Datatype) int MPI_File_read_at_all_end (MPI_File, void*, MPI_Status*) int MPI_File_write_at_all_begin (MPI_File, MPI_Offset, void*, int, MPI_Datatype) int MPI_File_write_at_all_end (MPI_File, void*, MPI_Status*) int MPI_File_read_all_begin (MPI_File, void*, int, MPI_Datatype) int MPI_File_read_all_end (MPI_File, void*, MPI_Status*) int MPI_File_write_all_begin (MPI_File, void*, int, MPI_Datatype) int MPI_File_write_all_end (MPI_File, void*, MPI_Status*) int MPI_File_read_ordered_begin (MPI_File, void*, int, MPI_Datatype) int MPI_File_read_ordered_end (MPI_File, void*, MPI_Status*) int MPI_File_write_ordered_begin (MPI_File, void*, int, MPI_Datatype) int MPI_File_write_ordered_end (MPI_File, void*, MPI_Status*) int MPI_File_get_type_extent(MPI_File, MPI_Datatype, MPI_Aint*) int MPI_File_set_atomicity(MPI_File, int) int MPI_File_get_atomicity(MPI_File, int*) int MPI_File_sync(MPI_File) ctypedef void MPI_File_errhandler_fn(MPI_File*,int*,...) ctypedef void MPI_File_errhandler_function(MPI_File*,int*,...) 
#:= MPI_File_errhandler_fn int MPI_File_create_errhandler(MPI_File_errhandler_function*, MPI_Errhandler*) int MPI_File_get_errhandler(MPI_File, MPI_Errhandler*) int MPI_File_set_errhandler(MPI_File, MPI_Errhandler) int MPI_File_call_errhandler(MPI_File, int) ctypedef int MPI_Datarep_conversion_function(void*,MPI_Datatype,int,void*,MPI_Offset,void*) ctypedef int MPI_Datarep_extent_function(MPI_Datatype,MPI_Aint*,void*) MPI_Datarep_conversion_function* MPI_CONVERSION_FN_NULL #:= 0 enum: MPI_MAX_DATAREP_STRING #:= 1 int MPI_Register_datarep(char[], MPI_Datarep_conversion_function*, MPI_Datarep_conversion_function*, MPI_Datarep_extent_function*, void*) # MPI-4 large count functions int MPI_File_read_at_c (MPI_File, MPI_Offset, void*, MPI_Count, MPI_Datatype, MPI_Status*) int MPI_File_read_at_all_c (MPI_File, MPI_Offset, void*, MPI_Count, MPI_Datatype, MPI_Status*) int MPI_File_write_at_c (MPI_File, MPI_Offset, void*, MPI_Count, MPI_Datatype, MPI_Status*) int MPI_File_write_at_all_c (MPI_File, MPI_Offset, void*, MPI_Count, MPI_Datatype, MPI_Status*) int MPI_File_iread_at_c (MPI_File, MPI_Offset, void*, MPI_Count, MPI_Datatype, MPI_Request*) int MPI_File_iread_at_all_c (MPI_File, MPI_Offset, void*, MPI_Count, MPI_Datatype, MPI_Request*) int MPI_File_iwrite_at_c (MPI_File, MPI_Offset, void*, MPI_Count, MPI_Datatype, MPI_Request*) int MPI_File_iwrite_at_all_c (MPI_File, MPI_Offset, void*, MPI_Count, MPI_Datatype, MPI_Request*) int MPI_File_read_c (MPI_File, void*, MPI_Count, MPI_Datatype, MPI_Status*) int MPI_File_read_all_c (MPI_File, void*, MPI_Count, MPI_Datatype, MPI_Status*) int MPI_File_write_c (MPI_File, void*, MPI_Count, MPI_Datatype, MPI_Status*) int MPI_File_write_all_c (MPI_File, void*, MPI_Count, MPI_Datatype, MPI_Status*) int MPI_File_iread_c (MPI_File, void*, MPI_Count, MPI_Datatype, MPI_Request*) int MPI_File_iread_all_c (MPI_File, void*, MPI_Count, MPI_Datatype, MPI_Request*) int MPI_File_iwrite_c (MPI_File, void*, MPI_Count, MPI_Datatype, MPI_Request*) int MPI_File_iwrite_all_c (MPI_File, void*, MPI_Count, MPI_Datatype, MPI_Request*) int MPI_File_read_shared_c (MPI_File, void*, MPI_Count, MPI_Datatype, MPI_Status*) int MPI_File_write_shared_c (MPI_File, void*, MPI_Count, MPI_Datatype, MPI_Status*) int MPI_File_iread_shared_c (MPI_File, void*, MPI_Count, MPI_Datatype, MPI_Request*) int MPI_File_iwrite_shared_c (MPI_File, void*, MPI_Count, MPI_Datatype, MPI_Request*) int MPI_File_read_ordered_c (MPI_File, void*, MPI_Count, MPI_Datatype, MPI_Status*) int MPI_File_write_ordered_c (MPI_File, void*, MPI_Count, MPI_Datatype, MPI_Status*) int MPI_File_read_at_all_begin_c (MPI_File, MPI_Offset, void*, MPI_Count, MPI_Datatype) int MPI_File_write_at_all_begin_c (MPI_File, MPI_Offset, void*, MPI_Count, MPI_Datatype) int MPI_File_read_all_begin_c (MPI_File, void*, MPI_Count, MPI_Datatype) int MPI_File_write_all_begin_c (MPI_File, void*, MPI_Count, MPI_Datatype) int MPI_File_read_ordered_begin_c (MPI_File, void*, MPI_Count, MPI_Datatype) int MPI_File_write_ordered_begin_c (MPI_File, void*, MPI_Count, MPI_Datatype) int MPI_File_get_type_extent_c(MPI_File, MPI_Datatype, MPI_Count*) ctypedef int MPI_Datarep_conversion_function_c(void*,MPI_Datatype,MPI_Count,void*,MPI_Offset,void*) MPI_Datarep_conversion_function_c* MPI_CONVERSION_FN_NULL_C #:= 0 int MPI_Register_datarep_c(char[], MPI_Datarep_conversion_function_c*, MPI_Datarep_conversion_function_c*, MPI_Datarep_extent_function*, void*) #----------------------------------------------------------------- enum: MPI_MAX_ERROR_STRING #:= 1 int 
MPI_Error_class(int, int*) int MPI_Error_string(int, char[], int*) int MPI_Add_error_class(int*) int MPI_Remove_error_class(int) int MPI_Add_error_code(int,int*) int MPI_Remove_error_code(int) int MPI_Add_error_string(int,char[]) int MPI_Remove_error_string(int) # no errors enum: MPI_SUCCESS #:= 0 enum: MPI_ERR_LASTCODE #:= 1 # object handles enum: MPI_ERR_TYPE #:= MPI_ERR_LASTCODE enum: MPI_ERR_REQUEST #:= MPI_ERR_LASTCODE enum: MPI_ERR_OP #:= MPI_ERR_LASTCODE enum: MPI_ERR_GROUP #:= MPI_ERR_LASTCODE enum: MPI_ERR_INFO #:= MPI_ERR_LASTCODE enum: MPI_ERR_ERRHANDLER #:= MPI_ERR_LASTCODE enum: MPI_ERR_SESSION #:= MPI_ERR_LASTCODE enum: MPI_ERR_COMM #:= MPI_ERR_LASTCODE enum: MPI_ERR_WIN #:= MPI_ERR_LASTCODE enum: MPI_ERR_FILE #:= MPI_ERR_LASTCODE # communication arguments enum: MPI_ERR_BUFFER #:= MPI_ERR_LASTCODE enum: MPI_ERR_COUNT #:= MPI_ERR_LASTCODE enum: MPI_ERR_TAG #:= MPI_ERR_LASTCODE enum: MPI_ERR_RANK #:= MPI_ERR_LASTCODE enum: MPI_ERR_ROOT #:= MPI_ERR_LASTCODE enum: MPI_ERR_TRUNCATE #:= MPI_ERR_LASTCODE # multiple completion enum: MPI_ERR_IN_STATUS #:= MPI_ERR_LASTCODE enum: MPI_ERR_PENDING #:= MPI_ERR_LASTCODE # topology enum: MPI_ERR_TOPOLOGY #:= MPI_ERR_LASTCODE enum: MPI_ERR_DIMS #:= MPI_ERR_LASTCODE # other arguments enum: MPI_ERR_ARG #:= MPI_ERR_LASTCODE # other errors enum: MPI_ERR_OTHER #:= MPI_ERR_LASTCODE enum: MPI_ERR_UNKNOWN #:= MPI_ERR_LASTCODE enum: MPI_ERR_INTERN #:= MPI_ERR_LASTCODE # attributes enum: MPI_ERR_KEYVAL #:= MPI_ERR_LASTCODE # memory allocation enum: MPI_ERR_NO_MEM #:= MPI_ERR_LASTCODE # info object enum: MPI_ERR_INFO_KEY #:= MPI_ERR_LASTCODE enum: MPI_ERR_INFO_VALUE #:= MPI_ERR_LASTCODE enum: MPI_ERR_INFO_NOKEY #:= MPI_ERR_LASTCODE # dynamic process management enum: MPI_ERR_SPAWN #:= MPI_ERR_LASTCODE enum: MPI_ERR_PORT #:= MPI_ERR_LASTCODE enum: MPI_ERR_SERVICE #:= MPI_ERR_LASTCODE enum: MPI_ERR_NAME #:= MPI_ERR_LASTCODE enum: MPI_ERR_PROC_ABORTED #:= MPI_ERR_LASTCODE # one-sided communications enum: MPI_ERR_BASE #:= MPI_ERR_LASTCODE enum: MPI_ERR_SIZE #:= MPI_ERR_LASTCODE enum: MPI_ERR_DISP #:= MPI_ERR_LASTCODE enum: MPI_ERR_ASSERT #:= MPI_ERR_LASTCODE enum: MPI_ERR_LOCKTYPE #:= MPI_ERR_LASTCODE enum: MPI_ERR_RMA_CONFLICT #:= MPI_ERR_LASTCODE enum: MPI_ERR_RMA_SYNC #:= MPI_ERR_LASTCODE enum: MPI_ERR_RMA_RANGE #:= MPI_ERR_LASTCODE enum: MPI_ERR_RMA_ATTACH #:= MPI_ERR_LASTCODE enum: MPI_ERR_RMA_SHARED #:= MPI_ERR_LASTCODE enum: MPI_ERR_RMA_FLAVOR #:= MPI_ERR_LASTCODE # input/output enum: MPI_ERR_BAD_FILE #:= MPI_ERR_LASTCODE enum: MPI_ERR_NO_SUCH_FILE #:= MPI_ERR_LASTCODE enum: MPI_ERR_FILE_EXISTS #:= MPI_ERR_LASTCODE enum: MPI_ERR_FILE_IN_USE #:= MPI_ERR_LASTCODE enum: MPI_ERR_AMODE #:= MPI_ERR_LASTCODE enum: MPI_ERR_ACCESS #:= MPI_ERR_LASTCODE enum: MPI_ERR_READ_ONLY #:= MPI_ERR_LASTCODE enum: MPI_ERR_NO_SPACE #:= MPI_ERR_LASTCODE enum: MPI_ERR_QUOTA #:= MPI_ERR_LASTCODE enum: MPI_ERR_UNSUPPORTED_OPERATION #:= MPI_ERR_LASTCODE enum: MPI_ERR_NOT_SAME #:= MPI_ERR_LASTCODE enum: MPI_ERR_IO #:= MPI_ERR_LASTCODE enum: MPI_ERR_UNSUPPORTED_DATAREP #:= MPI_ERR_LASTCODE enum: MPI_ERR_CONVERSION #:= MPI_ERR_LASTCODE enum: MPI_ERR_DUP_DATAREP #:= MPI_ERR_LASTCODE enum: MPI_ERR_VALUE_TOO_LARGE #:= MPI_ERR_LASTCODE #----------------------------------------------------------------- int MPI_Alloc_mem(MPI_Aint, MPI_Info, void*) int MPI_Free_mem(void*) #----------------------------------------------------------------- int MPI_Init(int*, char**[]) int MPI_Finalize() int MPI_Initialized(int*) int MPI_Finalized(int*) enum: MPI_THREAD_SINGLE #:= 0 enum: MPI_THREAD_FUNNELED 
#:= 1 enum: MPI_THREAD_SERIALIZED #:= 2 enum: MPI_THREAD_MULTIPLE #:= 3 int MPI_Init_thread(int*, char**[], int, int*) int MPI_Query_thread(int*) int MPI_Is_thread_main(int*) #----------------------------------------------------------------- enum: MPI_VERSION #:= 1 enum: MPI_SUBVERSION #:= 0 int MPI_Get_version(int*, int*) enum: MPI_MAX_LIBRARY_VERSION_STRING #:= 1 int MPI_Get_library_version(char[], int*) enum: MPI_MAX_PROCESSOR_NAME #:= 1 int MPI_Get_processor_name(char[], int*) int MPI_Get_hw_resource_info(MPI_Info*) #----------------------------------------------------------------- double MPI_Wtime() double MPI_Wtick() int MPI_Pcontrol(int, ...) #----------------------------------------------------------------- # Fortran INTEGER ctypedef int MPI_Fint enum: MPI_F_SOURCE #:= MPI_UNDEFINED enum: MPI_F_TAG #:= MPI_UNDEFINED enum: MPI_F_ERROR #:= MPI_UNDEFINED enum: MPI_F_STATUS_SIZE #:= MPI_UNDEFINED MPI_Fint* MPI_F_STATUS_IGNORE #:= 0 MPI_Fint* MPI_F_STATUSES_IGNORE #:= 0 int MPI_Status_c2f (MPI_Status*, MPI_Fint*) int MPI_Status_f2c (MPI_Fint*, MPI_Status*) # C -> Fortran MPI_Fint MPI_Type_c2f (MPI_Datatype) MPI_Fint MPI_Request_c2f (MPI_Request) MPI_Fint MPI_Message_c2f (MPI_Message) MPI_Fint MPI_Op_c2f (MPI_Op) MPI_Fint MPI_Group_c2f (MPI_Group) MPI_Fint MPI_Info_c2f (MPI_Info) MPI_Fint MPI_Session_c2f (MPI_Session) MPI_Fint MPI_Comm_c2f (MPI_Comm) MPI_Fint MPI_Win_c2f (MPI_Win) MPI_Fint MPI_File_c2f (MPI_File) MPI_Fint MPI_Errhandler_c2f (MPI_Errhandler) # Fortran -> C MPI_Datatype MPI_Type_f2c (MPI_Fint) MPI_Request MPI_Request_f2c (MPI_Fint) MPI_Message MPI_Message_f2c (MPI_Fint) MPI_Op MPI_Op_f2c (MPI_Fint) MPI_Group MPI_Group_f2c (MPI_Fint) MPI_Info MPI_Info_f2c (MPI_Fint) MPI_Session MPI_Session_f2c (MPI_Fint) MPI_Comm MPI_Comm_f2c (MPI_Fint) MPI_Win MPI_Win_f2c (MPI_Fint) MPI_File MPI_File_f2c (MPI_Fint) MPI_Errhandler MPI_Errhandler_f2c (MPI_Fint) ## ctypedef struct MPI_F08_status #:= MPI_Status ## MPI_F08_status* MPI_F08_STATUS_IGNORE #:= 0 ## MPI_F08_status* MPI_F08_STATUSES_IGNORE #:= 0 ## int MPI_Status_c2f08(MPI_Status*, MPI_F08_status*) ## int MPI_Status_f082c(MPI_F08_status*, MPI_Status*) ## int MPI_Status_f2f08(MPI_Fint*, MPI_F08_status*) ## int MPI_Status_f082f(MPI_F08_status*, MPI_Fint*) #----------------------------------------------------------------- # Deprecated since MPI-4.1 enum: MPI_HOST #:= MPI_KEYVAL_INVALID # Deprecated since MPI-4.0 int MPI_Info_get(MPI_Info, char[], int, char[], int*) int MPI_Info_get_valuelen(MPI_Info, char[], int*, int*) # Deprecated since MPI-2 int MPI_Attr_get(MPI_Comm, int, void*, int*) int MPI_Attr_put(MPI_Comm, int, void*) int MPI_Attr_delete(MPI_Comm, int) ctypedef int MPI_Copy_function(MPI_Comm,int,void*,void*,void*,int*) ctypedef int MPI_Delete_function(MPI_Comm,int,void*,void*) MPI_Copy_function* MPI_DUP_FN #:= 0 MPI_Copy_function* MPI_NULL_COPY_FN #:= 0 MPI_Delete_function* MPI_NULL_DELETE_FN #:= 0 int MPI_Keyval_create(MPI_Copy_function*, MPI_Delete_function*, int*, void*) int MPI_Keyval_free(int*) # Deprecated since MPI-2, removed in MPI-3 int MPI_Errhandler_get(MPI_Comm, MPI_Errhandler*) int MPI_Errhandler_set(MPI_Comm, MPI_Errhandler) ctypedef void MPI_Handler_function(MPI_Comm*,int*,...) 
int MPI_Errhandler_create(MPI_Handler_function*, MPI_Errhandler*) # Deprecated since MPI-2, removed in MPI-3 int MPI_Address(void*, MPI_Aint*) MPI_Datatype MPI_UB #:= MPI_DATATYPE_NULL MPI_Datatype MPI_LB #:= MPI_DATATYPE_NULL int MPI_Type_lb(MPI_Datatype, MPI_Aint*) int MPI_Type_ub(MPI_Datatype, MPI_Aint*) int MPI_Type_extent(MPI_Datatype, MPI_Aint*) int MPI_Type_hvector(int, int, MPI_Aint, MPI_Datatype, MPI_Datatype*) int MPI_Type_hindexed(int, int[], MPI_Aint[], MPI_Datatype, MPI_Datatype*) int MPI_Type_struct(int, int[], MPI_Aint[], MPI_Datatype[], MPI_Datatype*) enum: MPI_COMBINER_HVECTOR_INTEGER #:= MPI_UNDEFINED enum: MPI_COMBINER_HINDEXED_INTEGER #:= MPI_UNDEFINED enum: MPI_COMBINER_STRUCT_INTEGER #:= MPI_UNDEFINED #----------------------------------------------------------------- # MPI-6 process fault tolerance enum: MPI_ERR_REVOKED #:= MPI_ERR_UNKNOWN enum: MPI_ERR_PROC_FAILED #:= MPI_ERR_UNKNOWN enum: MPI_ERR_PROC_FAILED_PENDING #:= MPI_ERR_UNKNOWN int MPI_Comm_revoke(MPI_Comm) int MPI_Comm_is_revoked(MPI_Comm, int*) int MPI_Comm_get_failed(MPI_Comm, MPI_Group*) int MPI_Comm_ack_failed(MPI_Comm, int, int*) int MPI_Comm_agree(MPI_Comm, int*) int MPI_Comm_iagree(MPI_Comm, int*, MPI_Request*) int MPI_Comm_shrink(MPI_Comm, MPI_Comm*) int MPI_Comm_ishrink(MPI_Comm, MPI_Comm*, MPI_Request*) #----------------------------------------------------------------- mpi4py-4.0.3/src/mpi4py/py.typed000066400000000000000000000000331475341043600164530ustar00rootroot00000000000000# Marker file for PEP 561. mpi4py-4.0.3/src/mpi4py/run.py000066400000000000000000000200361475341043600161370ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com """Run Python code using ``mpi4py``. Run Python code (scripts, modules, zip files) using the ``runpy`` module. In case of an unhandled exception, abort execution of the MPI program by calling ``MPI.COMM_WORLD.Abort()``. """ def run_command_line(args=None): """Run command line ``[pyfile | -m mod | -c cmd | -] [arg] ...``. * ``pyfile`` : program read from script file * ``-m mod`` : run library module as a script * ``-c cmd`` : program passed in as a command string * ``-`` : program read from standard input (``sys.stdin``) * ``arg ...``: arguments passed to program in ``sys.argv[1:]`` """ # pylint: disable=import-outside-toplevel import sys from runpy import run_module, run_path def run_string(string, init_globals=None, run_name=None, filename='', argv0='-c'): from runpy import _run_module_code code = compile(string, filename, 'exec', 0, True) kwargs = {'script_name': argv0} return _run_module_code(code, init_globals, run_name, **kwargs) sys.argv[:] = args if args is not None else sys.argv[1:] if sys.argv[0] == '-': cmd = sys.stdin.read() run_string(cmd, run_name='__main__', filename='', argv0='-') elif sys.argv[0] == '-c': cmd = sys.argv.pop(1) # Remove "cmd" from argument list run_string(cmd, run_name='__main__', filename='', argv0='-c') elif sys.argv[0] == '-m': del sys.argv[0] # Remove "-m" from argument list run_module(sys.argv[0], run_name='__main__', alter_sys=True) else: from os.path import realpath, dirname if not getattr(sys.flags, 'isolated', 0): # pragma: no branch sys.path[0] = realpath(dirname(sys.argv[0])) # Fix sys.path run_path(sys.argv[0], run_name='__main__') def set_abort_status(status): """Terminate MPI execution environment at Python exit. Terminate MPI execution environment at Python exit by calling ``MPI.COMM_WORLD.Abort(status)``. This function should be called within an ``except`` block. 
Afterwards, exceptions should be re-raised. """ # pylint: disable=import-outside-toplevel import sys if isinstance(status, SystemExit): status = status.code elif isinstance(status, KeyboardInterrupt): from _signal import SIGINT status = SIGINT + 128 if not isinstance(status, int): status = 0 if status is None else 1 pkg = __spec__.parent mpi = sys.modules.get(f'{pkg}.MPI') if mpi is not None and status: # pylint: disable=protected-access mpi._set_abort_status(status) def main(): """Entry-point for ``python -m mpi4py.run ...``.""" # pylint: disable=too-many-statements # pylint: disable=import-outside-toplevel import os import sys def prefix(): prefix = os.path.dirname(__spec__.origin) print(prefix, file=sys.stdout) sys.exit(0) def version(): from . import __version__ package = __spec__.parent print(f"{package} {__version__}", file=sys.stdout) sys.exit(0) def mpi_std_version(): from . import rc rc.initialize = rc.finalize = False from . import MPI version = ".".join(map(str, (MPI.VERSION, MPI.SUBVERSION))) rtversion = ".".join(map(str, MPI.Get_version())) note = f" (runtime: MPI {rtversion})" if rtversion != version else "" print(f"MPI {version}{note}", file=sys.stdout) sys.exit(0) def mpi_lib_version(): from . import rc rc.initialize = rc.finalize = False from . import MPI library_version = MPI.Get_library_version() print(library_version, file=sys.stdout) sys.exit(0) def usage(errmess=None): from textwrap import dedent python = os.path.basename(sys.executable) program = __spec__.name cmdline = dedent(f""" usage: {python} -m {program} [options] [arg] ... or: {python} -m {program} [options] -m [arg] ... or: {python} -m {program} [options] -c [arg] ... or: {python} -m {program} [options] - [arg] ... """).strip() helptip = dedent(f""" Try `{python} -m {program} -h` for more information. 
""").strip() options = dedent(""" options: --prefix show install path and exit --version show version number and exit --mpi-std-version show MPI standard version and exit --mpi-lib-version show MPI library version and exit -h|--help show this help message and exit -rc set 'mpi4py.rc.key=value' """).strip() if errmess: print(errmess, file=sys.stderr) print(cmdline, file=sys.stderr) print(helptip, file=sys.stderr) sys.exit(1) else: print(cmdline, file=sys.stdout) print(options, file=sys.stdout) sys.exit(0) def parse_command_line(args=None): # pylint: disable=too-many-branches class Options: # pylint: disable=too-few-public-methods # pylint: disable=missing-class-docstring rc_args = {} def poparg(args): if len(args) < 2 or args[1].startswith('-'): usage('Argument expected for option: ' + args[0]) return args.pop(1) options = Options() args = sys.argv[1:] if args is None else args[:] while args and args[0].startswith('-'): arg0 = args[0] if arg0 in ('-m', '-c', '-'): break # Stop processing options if arg0 in ('-h', '-help', '--help'): usage() # Print help and exit if arg0 in ('-prefix', '--prefix'): prefix() # Print install path and exit if arg0 in ('-version', '--version'): version() # Print version number and exit if arg0 in ('-mpi-std-version', '--mpi-std-version'): mpi_std_version() # Print MPI standard version and exit if arg0 in ('-mpi-lib-version', '--mpi-lib-version'): mpi_lib_version() # Print MPI library version and exit if arg0.startswith('--'): if '=' in arg0: opt, _, arg = arg0[1:].partition('=') if opt in ('-rc',): arg0, args[1:1] = opt, [arg] else: arg0 = arg0[1:] if arg0 == '-rc': from ast import literal_eval for entry in poparg(args).split(','): key, _, val = entry.partition('=') if not key or not val: usage('Cannot parse rc option: ' + entry) try: val = literal_eval(val) except ValueError: pass options.rc_args[key] = val else: usage('Unknown option: ' + args[0]) del args[0] # Check remaining args and return to caller if not args: usage("No path specified for execution") if args[0] in ('-m', '-c') and len(args) < 2: usage("Argument expected for option: " + args[0]) return options, args def bootstrap(options): if options.rc_args: # Set mpi4py.rc parameters from . import rc rc(**options.rc_args) # Parse and process command line options options, args = parse_command_line() bootstrap(options) # Run user code. In case of an unhandled exception, abort # execution of the MPI program by calling 'MPI_Abort()'. try: run_command_line(args) except SystemExit as exc: set_abort_status(exc) raise except KeyboardInterrupt as exc: set_abort_status(exc) raise except BaseException: set_abort_status(1) raise if __name__ == '__main__': main() mpi4py-4.0.3/src/mpi4py/run.pyi000066400000000000000000000003001475341043600163000ustar00rootroot00000000000000from typing import Any from typing import Sequence def run_command_line(args: Sequence[str] | None = None) -> None: ... def set_abort_status(status: Any) -> None: ... def main() -> None: ... 
mpi4py-4.0.3/src/mpi4py/typing.py000066400000000000000000000120631475341043600166460ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com """Typing support.""" # pylint: disable=too-few-public-methods import sys from typing import ( Any, Union, Optional, Sequence, List, Dict, Tuple, TypeVar, ) try: # pragma: no branch from typing import Protocol except ImportError: # pragma: no cover try: from typing_extensions import Protocol except ImportError: Protocol = object from numbers import ( Integral, ) from .MPI import ( Datatype, BottomType, InPlaceType, ) __all__ = [ 'SupportsBuffer', 'SupportsDLPack', 'SupportsCAI', 'Buffer', 'Bottom', 'InPlace', 'Aint', 'Count', 'Displ', 'Offset', 'TypeSpec', 'BufSpec', 'BufSpecB', 'BufSpecV', 'BufSpecW', 'TargetSpec', ] _Stream = Union[int, Any] _PyCapsule = object _DeviceType = int _DeviceID = int class SupportsBuffer(Protocol): """Python buffer protocol. .. seealso:: :ref:`python:bufferobjects` """ if sys.version_info >= (3, 12): # pragma: no branch def __buffer__(self, flags: int) -> memoryview: """Create a buffer from a Python object.""" class SupportsDLPack(Protocol): """DLPack data interchange protocol. .. seealso:: :ref:`dlpack:python-spec` """ def __dlpack__(self, *, stream: Optional[_Stream] = None) -> _PyCapsule: """Export data for consumption as a DLPack capsule.""" def __dlpack_device__(self) -> Tuple[_DeviceType, _DeviceID]: """Get device type and device ID in DLPack format.""" class SupportsCAI(Protocol): """CUDA Array Interface (CAI) protocol. .. seealso:: :ref:`numba:cuda-array-interface` """ @property def __cuda_array_interface__(self) -> Dict[str, Any]: """CAI protocol data.""" Buffer = Union[ SupportsBuffer, SupportsDLPack, SupportsCAI, ] """ Buffer-like object. """ Bottom = Union[BottomType, None] """ Start of the address range. """ InPlace = Union[InPlaceType, None] """ In-place buffer argument. """ Aint = Integral """ Address-sized integral type. """ Count = Integral """ Integral type for counts. """ Displ = Integral """ Integral type for displacements. """ Offset = Integral """ Integral type for offsets. """ TypeSpec = Union[Datatype, str] """ Datatype specification. """ BufSpec = Union[ Buffer, Tuple[Buffer, Count], Tuple[Buffer, TypeSpec], Tuple[Buffer, Count, TypeSpec], Tuple[Bottom, Count, Datatype], List[Any], ] """ Buffer specification. * `Buffer` * Tuple[`Buffer`, `Count`] * Tuple[`Buffer`, `TypeSpec`] * Tuple[`Buffer`, `Count`, `TypeSpec`] * Tuple[`Bottom`, `Count`, `Datatype`] """ BufSpecB = Union[ Buffer, Tuple[Buffer, Count], Tuple[Buffer, TypeSpec], Tuple[Buffer, Count, TypeSpec], List[Any], ] """ Buffer specification (block). * `Buffer` * Tuple[`Buffer`, `Count`] * Tuple[`Buffer`, `TypeSpec`] * Tuple[`Buffer`, `Count`, `TypeSpec`] """ BufSpecV = Union[ Buffer, Tuple[Buffer, Sequence[Count]], Tuple[Buffer, Tuple[Sequence[Count], Sequence[Displ]]], Tuple[Buffer, TypeSpec], Tuple[Buffer, Sequence[Count], TypeSpec], Tuple[Buffer, Tuple[Sequence[Count], Sequence[Displ]], TypeSpec], Tuple[Buffer, Sequence[Count], Sequence[Displ], TypeSpec], Tuple[Bottom, Tuple[Sequence[Count], Sequence[Displ]], Datatype], Tuple[Bottom, Sequence[Count], Sequence[Displ], Datatype], List[Any], ] """ Buffer specification (vector). 
* `Buffer` * Tuple[`Buffer`, Sequence[`Count`]] * Tuple[`Buffer`, Tuple[Sequence[`Count`], Sequence[`Displ`]]] * Tuple[`Buffer`, `TypeSpec`] * Tuple[`Buffer`, Sequence[`Count`], `TypeSpec`] * Tuple[`Buffer`, Tuple[Sequence[`Count`], Sequence[`Displ`]], `TypeSpec`] * Tuple[`Buffer`, Sequence[`Count`], Sequence[`Displ`], `TypeSpec`] * Tuple[`Bottom`, Tuple[Sequence[`Count`], Sequence[`Displ`]], `Datatype`] * Tuple[`Bottom`, Sequence[`Count`], Sequence[`Displ`], `Datatype`] """ BufSpecW = Union[ Tuple[Buffer, Sequence[Datatype]], Tuple[Buffer, Tuple[Sequence[Count], Sequence[Displ]], Sequence[Datatype]], Tuple[Buffer, Sequence[Count], Sequence[Displ], Sequence[Datatype]], Tuple[Bottom, Tuple[Sequence[Count], Sequence[Displ]], Sequence[Datatype]], Tuple[Bottom, Sequence[Count], Sequence[Displ], Sequence[Datatype]], List[Any], ] """ Buffer specification (generalized). * Tuple[`Buffer`, Sequence[`Datatype`]] * Tuple[`Buffer`, \ Tuple[Sequence[`Count`], Sequence[`Displ`]], Sequence[`Datatype`]] * Tuple[`Buffer`, Sequence[`Count`], Sequence[`Displ`], Sequence[`Datatype`]] * Tuple[`Bottom`, \ Tuple[Sequence[`Count`], Sequence[`Displ`]], Sequence[`Datatype`]] * Tuple[`Bottom`, Sequence[`Count`], Sequence[`Displ`], Sequence[`Datatype`]] """ TargetSpec = Union[ Displ, Tuple[()], Tuple[Displ], Tuple[Displ, Count], Tuple[Displ, Count, TypeSpec], List[Any], ] """ Target specification. * `Displ` * Tuple[()] * Tuple[`Displ`] * Tuple[`Displ`, `Count`] * Tuple[`Displ`, `Count`, `Datatype`] """ S = TypeVar('S') T = TypeVar('T') U = TypeVar('U') V = TypeVar('V') mpi4py-4.0.3/src/mpi4py/typing.pyi000066400000000000000000000057341475341043600170260ustar00rootroot00000000000000import sys from typing import ( Any, Union, Optional, Protocol, Sequence, List, Dict, Tuple, TypeVar, ) if sys.version_info >= (3, 10): from typing import TypeAlias else: from typing_extensions import TypeAlias from numbers import ( Integral, ) from .MPI import ( Datatype, BottomType, InPlaceType, ) __all__: List[str] = [ 'SupportsBuffer', 'SupportsDLPack', 'SupportsCAI', 'Buffer', 'Bottom', 'InPlace', 'Aint', 'Count', 'Displ', 'Offset', 'TypeSpec', 'BufSpec', 'BufSpecB', 'BufSpecV', 'BufSpecW', 'TargetSpec', ] _Stream: TypeAlias = Union[int, Any] _PyCapsule: TypeAlias = object _DeviceType: TypeAlias = int _DeviceID: TypeAlias = int class SupportsBuffer(Protocol): if sys.version_info >= (3, 12): def __buffer__(self, __flags: int) -> memoryview: ... class SupportsDLPack(Protocol): def __dlpack__(self, *, stream: Optional[_Stream] = None) -> _PyCapsule: ... def __dlpack_device__(self) -> Tuple[_DeviceType, _DeviceID]: ... class SupportsCAI(Protocol): @property def __cuda_array_interface__(self) -> Dict[str, Any]: ... 
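# Illustrative sketch (editor's addition, not part of the stubs): the aliases
# declared below describe the buffer-specification arguments accepted by the
# buffer-based `MPI.Comm` methods. Assuming a NumPy array of ten doubles
# (an assumption made only for this example), a BufSpec may be written as:
#
#   import numpy as np
#   from mpi4py import MPI
#   comm = MPI.COMM_WORLD
#   a = np.empty(10, dtype='d')
#   if comm.Get_rank() == 0:
#       comm.Send([a, 10, MPI.DOUBLE], dest=1, tag=7)  # (buffer, count, datatype)
#   elif comm.Get_rank() == 1:
#       comm.Recv([a, MPI.DOUBLE], source=0, tag=7)    # (buffer, typespec)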
Buffer: TypeAlias = Union[ SupportsBuffer, SupportsDLPack, SupportsCAI, ] Bottom: TypeAlias = Union[BottomType, None] InPlace: TypeAlias = Union[InPlaceType, None] Aint: TypeAlias = Integral Count: TypeAlias = Integral Displ: TypeAlias = Integral Offset: TypeAlias = Integral TypeSpec: TypeAlias = Union[Datatype, str] BufSpec: TypeAlias = Union[ Buffer, Tuple[Buffer, Count], Tuple[Buffer, TypeSpec], Tuple[Buffer, Count, TypeSpec], Tuple[Bottom, Count, Datatype], List[Any], ] BufSpecB: TypeAlias = Union[ Buffer, Tuple[Buffer, Count], Tuple[Buffer, TypeSpec], Tuple[Buffer, Count, TypeSpec], List[Any], ] BufSpecV: TypeAlias = Union[ Buffer, Tuple[Buffer, Sequence[Count]], Tuple[Buffer, Tuple[Sequence[Count], Sequence[Displ]]], Tuple[Buffer, TypeSpec], Tuple[Buffer, Sequence[Count], TypeSpec], Tuple[Buffer, Tuple[Sequence[Count], Sequence[Displ]], TypeSpec], Tuple[Buffer, Sequence[Count], Sequence[Displ], TypeSpec], Tuple[Bottom, Tuple[Sequence[Count], Sequence[Displ]], Datatype], Tuple[Bottom, Sequence[Count], Sequence[Displ], Datatype], List[Any], ] BufSpecW: TypeAlias = Union[ Tuple[Buffer, Sequence[Datatype]], Tuple[Buffer, Tuple[Sequence[Count], Sequence[Displ]], Sequence[Datatype]], Tuple[Buffer, Sequence[Count], Sequence[Displ], Sequence[Datatype]], Tuple[Bottom, Tuple[Sequence[Count], Sequence[Displ]], Sequence[Datatype]], Tuple[Bottom, Sequence[Count], Sequence[Displ], Sequence[Datatype]], List[Any], ] TargetSpec: TypeAlias = Union[ Displ, Tuple[()], Tuple[Displ], Tuple[Displ, Count], Tuple[Displ, Count, TypeSpec], List[Any], ] S = TypeVar('S') # noqa: PYI001 T = TypeVar('T') # noqa: PYI001 U = TypeVar('U') # noqa: PYI001 V = TypeVar('V') # noqa: PYI001 mpi4py-4.0.3/src/mpi4py/util/000077500000000000000000000000001475341043600157355ustar00rootroot00000000000000mpi4py-4.0.3/src/mpi4py/util/__init__.py000066400000000000000000000001271475341043600200460ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com """Miscellaneous utilities.""" mpi4py-4.0.3/src/mpi4py/util/__init__.pyi000066400000000000000000000000101475341043600202060ustar00rootroot00000000000000# empty mpi4py-4.0.3/src/mpi4py/util/dtlib.py000066400000000000000000000231531475341043600174110ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com """Convert NumPy and MPI datatypes.""" # pylint: disable=too-many-locals # pylint: disable=too-many-branches # pylint: disable=too-many-statements # pylint: disable=too-many-return-statements from .. 
import MPI try: from numpy import dtype as _np_dtype except ImportError: # pragma: no cover _np_dtype = None def _get_datatype(dtype): return MPI.Datatype.fromcode(dtype.char) def _get_typecode(datatype): # pylint: disable=protected-access return MPI._typecode(datatype) def _get_alignment(datatype): # pylint: disable=protected-access return MPI._typealign(datatype) def _is_aligned(datatype, offset=0): """Determine whether an MPI datatype is aligned.""" if datatype.is_predefined: if offset == 0: return True alignment = _get_alignment(datatype) or 0 return offset % alignment == 0 combiner = datatype.combiner basetype, _, info = datatype.decode() types, disps = [basetype], [0] try: if combiner == MPI.COMBINER_RESIZED: disps = [info['extent']] if combiner == MPI.COMBINER_STRUCT: types = info['datatypes'] disps = info['displacements'] if combiner == MPI.COMBINER_HVECTOR: disps = [info['stride'] if info['count'] > 1 else 0] if combiner == MPI.COMBINER_HINDEXED: disps = info['displacements'] if combiner == MPI.COMBINER_HINDEXED_BLOCK: disps = info['displacements'] return all( _is_aligned(t, offset + d) for t, d in zip(types, disps) ) finally: for _tp in types: if not _tp.is_predefined: _tp.Free() def from_numpy_dtype(dtype): """Convert NumPy datatype to MPI datatype.""" if _np_dtype is None: raise RuntimeError("NumPy is not available") from None dtype = _np_dtype(dtype) if dtype.hasobject: raise ValueError("NumPy datatype with object entries") if not dtype.isnative: raise ValueError("NumPy datatype with non-native byteorder") # struct data type fields = dtype.fields if fields: blocklengths = [] displacements = [] datatypes = [] try: for name in dtype.names or (): ftype, fdisp, *_ = fields[name] blocklengths.append(1) displacements.append(fdisp) datatypes.append(from_numpy_dtype(ftype)) datatype = MPI.Datatype.Create_struct( blocklengths, displacements, datatypes, ) finally: for mtp in datatypes: mtp.Free() try: return datatype.Create_resized(0, dtype.itemsize) finally: datatype.Free() # subarray data type subdtype = dtype.subdtype if subdtype: base, shape = subdtype datatype = from_numpy_dtype(base) try: if len(shape) == 1: return datatype.Create_contiguous(shape[0]) starts = (0,) * len(shape) return datatype.Create_subarray(shape, shape, starts) finally: datatype.Free() # elementary data type datatype = _get_datatype(dtype) return datatype.Dup() def to_numpy_dtype(datatype): """Convert MPI datatype to NumPy datatype.""" def mpi2npy(datatype, count): dtype = to_numpy_dtype(datatype) return dtype if count == 1 else (dtype, count) def np_dtype(spec, **kwargs): if _np_dtype is None: return spec if not kwargs else (spec, kwargs) return _np_dtype(spec, **kwargs) if datatype == MPI.DATATYPE_NULL: raise ValueError("cannot convert null MPI datatype to NumPy") combiner = datatype.combiner # named elementary datatype if combiner == MPI.COMBINER_NAMED: # elementary datatype typecode = _get_typecode(datatype) if typecode is not None: return np_dtype(typecode) # pair datatype for MINLOC/MAXLOC reductions names = ('SHORT', 'INT', 'LONG', 'FLOAT', 'DOUBLE', 'LONG_DOUBLE') types = [getattr(MPI, f'{name}_INT') for name in names] typename = names[types.index(datatype)] typecode = _get_typecode(getattr(MPI, typename)) return np_dtype(f'{typecode},i', align=True) # user-defined datatype basetype, _, info = datatype.decode() datatypes = [basetype] try: # duplicated datatype if combiner == MPI.COMBINER_DUP: return to_numpy_dtype(basetype) # contiguous datatype if combiner == MPI.COMBINER_CONTIGUOUS: dtype = 
to_numpy_dtype(basetype) count = info['count'] return np_dtype((dtype, (count,))) # subarray datatype if combiner == MPI.COMBINER_SUBARRAY: dtype = to_numpy_dtype(basetype) sizes = info['sizes'] subsizes = info['subsizes'] starts = info['starts'] order = info['order'] if subsizes == sizes and min(starts) == max(starts) == 0: if order == MPI.ORDER_FORTRAN: sizes = sizes[::-1] return np_dtype((dtype, tuple(sizes))) raise ValueError("cannot convert subarray MPI datatype to NumPy") # value-index datatype if combiner == MPI.COMBINER_VALUE_INDEX: # pragma: no cover value = to_numpy_dtype(info['value']) index = to_numpy_dtype(info['index']) datatypes = [value, index] return np_dtype( [ ('f0', value), ('f1', index), ], align=True, ) # struct datatype aligned = True if combiner == MPI.COMBINER_RESIZED: if basetype.combiner == MPI.COMBINER_STRUCT: aligned = _is_aligned(basetype, info['extent']) combiner = MPI.COMBINER_STRUCT _, _, info = basetype.decode() datatypes.pop().Free() if combiner == MPI.COMBINER_STRUCT: datatypes = info['datatypes'] blocklengths = info['blocklengths'] displacements = info['displacements'] names = [f'f{i}' for i in range(len(datatypes))] formats = list(map(mpi2npy, datatypes, blocklengths)) offsets = displacements itemsize = datatype.extent aligned &= all(map(_is_aligned, datatypes, offsets)) return np_dtype( { 'names': names, 'formats': formats, 'offsets': offsets, 'itemsize': itemsize, 'aligned': aligned, } ) # vector datatype combiner_vector = ( MPI.COMBINER_VECTOR, MPI.COMBINER_HVECTOR, ) if combiner in combiner_vector: dtype = to_numpy_dtype(basetype) count = info['count'] blocklength = info['blocklength'] stride = info['stride'] if combiner == MPI.COMBINER_VECTOR: stride *= basetype.extent aligned = _is_aligned(basetype) if combiner == MPI.COMBINER_HVECTOR: stride = stride if count > 1 else 0 aligned = _is_aligned(basetype, stride) names = [f'f{i}' for i in range(count)] formats = [(dtype, (blocklength,))] * count offsets = [stride * i for i in range(count)] itemsize = datatype.extent return np_dtype( { 'names': names, 'formats': formats, 'offsets': offsets, 'itemsize': itemsize, 'aligned': aligned, } ) # indexed datatype combiner_indexed = ( MPI.COMBINER_INDEXED, MPI.COMBINER_HINDEXED, MPI.COMBINER_INDEXED_BLOCK, MPI.COMBINER_HINDEXED_BLOCK, ) if combiner in combiner_indexed: dtype = to_numpy_dtype(basetype) stride = 1 aligned = _is_aligned(basetype) blocklengths = [] displacements = info['displacements'] if combiner in combiner_indexed[:2]: blocklengths = info['blocklengths'] if combiner in combiner_indexed[2:]: blocklengths = [info['blocklength']] * len(displacements) if combiner in combiner_indexed[0::2]: stride = basetype.extent if combiner in combiner_indexed[1::2]: aligned &= all(_is_aligned(basetype, d) for d in displacements) names = [f'f{i}' for i in range(len(displacements))] formats = [(dtype, (blen,)) for blen in blocklengths] offsets = [disp * stride for disp in displacements] return np_dtype( { 'names': names, 'formats': formats, 'offsets': offsets, 'aligned': aligned, } ) # Fortran 90 datatype combiner_f90 = ( MPI.COMBINER_F90_INTEGER, MPI.COMBINER_F90_REAL, MPI.COMBINER_F90_COMPLEX, ) if combiner in combiner_f90: datatypes.pop() typecode = _get_typecode(datatype) return np_dtype(typecode) raise ValueError("cannot convert MPI datatype to NumPy") finally: for _tp in datatypes: if not _tp.is_predefined: _tp.Free() mpi4py-4.0.3/src/mpi4py/util/dtlib.pyi000066400000000000000000000004171475341043600175600ustar00rootroot00000000000000from typing import Any 
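# Illustrative sketch (editor's addition, not part of the stub): a typical
# round trip through the converters declared below; the float64 dtype is an
# arbitrary choice made only for this example.
#
#   import numpy as np
#   from mpi4py.util import dtlib
#   numpy_dtype = np.dtype('float64')
#   mpi_datatype = dtlib.from_numpy_dtype(numpy_dtype)   # duplicate of MPI.DOUBLE
#   assert dtlib.to_numpy_dtype(mpi_datatype) == numpy_dtype
#   mpi_datatype.Free()  # caller owns the returned handle and must free it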
from numpy import dtype from numpy.typing import DTypeLike from ..MPI import Datatype # mypy: disable-error-code="no-any-unimported" def from_numpy_dtype(dtype: DTypeLike) -> Datatype: ... def to_numpy_dtype(datatype: Datatype) -> dtype[Any]: ... mpi4py-4.0.3/src/mpi4py/util/pkl5.py000066400000000000000000000526701475341043600171740ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com """Pickle-based communication using protocol 5.""" import struct as _struct from .. import MPI from ..MPI import ( ROOT, PROC_NULL, ANY_SOURCE, ANY_TAG, Status, ) from ..MPI import ( Pickle, _comm_lock, _commctx_intra, _commctx_inter, ) pickle = Pickle() def _pickle_dumps(obj): return pickle.dumps_oob(obj) def _pickle_loads(data, bufs): return pickle.loads_oob(data, bufs) def _bigmpi_create_type(basetype, count, blocksize): qsize, rsize = divmod(count, blocksize) qtype = basetype.Create_vector( qsize, blocksize, blocksize) rtype = basetype.Create_contiguous(rsize) rdisp = qtype.Get_extent()[1] bigtype = MPI.Datatype.Create_struct( (1, 1), (0, rdisp), (qtype, rtype)) qtype.Free() rtype.Free() return bigtype class _BigMPI: """Support for large message counts.""" blocksize = 1024**3 # 1 GiB if MPI.VERSION >= 4: # pragma: no cover blocksize = 1024**6 # 1 EiB def __init__(self): self.cache = {} def __enter__(self): return self def __exit__(self, *exc): cache = self.cache for dtype in cache.values(): dtype.Free() cache.clear() def __call__(self, buf): buf = memoryview(buf) count = buf.nbytes blocksize = self.blocksize if count < blocksize: return (buf, count, MPI.BYTE) cache = self.cache dtype = cache.get(count) if dtype is not None: return (buf, 1, dtype) dtype = _bigmpi_create_type(MPI.BYTE, count, blocksize) cache[count] = dtype.Commit() return (buf, 1, dtype) _bigmpi = _BigMPI() def _info_typecode(): return 'q' def _info_datatype(): code = _info_typecode() return MPI.Datatype.fromcode(code) def _info_pack(info): code = _info_typecode() size = len(info) return _struct.pack(f"{size}{code}", *info) def _info_alloc(size): code = _info_typecode() itemsize = _struct.calcsize(code) return bytearray(size * itemsize) def _info_unpack(info): code = _info_typecode() itemsize = _struct.calcsize(code) size = len(info) // itemsize return _struct.unpack(f"{size}{code}", info) def _new_buffer(size): return MPI.buffer.allocate(size) def _send_raw(comm, send, data, bufs, dest, tag): # pylint: disable=too-many-arguments,too-many-positional-arguments info = [len(data)] info.extend(len(sbuf) for sbuf in bufs) infotype = _info_datatype() info = _info_pack(info) send(comm, (info, infotype), dest, tag) with _bigmpi as bigmpi: send(comm, bigmpi(data), dest, tag) for sbuf in bufs: send(comm, bigmpi(sbuf), dest, tag) def _send(comm, send, obj, dest, tag): if dest == PROC_NULL: send(comm, (None, 0, MPI.BYTE), dest, tag) return data, bufs = _pickle_dumps(obj) with _comm_lock(comm, 'send'): _send_raw(comm, send, data, bufs, dest, tag) def _isend(comm, isend, obj, dest, tag): def send(comm, buf, dest, tag): sreqs.append(isend(comm, buf, dest, tag)) sreqs = [] _send(comm, send, obj, dest, tag) request = Request(sreqs) return request def _recv_raw(comm, recv, buf, source, tag, status=None): # pylint: disable=too-many-arguments,too-many-positional-arguments if status is None: status = Status() MPI.Comm.Probe(comm, source, tag, status) source = status.Get_source() tag = status.Get_tag() infotype = _info_datatype() infosize = status.Get_elements(infotype) info = _info_alloc(infosize) MPI.Comm.Recv(comm, (info, 
infotype), source, tag, status) info = _info_unpack(info) if buf is not None: buf = memoryview(buf).cast('B') if len(buf) > info[0]: buf = buf[:info[0]] if len(buf) < info[0]: buf = None data = _new_buffer(info[0]) if buf is None else buf bufs = list(map(_new_buffer, info[1:])) with _bigmpi as bigmpi: recv(comm, bigmpi(data), source, tag) for rbuf in bufs: recv(comm, bigmpi(rbuf), source, tag) status.Set_elements(MPI.BYTE, sum(info)) return data, bufs def _recv(comm, recv, buf, source, tag, status): # pylint: disable=too-many-arguments,too-many-positional-arguments if source == PROC_NULL: recv(comm, (None, 0, MPI.BYTE), source, tag, status) return None with _comm_lock(comm, 'recv'): data, bufs = _recv_raw(comm, recv, buf, source, tag, status) return _pickle_loads(data, bufs) def _mprobe(comm, mprobe, source, tag, status): if source == PROC_NULL: rmsg = MPI.Comm.Mprobe(comm, source, tag, status) return Message([rmsg]) if status is None: status = Status() with _comm_lock(comm, 'recv'): message = [] numbytes = 0 rmsg = mprobe(comm, source, tag, status) if rmsg is None: return None message.append(rmsg) source = status.Get_source() tag = status.Get_tag() infotype = _info_datatype() infosize = status.Get_elements(infotype) for _ in range(infosize): rmsg = MPI.Comm.Mprobe(comm, source, tag, status) message.append(rmsg) numbytes += status.Get_elements(MPI.BYTE) status.Set_elements(MPI.BYTE, numbytes) return Message(message) def _mrecv_info(rmsg, size, status=None): mrecv = MPI.Message.Recv infotype = _info_datatype() info = _info_alloc(size) mrecv(rmsg, (info, infotype), status) info = _info_unpack(info) return info def _mrecv_none(rmsg, mrecv, status): _mrecv_info(rmsg, 0, status) noproc = MPI.MESSAGE_NO_PROC mrecv(noproc, (None, 0, MPI.BYTE)) data, bufs = _pickle_dumps(None) return (bytearray(data), bufs) def _mrecv_data(message, mrecv, status=None): if message[0] == MPI.MESSAGE_NO_PROC: rmsg = message[0] return _mrecv_none(rmsg, mrecv, status) rmsg = iter(message) icnt = len(message) - 1 info = _mrecv_info(next(rmsg), icnt, status) data = _new_buffer(info[0]) bufs = list(map(_new_buffer, info[1:])) with _bigmpi as bigmpi: mrecv(next(rmsg), bigmpi(data)) for rbuf in bufs: mrecv(next(rmsg), bigmpi(rbuf)) if status is not None: status.Set_elements(MPI.BYTE, sum(info)) return (data, bufs) def _mrecv(message, status): def mrecv(rmsg, buf): MPI.Message.Recv(rmsg, buf) data, bufs = _mrecv_data(message, mrecv, status) return _pickle_loads(data, bufs) def _imrecv(message): def mrecv(rmsg, buf): rreqs.append(MPI.Message.Irecv(rmsg, buf)) rreqs = [] data, bufs = _mrecv_data(message, mrecv) request = Request(rreqs) setattr(request, '_data_bufs', (data, bufs)) # noqa: B010 return request def _req_load(request): data_bufs = getattr(request, '_data_bufs', None) if request == MPI.REQUEST_NULL and data_bufs is not None: delattr(request, '_data_bufs') if data_bufs is not None: data, bufs = data_bufs obj = _pickle_loads(data, bufs) return obj return None def _test(request, test, status): statuses = None if status is None else [status] flag = test(request, statuses) if flag: obj = _req_load(request) return (flag, obj) return (flag, None) def _testall(requests, testall, statuses): if isinstance(statuses, list): for _ in range(len(requests) - len(statuses)): statuses.append(Status()) reqarray = [] stsarray = None for req in requests: reqarray.extend(req) if statuses is not None: stsarray = [] for req, sts in zip(requests, statuses): stsarray.extend([sts] * len(req)) flag = testall(reqarray, stsarray) if flag: objs = 
[_req_load(req) for req in requests] return (flag, objs) return (flag, None) def _bcast_intra_raw(comm, bcast, data, bufs, root): rank = comm.Get_rank() if rank == root: info = [len(data)] info.extend(len(sbuf) for sbuf in bufs) infotype = _info_datatype() infosize = _info_pack([len(info)]) bcast(comm, (infosize, infotype), root) info = _info_pack(info) bcast(comm, (info, infotype), root) else: infotype = _info_datatype() infosize = _info_alloc(1) bcast(comm, (infosize, infotype), root) infosize = _info_unpack(infosize)[0] info = _info_alloc(infosize) bcast(comm, (info, infotype), root) info = _info_unpack(info) data = _new_buffer(info[0]) bufs = list(map(_new_buffer, info[1:])) with _bigmpi as bigmpi: bcast(comm, bigmpi(data), root) for rbuf in bufs: bcast(comm, bigmpi(rbuf), root) return data, bufs def _bcast_intra(comm, bcast, obj, root): rank = comm.Get_rank() if rank == root: data, bufs = _pickle_dumps(obj) else: data, bufs = _pickle_dumps(None) with _comm_lock(comm, 'bcast'): data, bufs = _bcast_intra_raw(comm, bcast, data, bufs, root) return _pickle_loads(data, bufs) def _bcast_inter(comm, bcast, obj, root): rank = comm.Get_rank() size = comm.Get_remote_size() comm, tag, localcomm, _ = _commctx_inter(comm) if root == PROC_NULL: return None elif root == ROOT: send = MPI.Comm.Send data, bufs = _pickle_dumps(obj) _send_raw(comm, send, data, bufs, 0, tag) return None elif 0 <= root < size: if rank == 0: recv = MPI.Comm.Recv data, bufs = _recv_raw(comm, recv, None, root, tag) else: data, bufs = _pickle_dumps(None) with _comm_lock(localcomm, 'bcast'): data, bufs = _bcast_intra_raw(localcomm, bcast, data, bufs, 0) return _pickle_loads(data, bufs) comm.Call_errhandler(MPI.ERR_ROOT) raise MPI.Exception(MPI.ERR_ROOT) def _bcast(comm, bcast, obj, root): if comm.Is_inter(): return _bcast_inter(comm, bcast, obj, root) else: return _bcast_intra(comm, bcast, obj, root) def _get_p2p_backend(): reqs = [] def send(comm, buf, dest, tag): reqs.append(MPI.Comm.Isend(comm, buf, dest, tag)) def recv(comm, buf, source, tag): MPI.Comm.Recv(comm, buf, source, tag) return reqs, send, recv def _gather(comm, obj, root): reqs, send, recv = _get_p2p_backend() if comm.Is_inter(): comm, tag, *_ = _commctx_inter(comm) size = comm.Get_remote_size() if root == PROC_NULL: send = recv = None elif root == MPI.ROOT: send = None elif 0 <= root < size: recv = None else: comm.Call_errhandler(MPI.ERR_ROOT) raise MPI.Exception(MPI.ERR_ROOT) else: comm, tag = _commctx_intra(comm) size = comm.Get_size() if root != comm.Get_rank(): recv = None if root < 0 or root >= size: comm.Call_errhandler(MPI.ERR_ROOT) raise MPI.Exception(MPI.ERR_ROOT) if send: data, bufs = _pickle_dumps(obj) _send_raw(comm, send, data, bufs, root, tag) objs = None if recv: objs = [] for source in range(size): data, bufs = _recv_raw(comm, recv, None, source, tag) obj = _pickle_loads(data, bufs) objs.append(obj) if send: MPI.Request.Waitall(reqs) return objs def _scatter(comm, objs, root): # pylint: disable=too-many-branches reqs, send, recv = _get_p2p_backend() if comm.Is_inter(): comm, tag, *_ = _commctx_inter(comm) size = comm.Get_remote_size() if root == PROC_NULL: send = recv = None elif root == ROOT: recv = None elif 0 <= root < size: send = None else: comm.Call_errhandler(MPI.ERR_ROOT) raise MPI.Exception(MPI.ERR_ROOT) else: comm, tag = _commctx_intra(comm) size = comm.Get_size() if root != comm.Get_rank(): send = None if root < 0 or root >= size: comm.Call_errhandler(MPI.ERR_ROOT) raise MPI.Exception(MPI.ERR_ROOT) if send: if objs is None: objs = 
[None] * size elif not isinstance(objs, list): objs = list(objs) if len(objs) != size: raise ValueError(f"expecting {size} items, got {len(objs)}") for dest, obj in enumerate(objs): data, bufs = _pickle_dumps(obj) _send_raw(comm, send, data, bufs, dest, tag) obj = None if recv: data, bufs = _recv_raw(comm, recv, None, root, tag) obj = _pickle_loads(data, bufs) if send: MPI.Request.Waitall(reqs) return obj def _allgather(comm, obj): reqs, send, recv = _get_p2p_backend() if comm.Is_inter(): comm, tag, *_ = _commctx_inter(comm) size = comm.Get_remote_size() else: comm, tag = _commctx_intra(comm) size = comm.Get_size() data, bufs = _pickle_dumps(obj) for dest in range(size): _send_raw(comm, send, data, bufs, dest, tag) objs = [] for source in range(size): data, bufs = _recv_raw(comm, recv, None, source, tag) obj = _pickle_loads(data, bufs) objs.append(obj) MPI.Request.Waitall(reqs) return objs def _alltoall(comm, objs): reqs, send, recv = _get_p2p_backend() if comm.Is_inter(): comm, tag, *_ = _commctx_inter(comm) size = comm.Get_remote_size() else: comm, tag = _commctx_intra(comm) size = comm.Get_size() if objs is None: objs = [None] * size elif not isinstance(objs, list): objs = list(objs) if len(objs) != size: raise ValueError(f"expecting {size} items, got {len(objs)}") for dest, obj in enumerate(objs): data, bufs = _pickle_dumps(obj) _send_raw(comm, send, data, bufs, dest, tag) objs = [] for source in range(size): data, bufs = _recv_raw(comm, recv, None, source, tag) obj = _pickle_loads(data, bufs) objs.append(obj) MPI.Request.Waitall(reqs) return objs class Request(tuple): """Request.""" def __new__(cls, request=None): """Create and return a new object.""" if request is None: request = (MPI.REQUEST_NULL,) if isinstance(request, MPI.Request): request = (request,) return super().__new__(cls, request) def __eq__(self, other): """Return ``self==other``.""" if isinstance(other, Request): return tuple(self) == tuple(other) if isinstance(other, MPI.Request): return all(req == other for req in self) return NotImplemented def __ne__(self, other): """Return ``self!=other``.""" if isinstance(other, Request): return tuple(self) != tuple(other) if isinstance(other, MPI.Request): return any(req != other for req in self) return NotImplemented def __bool__(self): """Return ``bool(self)``.""" return any(req for req in self) def Free(self) -> None: """Free a communication request.""" # pylint: disable=invalid-name for req in self: req.Free() def free(self) -> None: """Free a communication request.""" for req in self: req.free() def cancel(self): """Cancel a communication request.""" for req in self: req.Cancel() def get_status(self, status=None): """Non-destructive test for the completion of a request.""" statuses = [status] + [None] * max(len(self) - 1, 0) return all(map(MPI.Request.Get_status, self, statuses)) def test(self, status=None): """Test for the completion of a request.""" return _test(self, MPI.Request.Testall, status) def wait(self, status=None): """Wait for a request to complete.""" return _test(self, MPI.Request.Waitall, status)[1] @classmethod def get_status_all(cls, requests, statuses=None): """Non-destructive test for the completion of all requests.""" arglist = [requests] if statuses is not None: ns, nr = len(statuses), len(requests) statuses += [Status() for _ in range(ns, nr)] arglist.append(statuses) return all(map(Request.get_status, *arglist)) @classmethod def testall(cls, requests, statuses=None): """Test for the completion of all requests.""" return _testall(requests, 
MPI.Request.Testall, statuses) @classmethod def waitall(cls, requests, statuses=None): """Wait for all requests to complete.""" return _testall(requests, MPI.Request.Waitall, statuses)[1] class Message(tuple): """Message.""" def __new__(cls, message=None): """Create and return a new object.""" if message is None: message = (MPI.MESSAGE_NULL,) if isinstance(message, MPI.Message): message = (message,) return super().__new__(cls, message) def __eq__(self, other): """Return ``self==other``.""" if isinstance(other, Message): return tuple(self) == tuple(other) if isinstance(other, MPI.Message): return all(msg == other for msg in self) return NotImplemented def __ne__(self, other): """Return ``self!=other``.""" if isinstance(other, Message): return tuple(self) != tuple(other) if isinstance(other, MPI.Message): return any(msg != other for msg in self) return NotImplemented def __bool__(self): """Return ``bool(self)``.""" return any(msg for msg in self) def free(self) -> None: """Do nothing.""" for msg in self: msg.free() def recv(self, status=None): """Blocking receive of matched message.""" return _mrecv(self, status) def irecv(self): """Nonblocking receive of matched message.""" return _imrecv(self) @classmethod def probe(cls, comm, source=ANY_SOURCE, tag=ANY_TAG, status=None): """Blocking test for a matched message.""" return _mprobe(comm, MPI.Comm.Mprobe, source, tag, status) @classmethod def iprobe(cls, comm, source=ANY_SOURCE, tag=ANY_TAG, status=None): """Nonblocking test for a matched message.""" return _mprobe(comm, MPI.Comm.Improbe, source, tag, status) class Comm(MPI.Comm): """Communicator.""" def __new__(cls, comm=None): """Create and return a new communicator.""" return MPI.Comm.__new__(cls, comm) def send(self, obj, dest, tag=0): """Blocking send in standard mode.""" _send(self, MPI.Comm.Send, obj, dest, tag) def bsend(self, obj, dest, tag=0): """Blocking send in buffered mode.""" _send(self, MPI.Comm.Bsend, obj, dest, tag) def ssend(self, obj, dest, tag=0): """Blocking send in synchronous mode.""" sreq = _isend(self, MPI.Comm.Issend, obj, dest, tag) MPI.Request.Waitall(sreq) def isend(self, obj, dest, tag=0): """Nonblocking send in standard mode.""" return _isend(self, MPI.Comm.Isend, obj, dest, tag) def ibsend(self, obj, dest, tag=0): """Nonblocking send in buffered mode.""" return _isend(self, MPI.Comm.Ibsend, obj, dest, tag) def issend(self, obj, dest, tag=0): """Nonblocking send in synchronous mode.""" return _isend(self, MPI.Comm.Issend, obj, dest, tag) def recv(self, buf=None, source=ANY_SOURCE, tag=ANY_TAG, status=None): """Blocking receive.""" return _recv(self, MPI.Comm.Recv, buf, source, tag, status) def irecv(self, buf=None, source=ANY_SOURCE, tag=ANY_TAG): # noqa: ARG002 """Nonblocking receive.""" raise RuntimeError("unsupported") def sendrecv(self, sendobj, dest, sendtag=0, recvbuf=None, source=ANY_SOURCE, recvtag=ANY_TAG, status=None): """Send and receive.""" # pylint: disable=too-many-arguments,too-many-positional-arguments sreq = _isend(self, MPI.Comm.Isend, sendobj, dest, sendtag) robj = _recv(self, MPI.Comm.Recv, recvbuf, source, recvtag, status) MPI.Request.Waitall(sreq) return robj def mprobe(self, source=ANY_SOURCE, tag=ANY_TAG, status=None): """Blocking test for a matched message.""" return _mprobe(self, MPI.Comm.Mprobe, source, tag, status) def improbe(self, source=ANY_SOURCE, tag=ANY_TAG, status=None): """Nonblocking test for a matched message.""" return _mprobe(self, MPI.Comm.Improbe, source, tag, status) def bcast(self, obj, root=0): """Broadcast.""" return 
_bcast(self, MPI.Comm.Bcast, obj, root) def gather(self, sendobj, root=0): """Gather.""" return _gather(self, sendobj, root) def scatter(self, sendobj, root=0): """Scatter.""" return _scatter(self, sendobj, root) def allgather(self, sendobj): """Gather to All.""" return _allgather(self, sendobj) def alltoall(self, sendobj): """All to All Scatter/Gather.""" return _alltoall(self, sendobj) class Intracomm(Comm, MPI.Intracomm): """Intracommunicator.""" class Intercomm(Comm, MPI.Intercomm): """Intercommunicator.""" mpi4py-4.0.3/src/mpi4py/util/pkl5.pyi000066400000000000000000000115071475341043600173370ustar00rootroot00000000000000import sys from typing import Any from typing import Iterable, Sequence from typing import overload if sys.version_info >= (3, 11): from typing import Self else: from typing_extensions import Self from .. import MPI from ..MPI import ( ROOT as ROOT, PROC_NULL as PROC_NULL, ANY_SOURCE as ANY_SOURCE, ANY_TAG as ANY_TAG, Status as Status, Pickle as Pickle, ) from ..typing import Buffer pickle: Pickle = ... class _BigMPI: blocksize: int = ... cache: dict[int, MPI.Datatype] = ... def __init__(self) -> None: ... def __enter__(self) -> Self: ... def __exit__(self, *exc: object) -> None: ... def __call__(self, buf: Buffer) -> tuple[Buffer, int, MPI.Datatype]: ... _bigmpi: _BigMPI = ... class Request(tuple[MPI.Request, ...]): @overload def __new__(cls, request: MPI.Request | None = None) -> Request: ... @overload def __new__(cls, request: Iterable[MPI.Request]) -> Request: ... def __eq__(self, other: object) -> bool: ... def __ne__(self, other: object) -> bool: ... def __bool__(self) -> bool: ... def Free(self) -> None: ... def free(self) -> None: ... def cancel(self) -> None: ... def get_status( self, status: Status | None = None, ) -> bool: ... def test( self, status: Status | None = None, ) -> tuple[bool, Any | None]: ... def wait( self, status: Status | None = None, ) -> Any: ... @classmethod def get_status_all( cls, requests: Sequence[Request], statuses: list[Status] | None = None, ) -> bool: ... @classmethod def testall( cls, requests: Sequence[Request], statuses: list[Status] | None = None, ) -> tuple[bool, list[Any] | None]: ... @classmethod def waitall( cls, requests: Sequence[Request], statuses: list[Status] | None = None, ) -> list[Any]: ... class Message(tuple[MPI.Message, ...]): @overload def __new__(cls, message: MPI.Message | None = None) -> Message: ... @overload def __new__(cls, message: Iterable[MPI.Message]) -> Message: ... def __eq__(self, other: object) -> bool: ... def __ne__(self, other: object) -> bool: ... def __bool__(self) -> bool: ... def free(self) -> None: ... def recv(self, status: Status | None = None) -> Any: ... def irecv(self) -> Request: ... @classmethod def probe( cls, comm: MPI.Comm, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Status | None = None, ) -> Message: ... @classmethod def iprobe( cls, comm: MPI.Comm, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Status | None = None, ) -> Message | None: ... class Comm(MPI.Comm): def __new__(cls, comm: MPI.Comm | None = None) -> Self: ... def send(self, obj: Any, dest: int, tag: int = 0) -> None: ... def bsend(self, obj: Any, dest: int, tag: int = 0) -> None: ... def ssend(self, obj: Any, dest: int, tag: int = 0) -> None: ... def isend(self, obj: Any, dest: int, tag: int = 0) -> Request: ... # type: ignore[override] def ibsend(self, obj: Any, dest: int, tag: int = 0) -> Request: ... # type: ignore[override] def issend(self, obj: Any, dest: int, tag: int = 0) -> Request: ... 
# type: ignore[override] def recv( self, buf: Buffer | None = None, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Status | None = None, ) -> Any: ... def irecv( # type: ignore[override] self, buf: Buffer | None = None, source: int = ANY_SOURCE, tag: int = ANY_TAG, ) -> Request: ... def sendrecv( self, sendobj: Any, dest: int, sendtag: int = 0, recvbuf: Buffer | None = None, source: int = ANY_SOURCE, recvtag: int = ANY_TAG, status: Status | None = None, ) -> Any: ... def mprobe( # type: ignore[override] self, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Status | None = None, ) -> Message: ... def improbe( # type: ignore[override] self, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Status | None = None, ) -> Message | None: ... def bcast( self, obj: Any, root: int = 0, ) -> Any: ... def gather( self, sendobj: Any, root: int = 0, ) -> list[Any] | None: ... def scatter( self, sendobj: Sequence[Any] | None, root: int = 0, ) -> Any: ... def allgather( self, sendobj: Any, ) -> list[Any]: ... def alltoall( self, sendobj: Sequence[Any], ) -> list[Any]: ... class Intracomm(Comm, MPI.Intracomm): ... class Intercomm(Comm, MPI.Intercomm): ... mpi4py-4.0.3/src/mpi4py/util/pool.py000066400000000000000000000250721475341043600172660ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com """`multiprocessing.pool` interface via `mpi4py.futures`.""" import functools as _functools import operator as _operator import os as _os import threading as _threading import warnings as _warnings import weakref as _weakref from .. import futures as _futures __all__ = [ "Pool", "ThreadPool", "AsyncResult", "ApplyResult", "MapResult", ] class Pool: """Pool using MPI processes as workers.""" Executor = _futures.MPIPoolExecutor def __init__( self, processes=None, initializer=None, initargs=(), **kwargs, ): """Initialize a new Pool instance. Args: processes: Number of worker processes. initializer: A callable used to initialize worker processes. initargs: A tuple of arguments to pass to the initializer. .. note:: Additional keyword arguments are passed down to the `MPIPoolExecutor` constructor. .. warning:: The *maxtasksperchild* and *context* arguments of `multiprocessing.pool.Pool` are not supported. Specifying *maxtasksperchild* or *context* with a value other than `None` will issue a warning of category `UserWarning`. """ if processes is not None: if processes < 1: raise ValueError("number of processes must be at least 1") if initializer is not None: if not callable(initializer): raise TypeError("initializer must be a callable") for name in ("maxtasksperchild", "context"): if kwargs.pop(name, None) is not None: message = f"argument {name!r} is not supported" _warnings.warn(message, stacklevel=2) self.executor = self.Executor( processes, initializer=initializer, initargs=initargs, **kwargs, ) def apply(self, func, args=(), kwds={}): # noqa: B006 """Call *func* with arguments *args* and keyword arguments *kwds*. Equivalent to ``func(*args, **kwds)``.
""" # pylint: disable=dangerous-default-value future = self.executor.submit(func, *args, **kwds) return future.result() def apply_async( self, func, args=(), kwds={}, # noqa: B006 callback=None, error_callback=None, ): """Asynchronous version of `apply()` returning `ApplyResult`.""" # pylint: disable=too-many-arguments,too-many-positional-arguments # pylint: disable=dangerous-default-value future = self.executor.submit(func, *args, **kwds) return ApplyResult(future, callback, error_callback) def map(self, func, iterable, chunksize=None): """Apply *func* to each element in *iterable*. Equivalent to ``list(map(func, iterable))``. Block until all results are ready and return them in a `list`. The *iterable* is chopped into a number of chunks which are submitted as separate tasks. The (approximate) size of these chunks can be specified by setting *chunksize* to a positive integer. Consider using `imap()` or `imap_unordered()` with explicit *chunksize* for better efficiency. """ iterable, chunksize = _chunksize(self, iterable, chunksize) return list(self.imap(func, iterable, chunksize=chunksize)) def map_async( self, func, iterable, chunksize=None, callback=None, error_callback=None, ): """Asynchronous version of `map()` returning `MapResult`.""" # pylint: disable=too-many-arguments,too-many-positional-arguments iterable, chunksize = _chunksize(self, iterable, chunksize) result_iterator = self.imap(func, iterable, chunksize=chunksize) future = _async_executor(self).submit(list, result_iterator) return MapResult(future, callback, error_callback) def imap(self, func, iterable, chunksize=1): """Like `map()` but return an `iterator`. Equivalent to ``map(func, iterable)``. """ return self.executor.map( func, iterable, chunksize=chunksize, ) def imap_unordered(self, func, iterable, chunksize=1): """Like `imap()` but ordering of results is arbitrary.""" return self.executor.map( func, iterable, chunksize=chunksize, unordered=True, ) def starmap(self, func, iterable, chunksize=None): """Apply *func* to each argument tuple in *iterable*. Equivalent to ``list(itertools.starmap(func, iterable))``. Block until all results are ready and return them in a `list`. The *iterable* is chopped into a number of chunks which are submitted as separate tasks. The (approximate) size of these chunks can be specified by setting *chunksize* to a positive integer. Consider using `istarmap()` or `istarmap_unordered()` with explicit *chunksize* for better efficiency. """ iterable, chunksize = _chunksize(self, iterable, chunksize) return list(self.istarmap(func, iterable, chunksize=chunksize)) def starmap_async( self, func, iterable, chunksize=None, callback=None, error_callback=None ): """Asynchronous version of `starmap()` returning `MapResult`.""" # pylint: disable=too-many-arguments,too-many-positional-arguments iterable, chunksize = _chunksize(self, iterable, chunksize) result_iterator = self.istarmap(func, iterable, chunksize=chunksize) future = _async_executor(self).submit(list, result_iterator) return MapResult(future, callback, error_callback) def istarmap(self, func, iterable, chunksize=1): """Like `starmap()` but return an `iterator`. Equivalent to ``itertools.starmap(func, iterable)``.
""" return self.executor.starmap( func, iterable, chunksize=chunksize, ) def istarmap_unordered(self, func, iterable, chunksize=1): """Like `istarmap()` but ordering of results is arbitrary.""" return self.executor.starmap( func, iterable, chunksize=chunksize, unordered=True, ) def close(self): """Prevent any more tasks from being submitted to the pool.""" self.executor.shutdown(wait=False) def terminate(self): """Stop the worker processes without completing pending tasks.""" self.executor.shutdown(wait=False, cancel_futures=True) def join(self): """Wait for the worker processes to exit.""" self.executor.shutdown(wait=True) _async_executor_shutdown(self) def __enter__(self): """Return pool.""" return self def __exit__(self, *args): """Close pool.""" self.close() class ThreadPool(Pool): """Pool using threads as workers.""" Executor = _futures.ThreadPoolExecutor class AsyncResult: """Asynchronous result.""" def __init__(self, future, callback=None, error_callback=None): """Wrap a future object and register callbacks.""" self.future = future self._event = _threading.Event() done_cb = _functools.partial( _async_future_callback, event=self._event, callback=callback, error_callback=error_callback, ) self.future.add_done_callback(done_cb) def get(self, timeout=None): """Return the result when it arrives. If *timeout* is not `None` and the result does not arrive within *timeout* seconds then raise `TimeoutError`. If the remote call raised an exception then that exception will be reraised. """ self.wait(timeout) if not self.ready(): raise TimeoutError try: return self.future.result() except _futures.CancelledError: raise TimeoutError from None def wait(self, timeout=None): """Wait until the result is available or *timeout* seconds pass.""" self._event.wait(timeout) def ready(self): """Return whether the call has completed.""" return self._event.is_set() def successful(self): """Return whether the call completed without raising an exception. If the result is not ready then raise `ValueError`. 
""" if not self.ready(): raise ValueError(f"{self!r} not ready") try: return self.future.exception() is None except _futures.CancelledError: return False class ApplyResult(AsyncResult): """Result type of `apply_async()`.""" class MapResult(AsyncResult): """Result type of `map_async()` and `starmap_async()`.""" def _chunksize(pool, iterable, chunksize): if chunksize is None: chunksize = 1 num_workers = getattr(pool.executor, "_max_workers", 0) if num_workers > 0: # pragma: no branch num_tasks = _operator.length_hint(iterable) if num_tasks == 0: iterable = list(iterable) num_tasks = len(iterable) if num_tasks > 0: quot, rem = divmod(num_tasks, num_workers * 4) chunksize = quot + bool(rem) return iterable, chunksize def _async_future_callback(future, event, callback, error_callback): assert future.done() # noqa: S101 try: exception = future.exception() except _futures.CancelledError: exception = TimeoutError() try: if exception is not None: if error_callback is not None: error_callback(exception) else: if callback is not None: callback(future.result()) finally: event.set() _cpu_count = getattr(_os, 'process_cpu_count', _os.cpu_count) _async_executor_lock = _threading.Lock() _async_executor_cache = _weakref.WeakKeyDictionary() \ # type: _weakref.WeakKeyDictionary[Pool, _futures.ThreadPoolExecutor] def _async_get_max_workers(pool): max_workers = getattr(pool, '_async_max_workers', 0) return max_workers or min(4, _cpu_count() or 1) def _async_executor(pool): with _async_executor_lock: executor = _async_executor_cache.get(pool) if executor is None: max_workers = _async_get_max_workers(pool) executor = _futures.ThreadPoolExecutor(max_workers) _async_executor_cache[pool] = executor return executor def _async_executor_shutdown(pool, wait=True): with _async_executor_lock: executor = _async_executor_cache.pop(pool, None) if executor is not None: executor.shutdown(wait=wait) mpi4py-4.0.3/src/mpi4py/util/pool.pyi000066400000000000000000000066331475341043600174410ustar00rootroot00000000000000import sys from typing import ( Any, Generic, ) from typing import ( Callable, Mapping, Iterable, Iterator, ) if sys.version_info >= (3, 11): from typing import Self else: from typing_extensions import Self from .. import futures from ..typing import S, T __all__: list[str] = [ "Pool", "ThreadPool", "AsyncResult", "ApplyResult", "MapResult", ] class Pool: Executor: type[futures.MPIPoolExecutor] executor: futures.Executor def __init__( self, processes: int | None = None, initializer: Callable[..., object] | None = None, initargs: Iterable[Any] = (), **kwargs: Any, ) -> None: ... def apply( self, func: Callable[..., T], args: Iterable[Any] = (), kwds: Mapping[str, Any] = {}, ) -> T: ... def apply_async( self, func: Callable[..., T], args: Iterable[Any] = (), kwds: Mapping[str, Any] = {}, callback: Callable[[T], object] | None = None, error_callback: Callable[[BaseException], object] | None = None, ) -> AsyncResult[T]: ... def map( self, func: Callable[[S], T], iterable: Iterable[S], chunksize: int | None = None, ) -> list[T]: ... def map_async( self, func: Callable[[S], T], iterable: Iterable[S], chunksize: int | None = None, callback: Callable[[T], None] | None = None, error_callback: Callable[[BaseException], None] | None = None, ) -> MapResult[T]: ... def imap( self, func: Callable[[S], T], iterable: Iterable[S], chunksize: int = 1, ) -> Iterator[T]: ... def imap_unordered( self, func: Callable[[S], T], iterable: Iterable[S], chunksize: int = 1, ) -> Iterator[T]: ... 
def starmap( self, func: Callable[..., T], iterable: Iterable[Iterable[Any]], chunksize: int | None = None, ) -> list[T]: ... def starmap_async( self, func: Callable[..., T], iterable: Iterable[Iterable[Any]], chunksize: int | None = None, callback: Callable[[T], None] | None = None, error_callback: Callable[[BaseException], None] | None = None, ) -> MapResult[T]: ... def istarmap( self, func: Callable[..., T], iterable: Iterable[Iterable[Any]], chunksize: int = 1, ) -> Iterator[T]: ... def istarmap_unordered( self, func: Callable[..., T], iterable: Iterable[Iterable[Any]], chunksize: int = 1, ) -> Iterator[T]: ... def close(self) -> None: ... def terminate(self) -> None: ... def join(self) -> None: ... def __enter__(self) -> Self: ... def __exit__(self, *args: object) -> None: ... class ThreadPool(Pool): Executor: type[futures.ThreadPoolExecutor] class AsyncResult(Generic[T]): future: futures.Future[T] def __init__( self, future: futures.Future[T], callback: Callable[[T], None] | None = None, error_callback: Callable[[BaseException], None] | None = None, ) -> None: ... def get(self, timeout: float | None = None) -> T: ... def wait(self, timeout: float | None = None) -> None: ... def ready(self) -> bool: ... def successful(self) -> bool: ... class ApplyResult(AsyncResult[T]): ... class MapResult(AsyncResult[list[T]]): ... mpi4py-4.0.3/src/mpi4py/util/sync.py000066400000000000000000000463571475341043600173020ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com """Synchronization utilities.""" import array as _array import time as _time from .. import MPI __all__ = [ "Sequential", "Counter", "Mutex", "Condition", "Semaphore", ] class Sequential: """Sequential execution.""" def __init__(self, comm, tag=0): """Initialize sequential execution. Args: comm: Intracommunicator context. tag: Tag for point-to-point communication. """ self.comm = comm self.tag = int(tag) def __enter__(self): """Enter sequential execution.""" self.begin() return self def __exit__(self, *exc): """Exit sequential execution.""" self.end() def begin(self): """Begin sequential execution.""" comm = self.comm size = comm.Get_size() if size == 1: return rank = comm.Get_rank() buf = (bytearray(), 0, MPI.BYTE) tag = self.tag if rank != 0: comm.Recv(buf, rank - 1, tag) def end(self): """End sequential execution.""" comm = self.comm size = comm.Get_size() if size == 1: return rank = comm.Get_rank() buf = (bytearray(), 0, MPI.BYTE) tag = self.tag if rank != size - 1: comm.Send(buf, rank + 1, tag) class Counter: """Global counter.""" def __init__( self, start=0, step=1, *, typecode='i', comm=MPI.COMM_SELF, info=MPI.INFO_NULL, root=0, ): """Initialize global counter. Args: start: Start value. step: Increment value. typecode: Type code as defined in the `array` module. comm: Intracommunicator context. info: Info object for RMA context creation. root: Process rank holding the counter memory. 
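A minimal sketch of a counter shared by all ranks, assuming the script is run with several processes (for example ``mpiexec -n 4 python script.py``)::

    from mpi4py import MPI
    from mpi4py.util.sync import Counter

    counter = Counter(comm=MPI.COMM_WORLD)   # counter memory lives on rank 0
    value = counter.next()                   # atomic fetch-and-increment
    print(MPI.COMM_WORLD.Get_rank(), value)  # every rank sees a distinct value
    counter.free()                           # collective: release RMA window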
""" # pylint: disable=too-many-arguments datatype = MPI.Datatype.fromcode(typecode) typechar = datatype.typechar rank = comm.Get_rank() count = 1 if rank == root else 0 unitsize = datatype.Get_size() window = MPI.Win.Allocate(count * unitsize, unitsize, info, comm) self._start = start self._step = step self._window = window self._typechar = typechar self._location = (root, 0) self._comm = comm init = _array.array(typechar, [start] * count) window.Lock(rank, MPI.LOCK_SHARED) window.Accumulate(init, rank, op=MPI.REPLACE) window.Unlock(rank) comm.Barrier() def __iter__(self): """Implement ``iter(self)``.""" return self def __next__(self): """Implement ``next(self)``.""" return self.next() def next(self, incr=None): """Return current value and increment. Args: incr: Increment value. Returns: The counter value before incrementing. """ if not self._window: raise RuntimeError("counter already freed") window = self._window typechar = self._typechar root, disp = self._location incr = incr if incr is not None else self._step incr = _array.array(typechar, [incr]) prev = _array.array(typechar, [0]) op = MPI.SUM if incr[0] != 0 else MPI.NO_OP window.Lock(root, MPI.LOCK_SHARED) window.Fetch_and_op(incr, prev, root, disp, op) window.Unlock(root) return prev[0] def free(self): """Free counter resources.""" window = self._window self._window = MPI.WIN_NULL self._comm = MPI.COMM_NULL window.free() class Mutex: """Mutual exclusion.""" def __init__( self, *, recursive=False, comm=MPI.COMM_SELF, info=MPI.INFO_NULL, ): """Initialize mutex object. Args: comm: Intracommunicator context. recursive: Whether to allow recursive acquisition. info: Info object for RMA context creation. """ null_rank, tail_rank = MPI.PROC_NULL, 0 rank = comm.Get_rank() count = 3 if rank == tail_rank else 2 unitsize = MPI.INT.Get_size() window = MPI.Win.Allocate(count * unitsize, unitsize, info, comm) self._recursive = bool(recursive) self._window = window self._comm = comm init = [False, null_rank, null_rank][:count] init = _array.array('i', init) window.Lock(rank, MPI.LOCK_SHARED) window.Accumulate(init, rank, op=MPI.REPLACE) window.Unlock(rank) comm.Barrier() def _acquire(self, blocking=True): null_rank, tail_rank = MPI.PROC_NULL, 0 lock_id, next_id, tail_id = (0, 1, 2) window = self._window self_rank = window.group_rank window.Lock_all() rank = _array.array('i', [self_rank]) null = _array.array('i', [null_rank]) prev = _array.array('i', [null_rank]) window.Accumulate(null, self_rank, next_id, MPI.REPLACE) if blocking: window.Fetch_and_op(rank, prev, tail_rank, tail_id, MPI.REPLACE) else: window.Compare_and_swap(rank, null, prev, tail_rank, tail_id) window.Flush(tail_rank) locked = int(prev[0] == null_rank) if blocking and not locked: # Add ourselves to the waiting queue window.Accumulate(rank, prev[0], next_id, MPI.REPLACE) # Spin until we are given the lock locked = self._spinloop(lock_id, 0) # Set the local lock flag flag = _array.array('i', [locked]) window.Accumulate(flag, self_rank, lock_id, MPI.REPLACE) window.Unlock_all() return bool(locked) def _release(self): null_rank, tail_rank = MPI.PROC_NULL, 0 lock_id, next_id, tail_id = (0, 1, 2) window = self._window self_rank = window.group_rank window.Lock_all() rank = _array.array('i', [self_rank]) null = _array.array('i', [null_rank]) prev = _array.array('i', [null_rank]) window.Compare_and_swap(null, rank, prev, tail_rank, tail_id) window.Flush(tail_rank) if prev[0] != rank[0]: # Spin until the next process notify us next_rank = self._spinloop(next_id, null_rank) # Pass the lock 
over to the next process true = _array.array('i', [True]) window.Accumulate(true, next_rank, lock_id, MPI.REPLACE) # Set the local lock flag false = _array.array('i', [False]) window.Accumulate(false, self_rank, lock_id, MPI.REPLACE) window.Unlock_all() def _count_fetch_and_op(self, value, op): lock_id = 0 window = self._window self_rank = window.group_rank incr = _array.array('i', [value]) prev = _array.array('i', [0]) window.Lock(self_rank, MPI.LOCK_SHARED) window.Fetch_and_op(incr, prev, self_rank, lock_id, op) window.Unlock(self_rank) return prev[0] def _acquire_restore(self, state): self._acquire() if self._recursive: self._count_fetch_and_op(state, MPI.REPLACE) def _release_save(self): state = None if self._recursive: state = self._count_fetch_and_op(0, MPI.NO_OP) self._release() return state def _spinloop(self, index, sentinel): window = self._window return _rma_spinloop(window, 'i', index, sentinel) def __enter__(self): """Acquire mutex.""" self.acquire() return self def __exit__(self, *exc): """Release mutex.""" self.release() def acquire(self, blocking=True): """Acquire mutex, blocking or non-blocking. Args: blocking: If `True`, block until the mutex is held. Returns: `True` if the mutex is held, `False` otherwise. """ if not self._window: raise RuntimeError("mutex already freed") if self.locked(): if self._recursive: self._count_fetch_and_op(+1, MPI.SUM) return True raise RuntimeError("cannot acquire already held mutex") return self._acquire(blocking) def release(self): """Release mutex.""" if not self._window: raise RuntimeError("mutex already freed") if not self.locked(): raise RuntimeError("cannot release unheld mutex") if self._recursive: if self._count_fetch_and_op(-1, MPI.SUM) > 1: return self._release() def locked(self): """Return whether the mutex is held.""" if not self._window: raise RuntimeError("mutex already freed") lock_id = 0 memory = memoryview(self._window).cast('i') return bool(memory[lock_id]) def count(self): """Return the recursion count.""" if not self._window: raise RuntimeError("mutex already freed") return self._count_fetch_and_op(0, MPI.NO_OP) def free(self): """Free mutex resources.""" if self._window: if self.locked(): self._release() window = self._window self._window = MPI.WIN_NULL self._comm = MPI.COMM_NULL window.free() class Condition: """Condition variable.""" def __init__( self, mutex=None, *, recursive=True, comm=MPI.COMM_SELF, info=MPI.INFO_NULL, ): """Initialize condition variable. Args: mutex: Mutual exclusion object. recursive: Whether to allow recursive acquisition. comm: Intracommunicator context. info: Info object for RMA context creation. 
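A minimal notification sketch, assuming two or more processes (for example ``mpiexec -n 2 python script.py``); the shared flag is illustrative and uses a `Counter` to hold the state guarded by the condition::

    from mpi4py import MPI
    from mpi4py.util.sync import Condition, Counter

    comm = MPI.COMM_WORLD
    flag = Counter(comm=comm)       # shared state guarded by the condition
    cv = Condition(comm=comm)
    if comm.Get_rank() == 0:
        with cv:
            flag.next()             # set the shared flag (0 -> 1)
            cv.notify_all()         # wake up any processes already waiting
    else:
        with cv:
            # re-checks the flag after every wakeup, so no wakeup is lost
            cv.wait_for(lambda: flag.next(0) > 0)
    flag.free()                     # collective cleanup on all ranks
    cv.free()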
""" if mutex is None: self._mutex = Mutex(recursive=recursive, comm=comm, info=info) self._mutex_free = self._mutex.free else: self._mutex = mutex self._mutex_free = lambda: None comm = mutex._comm # pylint disable=protected-access null_rank, tail_rank = MPI.PROC_NULL, 0 rank = comm.Get_rank() count = 3 if rank == tail_rank else 2 unitsize = MPI.INT.Get_size() window = MPI.Win.Allocate(count * unitsize, unitsize, info, comm) self._window = window self._comm = comm init = [0, null_rank, null_rank][:count] init = _array.array('i', init) window.Lock(rank, MPI.LOCK_SHARED) window.Accumulate(init, rank, op=MPI.REPLACE) window.Unlock(rank) comm.Barrier() def _enqueue(self, process): null_rank, tail_rank = MPI.PROC_NULL, 0 next_id, tail_id = (1, 2) window = self._window rank = _array.array('i', [process]) prev = _array.array('i', [null_rank]) next = _array.array('i', [process]) # pylint: disable=W0622 window.Lock_all() window.Fetch_and_op(rank, prev, tail_rank, tail_id, MPI.REPLACE) window.Flush(tail_rank) if prev[0] != null_rank: window.Fetch_and_op(rank, next, prev[0], next_id, MPI.REPLACE) window.Flush(prev[0]) window.Accumulate(next, rank[0], next_id, MPI.REPLACE) window.Unlock_all() def _dequeue(self, maxnumprocs): null_rank, tail_rank = MPI.PROC_NULL, 0 next_id, tail_id = (1, 2) window = self._window null = _array.array('i', [null_rank]) prev = _array.array('i', [null_rank]) next = _array.array('i', [null_rank]) # pylint: disable=W0622 processes = [] maxnumprocs = max(0, min(maxnumprocs, window.group_size)) window.Lock_all() window.Fetch_and_op(null, prev, tail_rank, tail_id, MPI.NO_OP) window.Flush(tail_rank) if prev[0] != null_rank: empty = False window.Fetch_and_op(null, next, prev[0], next_id, MPI.NO_OP) window.Flush(prev[0]) while len(processes) < maxnumprocs and not empty: rank = next[0] processes.append(rank) window.Fetch_and_op(null, next, rank, next_id, MPI.NO_OP) window.Flush(rank) empty = processes[0] == next[0] if not empty: window.Accumulate(next, prev[0], next_id, MPI.REPLACE) else: window.Accumulate(null, tail_rank, tail_id, MPI.REPLACE) window.Unlock_all() return processes def _sleep(self): flag_id = 0 window = self._window window.Lock_all() _rma_spinloop(window, 'i', flag_id, 0, reset=True) window.Unlock_all() def _wakeup(self, processes): flag_id = 0 window = self._window flag = _array.array('i', [1]) window.Lock_all() for rank in processes: window.Accumulate(flag, rank, flag_id, MPI.REPLACE) window.Unlock_all() def _release_save(self): # pylint: disable=protected-access return self._mutex._release_save() def _acquire_restore(self, state): # pylint: disable=protected-access self._mutex._acquire_restore(state) def _mutex_reset(self): # pylint: disable=protected-access if self._mutex._window: if self._mutex.locked(): self._mutex._release() def __enter__(self): """Acquire the underlying mutex.""" self.acquire() return self def __exit__(self, *exc): """Release the underlying mutex.""" self.release() def acquire(self, blocking=True): """Acquire the underlying mutex.""" if not self._window: raise RuntimeError("condition already freed") return self._mutex.acquire(blocking) def release(self): """Release the underlying mutex.""" if not self._window: raise RuntimeError("condition already freed") self._mutex.release() def locked(self): """Return whether the underlying mutex is held.""" return self._mutex.locked() def wait(self): """Wait until notified by another process. Returns: Always `True`. 
""" if not self._window: raise RuntimeError("condition already freed") if not self.locked(): raise RuntimeError("cannot wait on unheld mutex") self._enqueue(self._window.group_rank) state = self._release_save() self._sleep() self._acquire_restore(state) return True def wait_for(self, predicate): """Wait until a predicate evaluates to `True`. Args: predicate: callable returning a boolean. Returns: The result of predicate once it evaluates to `True`. """ result = predicate() while not result: self.wait() result = predicate() return result def notify(self, n=1): """Wake up one or more processes waiting on this condition. Args: n: Maximum number of processes to wake up. Returns: The actual number of processes woken up. """ if not self._window: raise RuntimeError("condition already freed") if not self.locked(): raise RuntimeError("cannot notify on unheld mutex") processes = self._dequeue(n) numprocs = len(processes) self._wakeup(processes) return numprocs def notify_all(self): """Wake up all processes waiting on this condition. Returns: The actual number of processes woken up. """ return self.notify((1 << 31) - 1) def free(self): """Free condition resources.""" self._mutex_reset() self._mutex_free() window = self._window self._window = MPI.WIN_NULL self._comm = MPI.COMM_NULL window.free() class Semaphore: """Semaphore object.""" def __init__( self, value=1, *, bounded=True, comm=MPI.COMM_SELF, info=MPI.INFO_NULL, ): """Initialize semaphore object. Args: value: Initial value for internal counter. bounded: Bound internal counter to initial value. comm: Intracommunicator context. info: Info object for RMA context creation. """ if value < 0: raise ValueError("initial value must be non-negative") self._bounded = bool(bounded) self._counter = Counter(value, comm=comm, info=info) self._condvar = Condition(recursive=False, comm=comm, info=info) self._comm = comm def __enter__(self): """Acquire semaphore.""" self.acquire() return self def __exit__(self, *exc): """Release semaphore.""" self.release() def acquire(self, blocking=True): """Acquire semaphore, decrementing the internal counter by one. Args: blocking: If `True`, block until the semaphore is acquired. Returns: `True` if the semaphore is acquired, `False` otherwise. """ with self._condvar: while self._counter.next(0) == 0: if not blocking: return False self._condvar.wait() self._counter.next(-1) return True def release(self, n=1): """Release semaphore, incrementing the internal counter by one or more. Args: n: Increment for the internal counter. 
""" if n < 1: raise ValueError('increment must be one or more') with self._condvar: if self._bounded: # pylint: disable=protected-access current = self._counter.next(0) initial = self._counter._start if current + n > initial: raise ValueError("semaphore released too many times") self._counter.next(n) self._condvar.notify(n) def free(self): """Free semaphore resources.""" self._counter.free() self._condvar.free() self._comm = MPI.COMM_NULL _BACKOFF_DELAY_MAX = 1 / 1024 _BACKOFF_DELAY_MIN = _BACKOFF_DELAY_MAX / 1024 _BACKOFF_DELAY_INIT = 0.0 _BACKOFF_DELAY_RATIO = 2.0 def _new_backoff( delay_max=_BACKOFF_DELAY_MAX, delay_min=_BACKOFF_DELAY_MIN, delay_init=_BACKOFF_DELAY_INIT, delay_ratio=_BACKOFF_DELAY_RATIO, ): def backoff_iterator(): delay = delay_init while True: _time.sleep(delay) delay = min(delay_max, max(delay_min, delay * delay_ratio)) yield backoff = backoff_iterator() return lambda: next(backoff) def _rma_progress(window): window.Flush(window.group_rank) def _rma_spinloop( window, typecode, index, sentinel, reset=False, backoff=None, progress=None, ): # pylint: disable=too-many-arguments,too-many-positional-arguments memory = memoryview(window).cast(typecode) backoff = backoff or _new_backoff() progress = progress or _rma_progress window.Sync() while memory[index] == sentinel: backoff() progress(window) window.Sync() value = memory[index] if reset: memory[index] = sentinel return value mpi4py-4.0.3/src/mpi4py/util/sync.pyi000066400000000000000000000050601475341043600174350ustar00rootroot00000000000000import sys from typing import ( Callable, Literal, ) if sys.version_info >= (3, 11): from typing import Self else: from typing_extensions import Self from ..typing import T from ..MPI import ( Info, INFO_NULL, Intracomm, COMM_SELF, ) __all__: list[str] = [ "Sequential", "Counter", "Mutex", "Condition", "Semaphore", ] class Sequential: comm: Intracomm tag: int def __init__( self, comm: Intracomm, tag: int = 0, ) -> None: ... def __enter__(self) -> Self: ... def __exit__(self, *exc: object) -> None: ... def begin(self) -> None: ... def end(self) -> None: ... class Counter: def __init__( self, start: int = 0, step: int = 1, *, typecode: str = 'i', comm: Intracomm = COMM_SELF, info: Info = INFO_NULL, root: int = 0, ) -> None: ... def __iter__(self) -> Self: ... def __next__(self) -> int: ... def next(self, incr: int | None = None) -> int: ... def free(self) -> None: ... class Mutex: def __init__( self, *, recursive: bool = False, comm: Intracomm = COMM_SELF, info: Info = INFO_NULL, ) -> None: ... def __enter__(self) -> Self: ... def __exit__(self, *exc: object) -> None: ... def acquire(self, blocking: bool = True) -> bool: ... def release(self) -> None: ... def locked(self) -> bool: ... def count(self) -> int: ... def free(self) -> None: ... class Condition: def __init__( self, mutex: Mutex | None = None, *, recursive: bool = True, comm: Intracomm = COMM_SELF, info: Info = INFO_NULL, ) -> None: ... def __enter__(self) -> Self: ... def __exit__(self, *exc: object) -> None: ... def acquire(self, blocking: bool = True) -> bool: ... def release(self) -> None: ... def locked(self) -> bool: ... def wait(self) -> Literal[True]: ... def wait_for(self, predicate: Callable[[], T]) -> T: ... def notify(self, n: int = 1) -> int: ... def notify_all(self) -> int: ... def free(self) -> None: ... class Semaphore: def __init__( self, value: int = 1, *, bounded: bool = True, comm: Intracomm = COMM_SELF, info: Info = INFO_NULL, ) -> None: ... def __enter__(self) -> Self: ... 
def __exit__(self, *exc: object) -> None: ... def acquire(self, blocking: bool = True) -> bool: ... def release(self, n: int = 1) -> None: ... def free(self) -> None: ... mpi4py-4.0.3/src/pympicommctx.h000066400000000000000000000125151475341043600164440ustar00rootroot00000000000000/* Author: Lisandro Dalcin */ /* Contact: dalcinl@gmail.com */ #include #ifndef PyMPI_MALLOC #define PyMPI_MALLOC malloc #endif #ifndef PyMPI_FREE #define PyMPI_FREE free #endif #ifndef MPIAPI #define MPIAPI #endif #undef CHKERR #define CHKERR(ierr) do { if (ierr != MPI_SUCCESS) return ierr; } while(0) typedef struct { MPI_Comm dupcomm; MPI_Comm localcomm; int tag; int low_group; } PyMPI_Commctx; static int PyMPI_Commctx_KEYVAL = MPI_KEYVAL_INVALID; static int PyMPI_Commctx_TAG_UB = -1; static int PyMPI_Commctx_new(PyMPI_Commctx **_commctx) { PyMPI_Commctx *commctx; if (PyMPI_Commctx_TAG_UB < 0) { int ierr, *attrval = NULL, flag = 0; ierr = MPI_Comm_get_attr(MPI_COMM_WORLD, MPI_TAG_UB, &attrval, &flag); CHKERR(ierr); PyMPI_Commctx_TAG_UB = (flag && attrval) ? *attrval : 32767; } commctx = (PyMPI_Commctx *)PyMPI_MALLOC(sizeof(PyMPI_Commctx)); if (commctx) { commctx->dupcomm = MPI_COMM_NULL; commctx->localcomm = MPI_COMM_NULL; commctx->tag = 0; commctx->low_group = -1; } *_commctx = commctx; return MPI_SUCCESS; } static int MPIAPI PyMPI_Commctx_free_fn(MPI_Comm comm, int k, void *v, void *xs) { int ierr, finalized = 1; PyMPI_Commctx *commctx = (PyMPI_Commctx *)v; (void)comm; (void)k; (void)xs; /* unused */ if (!commctx) return MPI_SUCCESS; ierr = MPI_Finalized(&finalized); CHKERR(ierr); if (finalized) goto fn_exit; if (commctx->localcomm != MPI_COMM_NULL) {ierr = MPI_Comm_free(&commctx->localcomm); CHKERR(ierr);} if (commctx->dupcomm != MPI_COMM_NULL) {ierr = MPI_Comm_free(&commctx->dupcomm); CHKERR(ierr);} fn_exit: PyMPI_FREE(commctx); return MPI_SUCCESS; } static int PyMPI_Commctx_keyval(int *keyval) { int ierr; if (PyMPI_Commctx_KEYVAL != MPI_KEYVAL_INVALID) goto fn_exit; ierr = MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, PyMPI_Commctx_free_fn, &PyMPI_Commctx_KEYVAL, NULL); CHKERR(ierr); fn_exit: if (keyval) *keyval = PyMPI_Commctx_KEYVAL; return MPI_SUCCESS; } static int PyMPI_Commctx_lookup(MPI_Comm comm, PyMPI_Commctx **_commctx) { int ierr, found = 0, keyval = MPI_KEYVAL_INVALID; PyMPI_Commctx *commctx = NULL; ierr = PyMPI_Commctx_keyval(&keyval); CHKERR(ierr); ierr = MPI_Comm_get_attr(comm, keyval, &commctx, &found); CHKERR(ierr); if (found && commctx) goto fn_exit; ierr = PyMPI_Commctx_new(&commctx); CHKERR(ierr); if (!commctx) {(void)MPI_Comm_call_errhandler(comm, MPI_ERR_INTERN); return MPI_ERR_INTERN;} ierr = MPI_Comm_set_attr(comm, keyval, commctx); CHKERR(ierr); ierr = MPI_Comm_dup(comm, &commctx->dupcomm); CHKERR(ierr); fn_exit: if (commctx->tag >= PyMPI_Commctx_TAG_UB) commctx->tag = 0; if (_commctx) *_commctx = commctx; return MPI_SUCCESS; } static int PyMPI_Commctx_clear(MPI_Comm comm) { int ierr, found = 0, keyval = PyMPI_Commctx_KEYVAL; PyMPI_Commctx *commctx = NULL; if (keyval == MPI_KEYVAL_INVALID) return MPI_SUCCESS; ierr = MPI_Comm_get_attr(comm, keyval, &commctx, &found); CHKERR(ierr); if (found) {ierr = MPI_Comm_delete_attr(comm, keyval); CHKERR(ierr);} return MPI_SUCCESS; } static int PyMPI_Commctx_intra(MPI_Comm comm, MPI_Comm *dupcomm, int *tag) { int ierr; PyMPI_Commctx *commctx = NULL; ierr = PyMPI_Commctx_lookup(comm, &commctx);CHKERR(ierr); if (dupcomm) *dupcomm = commctx->dupcomm; if (tag) *tag = commctx->tag++; return MPI_SUCCESS; } static int PyMPI_Commctx_inter(MPI_Comm comm, 
MPI_Comm *dupcomm, int *tag, MPI_Comm *localcomm, int *low_group) { int ierr; PyMPI_Commctx *commctx = NULL; ierr = PyMPI_Commctx_lookup(comm, &commctx);CHKERR(ierr); if (commctx->localcomm == MPI_COMM_NULL) { int localsize, remotesize, mergerank; MPI_Comm mergecomm = MPI_COMM_NULL; ierr = MPI_Comm_size(comm, &localsize); CHKERR(ierr); ierr = MPI_Comm_remote_size(comm, &remotesize); CHKERR(ierr); ierr = MPI_Intercomm_merge(comm, localsize>remotesize, &mergecomm); CHKERR(ierr); ierr = MPI_Comm_rank(mergecomm, &mergerank); CHKERR(ierr); commctx->low_group = ((localsize>remotesize) ? 0 : (localsize 2) || (MPI_VERSION == 2 && MPI_SUBVERSION >= 2) { MPI_Group localgroup = MPI_GROUP_NULL; ierr = MPI_Comm_group(comm, &localgroup); CHKERR(ierr); ierr = MPI_Comm_create(mergecomm, localgroup, &commctx->localcomm); CHKERR(ierr); ierr = MPI_Group_free(&localgroup); CHKERR(ierr); } #else ierr = MPI_Comm_split(mergecomm, commctx->low_group, 0, &commctx->localcomm); CHKERR(ierr); #endif ierr = MPI_Comm_free(&mergecomm); CHKERR(ierr); } if (dupcomm) *dupcomm = commctx->dupcomm; if (tag) *tag = commctx->tag++; if (localcomm) *localcomm = commctx->localcomm; if (low_group) *low_group = commctx->low_group; return MPI_SUCCESS; } static int PyMPI_Commctx_finalize(void) { int ierr; if (PyMPI_Commctx_KEYVAL == MPI_KEYVAL_INVALID) return MPI_SUCCESS; ierr = PyMPI_Commctx_clear(MPI_COMM_SELF); CHKERR(ierr); ierr = PyMPI_Commctx_clear(MPI_COMM_WORLD); CHKERR(ierr); ierr = MPI_Comm_free_keyval(&PyMPI_Commctx_KEYVAL); CHKERR(ierr); PyMPI_Commctx_TAG_UB = -1; return MPI_SUCCESS; } #undef CHKERR /* Local variables: c-basic-offset: 2 indent-tabs-mode: nil End: */ mpi4py-4.0.3/src/pympivendor.h000066400000000000000000000114041475341043600162630ustar00rootroot00000000000000/* Author: Lisandro Dalcin */ /* Contact: dalcinl@gmail.com */ #include #include static int PyMPI_Get_vendor(const char **vendor_name, int *version_major, int *version_minor, int *version_micro) { const char *name = "unknown"; int major=0, minor=0, micro=0; #if defined(PyMPI_ABI) || defined(CIBUILDWHEEL) int ierr, len = 0, cnt; char lib[MPI_MAX_LIBRARY_VERSION_STRING] = {0}, *str; ierr = MPI_Get_library_version(lib, &len); if (ierr != MPI_SUCCESS) return ierr; cnt = sscanf(lib, "MPICH Version: %d.%d.%d", &major, &minor, µ); if (cnt > 0) { name = "MPICH"; goto done; } cnt = sscanf(lib, "Open MPI v%d.%d.%d", &major, &minor, µ); if (cnt > 0) { name = "Open MPI"; goto done; } cnt = sscanf(lib, "Intel(R) MPI Library %d.%d.%d", &major, &minor, µ); if (cnt > 0) { name = "Intel MPI"; goto done; } cnt = sscanf(lib, "Microsoft MPI %d.%d", &major, &minor); if (cnt > 0) { name = "Microsoft MPI"; goto done; } str = strstr(lib, "CRAY MPICH version"); if (!str) str = lib; cnt = sscanf(str, "CRAY MPICH version %d.%d.%d", &major, &minor, µ); if (cnt > 0) { name = "Cray MPICH"; goto done; } cnt = sscanf(lib, "MVAPICH Version: %d.%d.%d", &major, &minor, µ); if (cnt > 0) { name = "MVAPICH"; goto done; } cnt = sscanf(lib, "MVAPICH2 Version: %d.%d.%d", &major, &minor, µ); if (cnt > 0) { name = "MVAPICH"; goto done; } done: #elif defined(I_MPI_VERSION) name = "Intel MPI"; #if defined(I_MPI_NUMVERSION) {int version = I_MPI_NUMVERSION/1000; major = version/10000; version -= major*10000; minor = version/100; version -= minor*100; micro = version/1; version -= micro*1; } #else (void)sscanf(I_MPI_VERSION,"%d.%d Update %d",&major,&minor,µ); #endif #elif defined(MSMPI_VER) name = "Microsoft MPI"; major = MSMPI_VER >> 8; minor = MSMPI_VER & 0xFF; #elif defined(CRAY_MPICH_VERSION) 
name = "Cray MPICH"; # define str(s) #s # define xstr(s) str(s) (void)sscanf(xstr(CRAY_MPICH_VERSION),"%d.%d.%d",&major,&minor,µ); # undef xstr # undef str #elif defined(MVAPICH_VERSION) || defined(MVAPICH_NUMVERSION) name = "MVAPICH"; #if defined(MVAPICH_NUMVERSION) {int version = MVAPICH_NUMVERSION/1000; if (version<1000) version *= 100; major = version/10000; version -= major*10000; minor = version/100; version -= minor*100; micro = version/1; version -= micro*1; } #elif defined(MVAPICH_VERSION) (void)sscanf(MVAPICH_VERSION,"%d.%d.%d",&major,&minor,µ); #endif #elif defined(MVAPICH2_VERSION) || defined(MVAPICH2_NUMVERSION) name = "MVAPICH"; #if defined(MVAPICH2_NUMVERSION) {int version = MVAPICH2_NUMVERSION/1000; if (version<1000) version *= 100; major = version/10000; version -= major*10000; minor = version/100; version -= minor*100; micro = version/1; version -= micro*1; } #elif defined(MVAPICH2_VERSION) (void)sscanf(MVAPICH2_VERSION,"%d.%d.%d",&major,&minor,µ); #endif #elif defined(MPICH_NAME) && (MPICH_NAME >= 3) name = "MPICH"; #if defined(MPICH_NUMVERSION) {int version = MPICH_NUMVERSION/1000; major = version/10000; version -= major*10000; minor = version/100; version -= minor*100; micro = version/1; version -= micro*1; } #elif defined(MPICH_VERSION) (void)sscanf(MPICH_VERSION,"%d.%d.%d",&major,&minor,µ); #endif #elif defined(MPICH_NAME) && (MPICH_NAME == 2) name = "MPICH2"; #if defined(MPICH2_NUMVERSION) {int version = MPICH2_NUMVERSION/1000; major = version/10000; version -= major*10000; minor = version/100; version -= minor*100; micro = version/1; version -= micro*1; } #elif defined(MPICH2_VERSION) (void)sscanf(MPICH2_VERSION,"%d.%d.%d",&major,&minor,µ); #endif #elif defined(MPICH_NAME) && (MPICH_NAME == 1) name = "MPICH1"; #if defined(MPICH_VERSION) (void)sscanf(MPICH_VERSION,"%d.%d.%d",&major,&minor,µ); #endif #elif defined(OPEN_MPI) name = "Open MPI"; #if defined(OMPI_MAJOR_VERSION) major = OMPI_MAJOR_VERSION; #endif #if defined(OMPI_MINOR_VERSION) minor = OMPI_MINOR_VERSION; #endif #if defined(OMPI_RELEASE_VERSION) micro = OMPI_RELEASE_VERSION; #endif #elif defined(LAM_MPI) name = "LAM/MPI"; #if defined(LAM_MAJOR_VERSION) major = LAM_MAJOR_VERSION; #endif #if defined(LAM_MINOR_VERSION) minor = LAM_MINOR_VERSION; #endif #if defined(LAM_RELEASE_VERSION) micro = LAM_RELEASE_VERSION; #endif #endif if (vendor_name) *vendor_name = name; if (version_major) *version_major = major; if (version_minor) *version_minor = minor; if (version_micro) *version_micro = micro; return MPI_SUCCESS; } /* Local variables: c-basic-offset: 2 indent-tabs-mode: nil End: */ mpi4py-4.0.3/src/python.c000066400000000000000000000066161475341043600152340ustar00rootroot00000000000000/* Author: Lisandro Dalcin */ /* Contact: dalcinl@gmail.com */ /* -------------------------------------------------------------------------- */ #include #define MPICH_IGNORE_CXX_SEEK 1 #define OMPI_IGNORE_CXX_SEEK 1 #include #if defined(PYPY_VERSION) PyAPI_FUNC(int) pypy_main_startup(int, char **); #define Py_BytesMain pypy_main_startup #elif PY_MAJOR_VERSION <= 2 #define Py_BytesMain Py_Main #elif PY_VERSION_HEX < 0x03070000 static int Py_BytesMain(int, char **); #elif PY_VERSION_HEX < 0x03080000 PyAPI_FUNC(int) _Py_UnixMain(int, char **); #define Py_BytesMain _Py_UnixMain #endif /* -------------------------------------------------------------------------- */ int main(int argc, char **argv) { int status = 0, flag = 1, finalize = 0; /* MPI initialization */ (void)MPI_Initialized(&flag); if (!flag) { #if defined(MPI_VERSION) && 
(MPI_VERSION > 1) int required = MPI_THREAD_MULTIPLE; int provided = MPI_THREAD_SINGLE; (void)MPI_Init_thread(&argc, &argv, required, &provided); #else (void)MPI_Init(&argc, &argv); #endif finalize = 1; } /* Python main */ status = Py_BytesMain(argc, argv); /* MPI finalization */ (void)MPI_Finalized(&flag); if (!flag) { if (status) (void)MPI_Abort(MPI_COMM_WORLD, status); if (finalize) (void)MPI_Finalize(); } return status; } /* -------------------------------------------------------------------------- */ #if !defined(PYPY_VERSION) #if PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x03070000 #include #include #include static wchar_t **mk_wargs(int, char **); static wchar_t **cp_wargs(int, wchar_t **); static void rm_wargs(wchar_t **, int); static int Py_BytesMain(int argc, char **argv) { int sts = 0; wchar_t **wargv = mk_wargs(argc, argv); wchar_t **wargv2 = cp_wargs(argc, wargv); if (wargv && wargv2) sts = Py_Main(argc, wargv); else sts = 1; rm_wargs(wargv2, 1); rm_wargs(wargv, 0); return sts; } #if PY_VERSION_HEX < 0x03050000 #define Py_DecodeLocale _Py_char2wchar #endif static wchar_t ** mk_wargs(int argc, char **argv) { int i; char *saved_locale = NULL; wchar_t **args = NULL; args = (wchar_t **)malloc((size_t)(argc+1)*sizeof(wchar_t *)); if (!args) goto oom; saved_locale = strdup(setlocale(LC_ALL, NULL)); if (!saved_locale) goto oom; setlocale(LC_ALL, ""); for (i=0; i (3, 7): dict_type = dict else: from collections import OrderedDict dict_type = OrderedDict typemap = dict_type( (typecode, datatype) for typecode, datatype in entries if datatype != MPI.DATATYPE_NULL ) return typemap TypeMap = make_typemap([ ('b', MPI.SIGNED_CHAR), ('h', MPI.SHORT), ('i', MPI.INT), ('l', MPI.LONG), ('q', MPI.LONG_LONG), ('f', MPI.FLOAT), ('d', MPI.DOUBLE), ('g', MPI.LONG_DOUBLE), ]) TypeMapBool = make_typemap([ ('?', MPI.C_BOOL), ]) TypeMapInteger = make_typemap([ ('b', MPI.SIGNED_CHAR), ('h', MPI.SHORT), ('i', MPI.INT), ('l', MPI.LONG), ('q', MPI.LONG_LONG), ]) TypeMapUnsigned = make_typemap([ ('B', MPI.UNSIGNED_CHAR), ('H', MPI.UNSIGNED_SHORT), ('I', MPI.UNSIGNED_INT), ('L', MPI.UNSIGNED_LONG), ('Q', MPI.UNSIGNED_LONG_LONG), ]) TypeMapFloat = make_typemap([ ('f', MPI.FLOAT), ('d', MPI.DOUBLE), ('g', MPI.LONG_DOUBLE), ]) TypeMapComplex = make_typemap([ ('F', MPI.C_FLOAT_COMPLEX), ('D', MPI.C_DOUBLE_COMPLEX), ('G', MPI.C_LONG_DOUBLE_COMPLEX), ]) ArrayBackends = [] def add_backend(cls): ArrayBackends.append(cls) return cls class BaseArray: backend = None TypeMap = TypeMap.copy() TypeMap.pop('g', None) def __len__(self): return len(self.array) def __getitem__(self, i): return self.array[i] def __setitem__(self, i, v): self.array[i] = v @property def mpidtype(self): try: return self.TypeMap[self.typecode] except KeyError: return MPI.DATATYPE_NULL def as_raw(self): return self.array def as_mpi(self): return (self.as_raw(), self.mpidtype) def as_mpi_c(self, count): return (self.as_raw(), count, self.mpidtype) def as_mpi_v(self, cnt, dsp): return (self.as_raw(), (cnt, dsp), self.mpidtype) if array is not None: def product(seq): res = 1 for s in seq: res = res * s return res def mkshape(shape): return tuple([int(s) for s in shape]) @add_backend class ArrayArray(BaseArray): backend = 'array' def __init__(self, arg, typecode, shape=None): if isinstance(arg, (int, float)): if shape is None: shape = () else: try: shape = mkshape(shape) except TypeError: shape = (int(shape),) size = product(shape) arg = [arg] * size else: size = len(arg) if shape is None: shape = (size,) else: shape = mkshape(shape) assert size == 
product(shape) self.array = array.array(typecode, arg) @property def address(self): return self.array.buffer_info()[0] @property def typecode(self): return self.array.typecode @property def itemsize(self): return self.array.itemsize @property def flat(self): return self.array @property def size(self): return self.array.buffer_info()[1] if numpy is not None: @add_backend class ArrayNumPy(BaseArray): backend = 'numpy' TypeMap = make_typemap([]) TypeMap.update(TypeMapBool) TypeMap.update(TypeMapInteger) TypeMap.update(TypeMapUnsigned) TypeMap.update(TypeMapFloat) TypeMap.update(TypeMapComplex) def __init__(self, arg, typecode, shape=None): if isinstance(arg, (int, float, complex)): if shape is None: shape = () else: if shape is None: shape = len(arg) self.array = numpy.zeros(shape, typecode) if isinstance(arg, (int, float, complex)): arg = numpy.asarray(arg).astype(typecode) self.array.fill(arg) else: arg = numpy.asarray(arg).astype(typecode) self.array[...] = arg @property def address(self): return self.array.__array_interface__['data'][0] @property def typecode(self): return self.array.dtype.char @property def itemsize(self): return self.array.itemsize @property def flat(self): return self.array.flat @property def size(self): return self.array.size try: import dlpackimpl as dlpack except ImportError: dlpack = None class BaseDLPackCPU: def __dlpack_device__(self): return (dlpack.DLDeviceType.kDLCPU, 0) def __dlpack__(self, stream=None): assert stream is None capsule = dlpack.make_py_capsule(self.array) return capsule def as_raw(self): return self if dlpack is not None and array is not None: @add_backend class DLPackArray(BaseDLPackCPU, ArrayArray): backend = 'dlpack-array' def __init__(self, arg, typecode, shape=None): super().__init__(arg, typecode, shape) if dlpack is not None and numpy is not None: @add_backend class DLPackNumPy(BaseDLPackCPU, ArrayNumPy): backend = 'dlpack-numpy' def __init__(self, arg, typecode, shape=None): super().__init__(arg, typecode, shape) def typestr(typecode, itemsize): typestr = '' if sys.byteorder == 'little': typestr += '<' if sys.byteorder == 'big': typestr += '>' if typecode in '?': typestr += 'b' if typecode in 'bhilq': typestr += 'i' if typecode in 'BHILQ': typestr += 'u' if typecode in 'fdg': typestr += 'f' if typecode in 'FDG': typestr += 'c' typestr += str(itemsize) return typestr class BaseFakeGPUArray: def set_interface(self, shape, readonly=False): self.__cuda_array_interface__ = dict( version = 0, data = (self.address, readonly), typestr = typestr(self.typecode, self.itemsize), shape = shape, ) def as_raw(self): return self if array is not None: @add_backend class FakeGPUArrayBasic(BaseFakeGPUArray, ArrayArray): def __init__(self, arg, typecode, shape=None, readonly=False): super().__init__(arg, typecode, shape) self.set_interface((len(self),), readonly) if numpy is not None: @add_backend class FakeGPUArrayNumPy(BaseFakeGPUArray, ArrayNumPy): def __init__(self, arg, typecode, shape=None, readonly=False): super().__init__(arg, typecode, shape) self.set_interface(self.array.shape, readonly) if cupy is not None: @add_backend class GPUArrayCuPy(BaseArray): backend = 'cupy' TypeMap = make_typemap([]) if cupy_version >= (11, 6): TypeMap.update(TypeMapBool) TypeMap.update(TypeMapInteger) TypeMap.update(TypeMapUnsigned) TypeMap.update(TypeMapFloat) TypeMap.update(TypeMapComplex) try: cupy.array(0, 'g') except ValueError: TypeMap.pop('g', None) try: cupy.array(0, 'G') except ValueError: TypeMap.pop('G', None) def __init__(self, arg, typecode, shape=None, 
readonly=False): if isinstance(arg, (int, float, complex)): if shape is None: shape = () else: if shape is None: shape = len(arg) self.array = cupy.zeros(shape, typecode) if isinstance(arg, (int, float, complex)): self.array.fill(arg) else: self.array[:] = cupy.asarray(arg, typecode) @property def address(self): return self.array.__cuda_array_interface__['data'][0] @property def typecode(self): return self.array.dtype.char @property def itemsize(self): return self.array.itemsize @property def flat(self): return self.array.ravel() @property def size(self): return self.array.size def as_raw(self): cupy.cuda.get_current_stream().synchronize() return self.array if cupy is not None: # Note: we do not create a BaseDLPackGPU class because each GPU library # has its own way to get device ID etc, so we have to reimplement the # DLPack support anyway @add_backend class DLPackCuPy(GPUArrayCuPy): backend = 'dlpack-cupy' has_dlpack = None dev_type = None def __init__(self, arg, typecode, shape=None): super().__init__(arg, typecode, shape) self.has_dlpack = hasattr(self.array, '__dlpack_device__') # TODO(leofang): test CUDA managed memory? if cupy.cuda.runtime.is_hip: self.dev_type = dlpack.DLDeviceType.kDLROCM else: self.dev_type = dlpack.DLDeviceType.kDLCUDA def __dlpack_device__(self): if self.has_dlpack: return self.array.__dlpack_device__() else: return (self.dev_type, self.array.device.id) def __dlpack__(self, stream=None): cupy.cuda.get_current_stream().synchronize() if self.has_dlpack: return self.array.__dlpack__(stream=-1) else: return self.array.toDlpack() def as_raw(self): return self if numba is not None: @add_backend class GPUArrayNumba(BaseArray): backend = 'numba' TypeMap = make_typemap([]) TypeMap.update(TypeMapBool) TypeMap.update(TypeMapInteger) TypeMap.update(TypeMapUnsigned) TypeMap.update(TypeMapFloat) TypeMap.update(TypeMapComplex) # one can allocate arrays with those types, # but the Numba compiler doesn't support them... 
TypeMap.pop('g', None) TypeMap.pop('G', None) def __init__(self, arg, typecode, shape=None, readonly=False): if isinstance(arg, (int, float, complex)): if shape is None: shape = () else: if shape is None: shape = len(arg) self.array = numba.cuda.device_array(shape, typecode) if isinstance(arg, (int, float, complex)): if self.array.size > 0: self.array[:] = arg elif arg == [] or arg == (): self.array = numba.cuda.device_array(0, typecode) else: if self.array.size > 0: self.array[:] = numba.cuda.to_device(arg) # def __getitem__(self, i): # if isinstance(i, slice): # return self.array[i] # elif i < self.array.size: # return self.array[i] # else: # raise StopIteration @property def address(self): return self.array.__cuda_array_interface__['data'][0] @property def typecode(self): return self.array.dtype.char @property def itemsize(self): return self.array.dtype.itemsize @property def flat(self): if self.array.ndim <= 1: return self.array else: return self.array.ravel() @property def size(self): return self.array.size def as_raw(self): # numba by default always runs on the legacy default stream numba.cuda.default_stream().synchronize() return self.array def loop(*args): loop.array = None loop.typecode = None for array in ArrayBackends: loop.array = array for typecode in array.TypeMap: loop.typecode = typecode if not args: yield array, typecode else: for prod in itertools.product(*args): yield (array, typecode) + prod del loop.array del loop.typecode def test(case, **kargs): return case.subTest( typecode=loop.typecode, backend=loop.array.backend, **kargs, ) def scalar(arg): return loop.array(arg, loop.typecode, 1)[0] mpi4py-4.0.3/test/coverage.sh000077500000000000000000000233371475341043600160700ustar00rootroot00000000000000#!/bin/bash set -eux MPIEXEC=${MPIEXEC:-mpiexec} PYTHON=${PYTHON:-python${py:-}} export PYTHONDONTWRITEBYTECODE=1 export PYTHONUNBUFFERED=1 export PYTHONWARNINGS=error $PYTHON -m coverage erase $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench --help > /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench --threads helloworld -q $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench --no-threads helloworld -q $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench --thread-level=single helloworld -q $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench helloworld > /dev/null $MPIEXEC -n 2 $PYTHON -m coverage run -m mpi4py.bench helloworld > /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench helloworld > /dev/null $MPIEXEC -n 2 $PYTHON -m coverage run -m mpi4py.bench helloworld -q $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench ringtest > /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench ringtest -q -l 2 -s 1 $MPIEXEC -n 2 $PYTHON -m coverage run -m mpi4py.bench ringtest -q -l 2 -s 1 $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench pingpong -l 2 -s 1 -n 64 --array none > /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench pingpong -l 2 -s 1 -n 64 --no-header > /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench pingpong -l 2 -s 1 -n 64 --no-stats > /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench pingpong -q -l 1 -s 1 -n 2097152 $MPIEXEC -n 2 $PYTHON -m coverage run -m mpi4py.bench pingpong -q -l 1 -s 1 -n 128 $MPIEXEC -n 3 $PYTHON -m coverage run -m mpi4py.bench pingpong -q -l 1 -s 1 -n 128 $MPIEXEC -n 2 $PYTHON -m coverage run -m mpi4py.bench pingpong -q -l 1 -s 1 -n 128 -p $MPIEXEC -n 2 $PYTHON -m coverage run -m mpi4py.bench pingpong -q -l 1 -s 1 -n 128 -o $MPIEXEC -n 2 $PYTHON -m 
coverage run -m mpi4py.bench pingpong -q -l 1 -s 1 -n 128 -p --protocol 4 $MPIEXEC -n 2 $PYTHON -m coverage run -m mpi4py.bench pingpong -q -l 1 -s 1 -n 128 -o --threshold 32 $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench futures -w 1 -t 1 -l 1 > /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench futures -w 1 -t 1 -n 8 --no-header > /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench futures -w 1 -t 1 -n 8 --no-stats > /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench futures -w 1 -t 1 -n 8 -a numpy -e mpi -q $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench futures -w 1 -t 1 -n 8 -a bytes -e process -q $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench futures -w 1 -t 1 -n 8 -a array -e thread -q $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench > /dev/null 2>&1 || true $MPIEXEC -n 2 $PYTHON -m coverage run -m mpi4py.bench > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench qwerty > /dev/null 2>&1 || true $MPIEXEC -n 2 $PYTHON -m coverage run -m mpi4py.bench qwerty > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.run --help > /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py --prefix > /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py --version > /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py --mpi-std-version > /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py --mpi-lib-version > /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py --help > /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py - < /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py -rc threads=0 -c "import mpi4py.MPI" $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py --rc=thread_level=single -c "import mpi4py.MPI" $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py -m > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py -c > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py -p > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py -bad > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py --bad=a > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py -rc > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py -rc= > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py --rc > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py --rc=a > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py --rc=a= > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py --rc==a > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run test/test_package.py $MPIEXEC -n 1 $PYTHON -m coverage run test/test_toplevel.py $MPIEXEC -n 1 $PYTHON -m coverage run test/test_util_dtlib.py $MPIEXEC -n 1 $PYTHON -m coverage run test/test_util_pkl5.py $MPIEXEC -n 2 $PYTHON -m coverage run test/test_util_pkl5.py $MPIEXEC -n 3 $PYTHON -m coverage run test/test_util_pkl5.py $MPIEXEC -n 1 $PYTHON -m coverage run test/test_util_pool.py $MPIEXEC -n 1 $PYTHON -m coverage run test/test_util_sync.py $MPIEXEC -n 2 $PYTHON -m coverage run test/test_util_sync.py $MPIEXEC -n 3 $PYTHON -m coverage run test/test_util_sync.py $PYTHON -m coverage run demo/test-run/test_run.py $MPIEXEC -n 1 $PYTHON -m coverage run demo/futures/test_futures.py $MPIEXEC -n 2 $PYTHON -m coverage run demo/futures/test_futures.py $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.run -rc 
threads=False demo/futures/test_futures.py -qf 2> /dev/null || true $MPIEXEC -n 2 $PYTHON -m coverage run -m mpi4py.run -rc threads=False demo/futures/test_futures.py -qf 2> /dev/null || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.futures demo/futures/test_futures.py $MPIEXEC -n 2 $PYTHON -m coverage run -m mpi4py.futures demo/futures/test_futures.py SharedPoolInitTest $MPIEXEC -n 2 $PYTHON -m coverage run -m mpi4py.futures demo/futures/test_futures.py ProcessPoolPickleTest $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.futures -h > /dev/null $MPIEXEC -n 2 $PYTHON -m coverage run -m mpi4py.futures -h > /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.futures -m this > /dev/null $MPIEXEC -n 2 $PYTHON -m coverage run -m mpi4py.futures -m this > /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.futures -c "42" $MPIEXEC -n 2 $PYTHON -m coverage run -m mpi4py.futures -c "42" $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.futures - /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.futures xy > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.futures -c > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.futures -m > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.futures -x > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.futures -c "1/0" > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.futures -c "raise SystemExit(11)" > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.futures -c "raise SystemExit('')" > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.futures -c "raise KeyboardInterrupt" > /dev/null 2>&1 || true if [ $(command -v mpichversion) ]; then testdir=demo/futures $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.futures.server --xyz > /dev/null 2>&1 || true $MPIEXEC -n 2 $PYTHON -m coverage run -m mpi4py.futures.server --bind localhost & mpi4pyserver=$!; sleep 1; $MPIEXEC -n 1 $PYTHON -m coverage run $testdir/test_service.py --host localhost wait $mpi4pyserver $MPIEXEC -n 2 $PYTHON -m coverage run -m mpi4py.futures.server --port 31414 --info "a=x,b=y" & mpi4pyserver=$!; sleep 1; $MPIEXEC -n 1 $PYTHON -m coverage run $testdir/test_service.py --port 31414 --info "a=x,b=y" wait $mpi4pyserver fi if [ $(command -v mpichversion) ] && [ $(command -v hydra_nameserver) ]; then testdir=demo/futures hydra_nameserver & nameserver=$!; sleep 1; $MPIEXEC -nameserver localhost -n 2 $PYTHON -m coverage run -m mpi4py.futures.server & mpi4pyserver=$!; sleep 1; $MPIEXEC -nameserver localhost -n 1 $PYTHON -m coverage run $testdir/test_service.py wait $mpi4pyserver kill -TERM $nameserver wait $nameserver 2>/dev/null || true fi if test -f src/mpi4py/MPI.c && grep -q CYTHON_TRACE src/mpi4py/MPI.c; then export PYTHONWARNINGS=default $MPIEXEC -n 2 $PYTHON -m coverage run test/main.py -f -e test_util_ $MPIEXEC -n 3 $PYTHON -m coverage run test/main.py -f test_cco_buf_inter.TestCCOBufInter $MPIEXEC -n 3 $PYTHON -m coverage run test/main.py -f test_cco_obj_inter.TestCCOObjInter $MPIEXEC -n 4 $PYTHON -m coverage run test/main.py -f test_cco_obj.TestCCOObjWorld env MPI4PY_RC_RECV_MPROBE=false $MPIEXEC -n 2 $PYTHON -m coverage run test/main.py -f test_p2p_obj.TestP2PObjWorld env MPI4PY_RC_FAST_REDUCE=false $MPIEXEC -n 2 $PYTHON -m coverage run test/main.py -f test_cco_obj.TestCCOObjWorld env MPIEXEC="$MPIEXEC" PYTHON="$PYTHON -m coverage run -m mpi4py" demo/init-fini/run.sh env MPIEXEC="$MPIEXEC" PYTHON="$PYTHON -m 
coverage run -m mpi4py" demo/check-mpiexec/run.sh fi $PYTHON -m coverage combine mpi4py-4.0.3/test/dlpackimpl.py000066400000000000000000000202461475341043600164240ustar00rootroot00000000000000import sys import ctypes from enum import IntEnum if hasattr(sys, 'pypy_version_info'): raise ImportError("unsupported on PyPy") class DLPackVersion(ctypes.Structure): _fields_ = [ ("major", ctypes.c_uint32), ("minor", ctypes.c_uint32), ] class DLDeviceType(IntEnum): kDLCPU = 1 kDLCUDA = 2 kDLCUDAHost = 3 kDLOpenCL = 4 kDLVulkan = 7 kDLMetal = 8 kDLVPI = 9 kDLROCM = 10 kDLROCMHost = 11 kDLExtDev = 12 kDLCUDAManaged = 13 kDLOneAPI = 14 kDLWebGPU = 15 kDLHexagon = 16 class DLDevice(ctypes.Structure): _fields_ = [ ("device_type", ctypes.c_uint), ("device_id", ctypes.c_int32), ] class DLDataTypeCode(IntEnum): kDLInt = 0 kDLUInt = 1 kDLFloat = 2 kDLOpaqueHandle = 3 kDLBfloat = 4 kDLComplex = 5 kDLBool = 6 class DLDataType(ctypes.Structure): _fields_ = [ ("code", ctypes.c_uint8), ("bits", ctypes.c_uint8), ("lanes", ctypes.c_uint16), ] class DLTensor(ctypes.Structure): _fields_ = [ ("data", ctypes.c_void_p), ("device", DLDevice), ("ndim", ctypes.c_int32), ("dtype", DLDataType), ("shape", ctypes.POINTER(ctypes.c_int64)), ("strides", ctypes.POINTER(ctypes.c_int64)), ("byte_offset", ctypes.c_uint64), ] DLManagedTensorDeleter = ctypes.CFUNCTYPE(None, ctypes.c_void_p) class DLManagedTensor(ctypes.Structure): _fields_ = [ ("dl_tensor", DLTensor), ("manager_ctx", ctypes.c_void_p), ("deleter", DLManagedTensorDeleter), ] DLPACK_FLAG_BITMASK_READ_ONLY = 1 << 0 DLPACK_FLAG_BITMASK_IS_COPIED = 1 << 1 DLManagedTensorVersionedDeleter = ctypes.CFUNCTYPE(None, ctypes.c_void_p) class DLManagedTensorVersioned(ctypes.Structure): _fields_ = [ ("version", DLPackVersion), ("manager_ctx", ctypes.c_void_p), ("deleter", DLManagedTensorDeleter), ("flags", ctypes.c_uint64), ("dl_tensor", DLTensor), ] pyapi = ctypes.pythonapi DLManagedTensor_p = ctypes.POINTER(DLManagedTensor) DLManagedTensorVersioned_p = ctypes.POINTER(DLManagedTensorVersioned) Py_IncRef = pyapi.Py_IncRef Py_IncRef.restype = None Py_IncRef.argtypes = [ctypes.py_object] Py_DecRef = pyapi.Py_DecRef Py_DecRef.restype = None Py_DecRef.argtypes = [ctypes.py_object] PyCapsule_Destructor = ctypes.CFUNCTYPE(None, ctypes.c_void_p) PyCapsule_New = pyapi.PyCapsule_New PyCapsule_New.restype = ctypes.py_object PyCapsule_New.argtypes = [ctypes.c_void_p, ctypes.c_char_p, PyCapsule_Destructor] PyCapsule_IsValid = pyapi.PyCapsule_IsValid PyCapsule_IsValid.restype = ctypes.c_int PyCapsule_IsValid.argtypes = [ctypes.py_object, ctypes.c_char_p] PyCapsule_GetPointer = pyapi.PyCapsule_GetPointer PyCapsule_GetPointer.restype = ctypes.c_void_p PyCapsule_GetPointer.argtypes = [ctypes.py_object, ctypes.c_char_p] PyCapsule_SetContext = pyapi.PyCapsule_SetContext PyCapsule_SetContext.restype = ctypes.c_int PyCapsule_SetContext.argtypes = [ctypes.py_object, ctypes.c_void_p] PyCapsule_GetContext = pyapi.PyCapsule_GetContext PyCapsule_GetContext.restype = ctypes.c_void_p PyCapsule_GetContext.argtypes = [ctypes.py_object] def make_dl_version(major, minor): version = DLPackVersion() version.major = major version.minor = minor return version def make_dl_datatype(typecode, itemsize): code = None bits = itemsize * 8 lanes = 1 if typecode in "?": code = DLDataTypeCode.kDLBool if typecode in "bhilqnp": code = DLDataTypeCode.kDLInt if typecode in "BHILQNP": code = DLDataTypeCode.kDLUInt if typecode in "efdg": code = DLDataTypeCode.kDLFloat if typecode in "FDG": code = DLDataTypeCode.kDLComplex if typecode 
== "G" and itemsize == 32: code = DLDataTypeCode.kDLFloat bits //= 2 lanes *= 2 datatype = DLDataType() datatype.code = code datatype.bits = bits datatype.lanes = lanes return datatype def make_dl_shape(shape, order=None, strides=None): null = ctypes.cast(0, ctypes.POINTER(ctypes.c_int64)) if isinstance(shape, int): shape = [shape] ndim = len(shape) if ndim == 0: shape = null strides = null else: shape = (ctypes.c_int64*ndim)(*shape) if order == 'C': size = 1 strides = [] for i in range(ndim-1, -1, -1): strides.append(size) size *= shape[i] strides = (ctypes.c_int64*ndim)(*strides) elif order == 'F': size = 1 strides = [] for i in range(ndim): strides.append(size) size *= shape[i] strides = (ctypes.c_int64*ndim)(*strides) elif strides is not None: strides = (ctypes.c_int64*ndim)(*strides) else: strides = null return ndim, shape, strides def make_dl_tensor(obj): try: data, size = obj.buffer_info() typecode = obj.typecode itemsize = obj.itemsize except AttributeError: data = obj.ctypes.data size = obj.size typecode = obj.dtype.char itemsize = obj.itemsize device = DLDevice(DLDeviceType.kDLCPU, 0) datatype = make_dl_datatype(typecode, itemsize) ndim, shape, strides = make_dl_shape(size) dltensor = DLTensor() dltensor.data = data if size > 0 else 0 dltensor.device = device dltensor.ndim = ndim dltensor.dtype = datatype dltensor.shape = shape dltensor.strides = strides dltensor.byte_offset = 0 return dltensor def make_dl_manager_ctx(obj): py_obj = ctypes.py_object(obj) if False: Py_IncRef(py_obj) void_p = ctypes.c_void_p.from_buffer(py_obj) return void_p @DLManagedTensorDeleter def dl_managed_tensor_deleter(void_p): managed = ctypes.cast(void_p, DLManagedTensor_p) manager_ctx = managed.contents.manager_ctx py_obj = ctypes.cast(manager_ctx, ctypes.py_object) if False: Py_DecRef(py_obj) @DLManagedTensorVersionedDeleter def dl_managed_tensor_versioned_deleter(void_p): managed = ctypes.cast(void_p, DLManagedTensorVersioned_p) manager_ctx = managed.contents.manager_ctx py_obj = ctypes.cast(manager_ctx, ctypes.py_object) if False: Py_DecRef(py_obj) def make_dl_managed_tensor(obj, versioned=False): if versioned: managed = DLManagedTensorVersioned() managed.version = make_dl_version(1, 0) managed.manager_ctx = make_dl_manager_ctx(obj) managed.deleter = dl_managed_tensor_versioned_deleter managed.flags = 0 managed.dl_tensor = make_dl_tensor(obj) else: managed = DLManagedTensor() managed.dl_tensor = make_dl_tensor(obj) managed.manager_ctx = make_dl_manager_ctx(obj) managed.deleter = dl_managed_tensor_deleter return managed def make_py_context(context): py_obj = ctypes.py_object(context) Py_IncRef(py_obj) context = ctypes.c_void_p.from_buffer(py_obj) return ctypes.c_void_p(context.value) @PyCapsule_Destructor def py_capsule_destructor(void_p): capsule = ctypes.cast(void_p, ctypes.py_object) for py_capsule_name, dl_managed_tensor_type_p in ( (b"dltensor_versioned", DLManagedTensorVersioned_p), (b"dltensor", DLManagedTensor_p), ): if PyCapsule_IsValid(capsule, py_capsule_name): pointer = PyCapsule_GetPointer(capsule, py_capsule_name) managed = ctypes.cast(pointer, dl_managed_tensor_type_p) deleter = managed.contents.deleter if deleter: deleter(managed) break context = PyCapsule_GetContext(capsule) managed = ctypes.cast(context, ctypes.py_object) Py_DecRef(managed) def make_py_capsule(managed, versioned=False): if versioned >= 1: py_capsule_name = b"dltensor_versioned" if not isinstance(managed, DLManagedTensorVersioned): managed = make_dl_managed_tensor_versioned(managed) else: py_capsule_name = b"dltensor" 
if not isinstance(managed, DLManagedTensor): managed = make_dl_managed_tensor(managed) pointer = ctypes.pointer(managed) capsule = PyCapsule_New(pointer, py_capsule_name, py_capsule_destructor) context = make_py_context(managed) PyCapsule_SetContext(capsule, context) return capsule mpi4py-4.0.3/test/main.py000066400000000000000000000260641475341043600152340ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com import re import os import sys import argparse import fnmatch import unittest __unittest = True def parse_pattern(pattern): return f"*{pattern}*" if "*" not in pattern else pattern def parse_xfile(xfile): with open(xfile) as f: return list(map(parse_pattern, f.read().splitlines())) def setup_parser(parser): parser.add_argument( "-x", # "--exclude-name", help="Skip run tests which match the given substring", action="append", dest="excludePatterns", default=[], type=parse_pattern, metavar="TESTNAMEPATTERNS", ) parser.add_argument( # Py3.8: use action="extend" on "excludePatterns" "--xfile", help="Skip run tests which match the substrings in the given files", action="append", dest="excludeFile", default=[], type=str, metavar="FILENAME", ) parser.add_argument( "-i", # "--include", help="Include test module names matching REGEX", action="append", dest="include", default=[], type=str, metavar="REGEX", ) parser.add_argument( "-e", # "--exclude", help="Exclude test module names matching REGEX", action="append", dest="exclude", default=[], type=str, metavar="REGEX", ) parser.add_argument( "--inplace", help="Enable testing from in-place build", action="store_true", dest="inplace", default=False, ) parser.add_argument( "--no-builddir", help="Disable testing from build directory", action="store_false", dest="builddir", default=True, ) parser.add_argument( "--path", help="Prepend PATH to sys.path", action="append", dest="path", default=[], type=str, metavar="PATH", ) parser.add_argument( "--threads", help="Initialize MPI with thread support", action="store_true", dest="threads", default=None, ) parser.add_argument( "--no-threads", help="Initialize MPI without thread support", action="store_false", dest="threads", default=None, ) parser.add_argument( "--thread-level", help="Initialize MPI with required thread support", choices=["single", "funneled", "serialized", "multiple"], action="store", dest="thread_level", default=None, type=str, metavar="LEVEL", ) parser.add_argument( "--cupy", help="Enable testing with CuPy arrays", action="store_true", dest="cupy", default=False, ) parser.add_argument( "--no-cupy", help="Disable testing with CuPy arrays", action="store_false", dest="cupy", default=False, ) parser.add_argument( "--numba", help="Enable testing with Numba arrays", action="store_true", dest="numba", default=False, ) parser.add_argument( "--no-numba", help="Disable testing with Numba arrays", action="store_false", dest="numba", default=False, ) parser.add_argument( "--no-numpy", help="Disable testing with NumPy arrays", action="store_false", dest="numpy", default=True, ) parser.add_argument( "--no-array", help="Disable testing with builtin array module", action="store_false", dest="array", default=True, ) parser.add_argument( "--no-skip-mpi", help="Disable known failures with backend MPI", action="store_false", dest="skip_mpi", default=True, ) parser.add_argument( "--xml", help="Directory for storing XML reports", action="store", dest="xmloutdir", default=None, nargs="?", const=os.path.curdir, ) return parser def getbuilddir(): try: try: from setuptools.dist import 
Distribution except ImportError: from distutils.dist import Distribution try: from setuptools.command.build import build except ImportError: from distutils.command.build import build cmd_obj = build(Distribution()) cmd_obj.finalize_options() return cmd_obj.build_platlib except Exception: return None def getprocessorinfo(): from mpi4py import MPI rank = MPI.COMM_WORLD.Get_rank() name = MPI.Get_processor_name() return (rank, name) def getlibraryinfo(): from mpi4py import MPI x, y = MPI.Get_version() info = f"MPI {x}.{y}" name, version = MPI.get_vendor() if name != "unknown": x, y, z = version info += f" ({name} {x}.{y}.{z})" return info def getpythoninfo(): x, y, z = sys.version_info[:3] return f"Python {x}.{y}.{z} ({sys.executable})" def getpackageinfo(pkg): try: pkg = __import__(pkg) except ImportError: return None name = pkg.__name__ version = pkg.__version__ path = pkg.__path__[0] return f"{name} {version} ({path})" def setup_python(options): script = os.path.abspath(__file__) testdir = os.path.dirname(script) rootdir = os.path.dirname(testdir) if options.inplace: srcdir = os.path.join(rootdir, "src") sys.path.insert(0, srcdir) elif options.builddir: builddir = getbuilddir() if builddir is not None: builddir = os.path.join(rootdir, builddir) if os.path.isdir(builddir): sys.path.insert(0, builddir) if options.path: for path in reversed(options.path): for pth in path.split(os.path.pathsep): if os.path.exists(pth): sys.path.insert(0, pth) def setup_modules(options): # if not options.cupy: sys.modules['cupy'] = None if not options.numba: sys.modules['numba'] = None if not options.numpy: sys.modules['numpy'] = None if not options.array: sys.modules['array'] = None # import mpi4py if options.threads is not None: mpi4py.rc.threads = options.threads if options.thread_level is not None: mpi4py.rc.thread_level = options.thread_level # import mpi4py.MPI def setup_unittest(options): from unittest.runner import _WritelnDecorator super_writeln = _WritelnDecorator.writeln def writeln(self, arg=None): try: self.flush() except: pass super_writeln(self, arg) try: self.flush() except: pass _WritelnDecorator.writeln = writeln def print_banner(options): rank, name = getprocessorinfo() prefix = f"[{rank}@{name}]" def writeln(message="", endl="\n"): if message is None: return sys.stderr.flush() sys.stderr.write(f"{prefix} {message}{endl}") sys.stderr.flush() if options.verbosity: writeln(getpythoninfo()) writeln(getpackageinfo('numpy')) writeln(getpackageinfo('numba')) writeln(getpackageinfo('cupy')) writeln(getlibraryinfo()) writeln(getpackageinfo('mpi4py')) class TestLoader(unittest.TestLoader): excludePatterns = None def __init__(self, include=None, exclude=None): super().__init__() if include: self.include = re.compile('|'.join(include)).search else: self.include = lambda arg: True if exclude: self.exclude = re.compile('|'.join(exclude)).search else: self.exclude = lambda arg: False def _match_path(self, path, full_path, pattern): match = super()._match_path(path, full_path, pattern) if match: if not self.include(path): return False if self.exclude(path): return False return match def getTestCaseNames(self, testCaseClass): def exclude(name): modname = testCaseClass.__module__ clsname = testCaseClass.__qualname__ fullname = f'{modname}.{clsname}.{name}' return not any(map( lambda pattern: fnmatch.fnmatchcase(fullname, pattern), self.excludePatterns )) names = super().getTestCaseNames(testCaseClass) if self.excludePatterns: names = list(filter(exclude, names)) return names class 
TestProgram(unittest.TestProgram): def _getMainArgParser(self, parent): parser = super()._getMainArgParser(parent) setup_parser(parser) return parser def _getDiscoveryArgParser(self, parent): parser = argparse.ArgumentParser(parents=[parent]) setup_parser(parser) return parser if sys.version_info < (3, 7): def _do_discovery(self, argv, Loader=None): if argv is not None: if self._discovery_parser is None: self._initArgParsers() self._discovery_parser.parse_args(argv, self) self.createTests(from_discovery=True, Loader=Loader) def createTests(self, from_discovery=False, Loader=None): setup_python(self) setup_modules(self) setup_unittest(self) testdir = os.path.dirname(__file__) if from_discovery: self.start = testdir self.pattern = "test_*.py" elif testdir not in sys.path: sys.path.insert(0, testdir) if not self.skip_mpi: import mpiunittest mpiunittest.skipMPI = lambda p, *c: lambda f: f for xfile in self.excludeFile: self.excludePatterns.extend(parse_xfile(xfile)) self.testLoader = TestLoader(self.include, self.exclude) self.testLoader.excludePatterns = self.excludePatterns if sys.version_info < (3, 7): if from_discovery: loader = self.testLoader if Loader is None else Loader() self.test = loader.discover(self.start, self.pattern, None) else: super().createTests() return super().createTests(from_discovery, Loader) def _setUpXMLRunner(self): from mpi4py import MPI size = MPI.COMM_WORLD.Get_size() rank = MPI.COMM_WORLD.Get_rank() try: import xmlrunner except ModuleNotFoundError: if rank == 0: print( "Cannot generate XML reports!", "Install 'unittest-xml-reporting'.", file=sys.stderr, flush=True, ) sys.exit(1) runner = xmlrunner.XMLTestRunner(output=self.xmloutdir) runner.outsuffix += f"-{rank}" if size > 1 else "" self.testRunner = runner self.buffer = False self.catchbreak = False def runTests(self): print_banner(self) if self.xmloutdir: self._setUpXMLRunner() try: super().runTests() except SystemExit: pass success = self.result.wasSuccessful() if not success and self.failfast: from mpi4py import run run.set_abort_status(1) sys.exit(not success) main = TestProgram if __name__ == '__main__': sys.dont_write_bytecode = True main(module=None) mpi4py-4.0.3/test/mpiunittest.py000066400000000000000000000073551475341043600166770ustar00rootroot00000000000000from collections import namedtuple import contextlib import unittest class TestCase(unittest.TestCase): def assertAlmostEqual(self, first, second): num = complex(second) - complex(first) den = max(abs(complex(second)), abs(complex(first))) or 1.0 if (abs(num/den) > 1e-2): raise self.failureException(f'{first!r} != {second!r}') @contextlib.contextmanager def catchNotImplementedError(self, version=None, subversion=0): try: yield except NotImplementedError: if version is not None: from mpi4py import MPI mpi_version = (MPI.VERSION, MPI.SUBVERSION) self.assertLess(mpi_version, (version, subversion)) _Version = namedtuple("_Version", ["major", "minor", "patch"]) def _parse_version(version): version = tuple(map(int, version.split('.'))) + (0, 0, 0) return _Version(*version[:3]) class _VersionPredicate: def __init__(self, versionPredicateStr): import re re_name = re.compile(r"(?i)^([a-z_]\w*(?:\.[a-z_]\w*)*)(.*)$") re_pred = re.compile(r"^(<=|>=|<|>|!=|==)(.*)$") def split(item): m = re_pred.match(item) op, version = m.groups() version = _parse_version(version) return op, version vpstr = versionPredicateStr.replace(' ', '') m = re_name.match(vpstr) name, plist = m.groups() if plist: assert plist[0] == '(' and plist[-1] == ')' plist = plist[1:-1] pred = 
[split(p) for p in plist.split(',') if p] self.name = name self.pred = pred def __str__(self): if self.pred: items = [f"{op}{'.'.join(map(str, ver))}" for op, ver in self.pred] return f"{self.name}({','.join(items)})" else: return self.name def satisfied_by(self, version): from operator import lt, le, gt, ge, eq, ne opmap = {'<': lt, '<=': le, '>': gt, '>=': ge, '==': eq, '!=': ne} version = _parse_version(version) for op, ver in self.pred: if not opmap[op](version, ver): return False return True def mpi_predicate(predicate): from mpi4py import MPI def key(s): s = s.replace(' ', '') s = s.replace('/', '') s = s.replace('-', '') s = s.replace('Intel', 'I') s = s.replace('Microsoft', 'MS') return s.lower() vp = _VersionPredicate(key(predicate)) if vp.name == 'mpi': name, version = 'mpi', MPI.Get_version() version = version + (0,) else: name, version = MPI.get_vendor() if vp.name == key(name): x, y, z = version if vp.satisfied_by(f'{x}.{y}.{z}'): return vp return None def is_mpi(predicate): return mpi_predicate(predicate) def is_mpi_gpu(predicate, array): if array.backend in ('cupy', 'numba', 'dlpack-cupy'): if mpi_predicate(predicate): return True return False SkipTest = unittest.SkipTest skip = unittest.skip skipIf = unittest.skipIf skipUnless = unittest.skipUnless def skipMPI(predicate, *conditions): version = mpi_predicate(predicate) if version: if not conditions or any(conditions): return unittest.skip(str(version)) return unittest.skipIf(False, '') def disable(what, reason): return unittest.skip(reason)(what) @contextlib.contextmanager def capture_stderr(): import io import sys stderr = sys.stderr stream = io.StringIO() sys.stderr = stream try: yield stream finally: sys.stderr = stderr def main(*args, **kwargs): from main import main try: main(*args, **kwargs) except SystemExit: pass mpi4py-4.0.3/test/runtests.py000066400000000000000000000002661475341043600161730ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com if __name__ == '__main__': import sys sys.dont_write_bytecode = True from main import main main(module=None) mpi4py-4.0.3/test/spawn_child.py000066400000000000000000000003441475341043600165740ustar00rootroot00000000000000import sys sys.path.insert(0, sys.argv[1]) from mpi4py import MPI parent = MPI.Comm.Get_parent() parent.Barrier() parent.Disconnect() assert parent == MPI.COMM_NULL parent = MPI.Comm.Get_parent() assert parent == MPI.COMM_NULL mpi4py-4.0.3/test/test_address.py000066400000000000000000000034601475341043600167670ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest from arrayimpl import ( array, numpy, ) class TestAddress(unittest.TestCase): @unittest.skipIf(array is None, 'array') def testGetAddress1(self): from struct import pack, unpack location = array.array('i', range(10)) bufptr, _ = location.buffer_info() addr = MPI.Get_address(location) addr = unpack('P', pack('P', addr))[0] self.assertEqual(addr, bufptr) @unittest.skipIf(numpy is None, 'numpy') def testGetAddress2(self): from struct import pack, unpack location = numpy.asarray(range(10), dtype='i') bufptr, _ = location.__array_interface__['data'] addr = MPI.Get_address(location) addr = unpack('P', pack('P', addr))[0] self.assertEqual(addr, bufptr) def testNone(self): base = MPI.Get_address(None) addr = MPI.Aint_add(base, 0) self.assertEqual(addr, base) diff = MPI.Aint_diff(base, base) self.assertEqual(diff, 0) def testBottom(self): base = MPI.Get_address(MPI.BOTTOM) addr = MPI.Aint_add(base, 0) self.assertEqual(addr, base) diff = 
MPI.Aint_diff(base, base) self.assertEqual(diff, 0) @unittest.skipIf(array is None, 'array') def testAintAdd(self): location = array.array('i', range(10)) base = MPI.Get_address(location) addr = MPI.Aint_add(base, 4) self.assertEqual(addr, base + 4) @unittest.skipIf(array is None, 'array') def testAintDiff(self): location = array.array('i', range(10)) base = MPI.Get_address(location) addr1 = base + 8 addr2 = base + 4 diff = MPI.Aint_diff(addr1, addr2) self.assertEqual(diff, 4) if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_attributes.py000066400000000000000000000212331475341043600175260ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest try: import array except ImportError: array = None class BaseTestAttr: keyval = MPI.KEYVAL_INVALID def tearDown(self): if self.obj: self.obj.Free() if self.keyval != MPI.KEYVAL_INVALID: self.keyval = type(self.obj).Free_keyval(self.keyval) self.assertEqual(self.keyval, MPI.KEYVAL_INVALID) def testAttr(self, copy_fn=None, delete_fn=None): cls, obj = type(self.obj), self.obj self.keyval = cls.Create_keyval(copy_fn, delete_fn) self.assertNotEqual(self.keyval, MPI.KEYVAL_INVALID) attr = obj.Get_attr(self.keyval) self.assertIsNone(attr) attrval = [1,2,3] obj.Set_attr(self.keyval, attrval) attr = obj.Get_attr(self.keyval) self.assertIs(attr, attrval) if hasattr(obj, 'Dup'): dup = obj.Dup() attr = dup.Get_attr(self.keyval) if copy_fn is True: self.assertIs(attr, attrval) elif not copy_fn: self.assertIsNone(attr) dup.Free() obj.Delete_attr(self.keyval) attr = obj.Get_attr(self.keyval) self.assertIsNone(attr) def testAttrCopyFalse(self): self.testAttr(False) def testAttrCopyTrue(self): self.testAttr(True) def testAttrNoCopy(self): cls, obj = type(self.obj), self.obj def copy_fn(o, k, v): assert k == self.keyval assert v is attrval return NotImplemented self.keyval = cls.Create_keyval(copy_fn, None) self.assertNotEqual(self.keyval, MPI.KEYVAL_INVALID) attr = obj.Get_attr(self.keyval) self.assertIsNone(attr) attrval = [1,2,3] obj.Set_attr(self.keyval, attrval) attr = obj.Get_attr(self.keyval) self.assertIs(attr, attrval) if hasattr(obj, 'Dup'): dup = obj.Dup() attr = dup.Get_attr(self.keyval) self.assertIsNone(attr) dup.Free() obj.Delete_attr(self.keyval) attr = obj.Get_attr(self.keyval) self.assertIsNone(attr) def testAttrNoPython(self, intval=123456789): cls, obj = type(self.obj), self.obj def copy_fn(o, k, v): assert k == self.keyval assert v == intval return v def del_fn(o, k, v): assert k == self.keyval assert v == intval self.keyval = cls.Create_keyval(copy_fn, del_fn, nopython=True) self.assertNotEqual(self.keyval, MPI.KEYVAL_INVALID) attr = obj.Get_attr(self.keyval) self.assertIsNone(attr) obj.Set_attr(self.keyval, intval) attr = obj.Get_attr(self.keyval) self.assertEqual(attr, intval) if hasattr(obj, 'Dup'): dup = obj.Dup() attr = dup.Get_attr(self.keyval) self.assertEqual(attr, intval) dup.Free() obj.Delete_attr(self.keyval) attr = obj.Get_attr(self.keyval) self.assertIsNone(attr) @unittest.skipMPI('openmpi(<=1.10.2)') def testAttrNoPythonZero(self): self.testAttrNoPython(0) @unittest.skipIf(array is None, 'array') def testAttrNoPythonArray(self): cls, obj = type(self.obj), self.obj self.keyval = cls.Create_keyval(nopython=True) # ary = array.array('i', [42]) addr, _ = ary.buffer_info() obj.Set_attr(self.keyval, addr) # attr = obj.Get_attr(self.keyval) self.assertEqual(attr, addr) @unittest.skipMPI('impi(<2021.14.0)') @unittest.skipMPI('mvapich') @unittest.skipMPI('mpich(<4.2.1)') 
@unittest.skipMPI('openmpi(<5.0.0)') def testAttrCopyException(self): cls, obj = type(self.obj), self.obj if not isinstance(obj, MPI.Datatype): return if not hasattr(cls, 'Dup'): return def copy_fn(o, k, v): raise ValueError self.keyval = cls.Create_keyval(copy_fn, None) try: obj.Set_attr(self.keyval, "value") with self.assertRaises(MPI.Exception) as exc_cm: with unittest.capture_stderr() as stderr: obj.Dup().Free() ierr = exc_cm.exception.Get_error_class() self.assertEqual(ierr, MPI.ERR_OTHER) self.assertIn('ValueError', stderr.getvalue()) finally: obj.Delete_attr(self.keyval) self.keyval = cls.Free_keyval(self.keyval) @unittest.skipMPI('impi(<2021.14.0)') @unittest.skipMPI('mvapich') @unittest.skipMPI('mpich(<4.2.1)') def testAttrDeleteException(self): cls, obj = type(self.obj), self.obj raise_flag = True def delete_fn(o, k, v): raise ValueError self.keyval = cls.Create_keyval(None, delete_fn) obj.Set_attr(self.keyval, "value") try: with self.assertRaises(MPI.Exception) as exc_cm: with unittest.capture_stderr() as stderr: obj.Delete_attr(self.keyval) ierr = exc_cm.exception.Get_error_class() self.assertEqual(ierr, MPI.ERR_OTHER) self.assertIn('ValueError', stderr.getvalue()) finally: self.keyval = cls.Free_keyval(self.keyval) class BaseTestCommAttr(BaseTestAttr): NULL = MPI.COMM_NULL @unittest.skipMPI('openmpi(<=1.5.1)') def testAttrCopyDelete(self): cls, obj, null = type(self.obj), self.obj, self.NULL # self.keyval = cls.Create_keyval( copy_fn=lambda o, k, v: cls.Dup(v), delete_fn=lambda o, k, v: cls.Free(v)) self.assertNotEqual(self.keyval, MPI.KEYVAL_INVALID) # obj1 = obj dup1 = obj1.Dup() obj1.Set_attr(self.keyval, dup1) self.assertNotEqual(dup1, null) obj2 = obj1.Dup() dup2 = obj2.Get_attr(self.keyval) self.assertNotEqual(dup1, dup2) obj2.Free() self.assertEqual(dup2, null) self.obj.Delete_attr(self.keyval) self.assertEqual(dup1, null) class TestCommAttrWorld(BaseTestCommAttr, unittest.TestCase): def setUp(self): self.obj = MPI.COMM_WORLD.Dup() class TestCommAttrSelf(BaseTestCommAttr, unittest.TestCase): def setUp(self): self.obj = MPI.COMM_SELF.Dup() class BaseTestDatatypeAttr(BaseTestAttr): NULL = MPI.DATATYPE_NULL def testAttrCopyDelete(self): cls, obj, null = type(self.obj), self.obj, self.NULL # self.keyval = cls.Create_keyval( copy_fn=lambda o, k, v: cls.Dup(v), delete_fn=lambda o, k, v: cls.Free(v)) self.assertNotEqual(self.keyval, MPI.KEYVAL_INVALID) # obj1 = obj dup1 = obj1.Dup() obj1.Set_attr(self.keyval, dup1) self.assertNotEqual(dup1, null) obj2 = obj1.Dup() dup2 = obj2.Get_attr(self.keyval) self.assertNotEqual(dup1, dup2) obj2.Free() self.assertEqual(dup2, null) self.obj.Delete_attr(self.keyval) self.assertEqual(dup1, null) class TestDatatypeAttrBYTE(BaseTestDatatypeAttr, unittest.TestCase): def setUp(self): self.obj = MPI.BYTE.Dup() class TestDatatypeAttrINT(BaseTestDatatypeAttr, unittest.TestCase): def setUp(self): self.obj = MPI.INT.Dup() class TestDatatypeAttrFLOAT(BaseTestDatatypeAttr, unittest.TestCase): def setUp(self): self.obj = MPI.FLOAT.Dup() class TestWinAttr(BaseTestAttr, unittest.TestCase): NULL = MPI.WIN_NULL def setUp(self): win = MPI.Win.Create(MPI.BOTTOM, 1, MPI.INFO_NULL, MPI.COMM_SELF) self.obj = self.win = win @unittest.skipMPI('openmpi(<=1.5.1)') def testAttrCopyDelete(self): # null = self.NULL def delete_fn(o, k, v): assert isinstance(o, MPI.Win) assert k == self.keyval assert v is win MPI.Win.Free(v) self.keyval = MPI.Win.Create_keyval(delete_fn=delete_fn) self.assertNotEqual(self.keyval, MPI.KEYVAL_INVALID) # win = MPI.Win.Create(MPI.BOTTOM, 1, 
MPI.INFO_NULL, MPI.COMM_SELF) self.obj.Set_attr(self.keyval, win) self.assertNotEqual(win, null) self.obj.Delete_attr(self.keyval) self.assertEqual(win, null) try: k = MPI.Datatype.Create_keyval() k = MPI.Datatype.Free_keyval(k) except NotImplementedError: unittest.disable(BaseTestDatatypeAttr, 'mpi-type-attr') try: MPI.Win.Create(MPI.BOTTOM, 1, MPI.INFO_NULL, MPI.COMM_SELF).Free() k = MPI.Win.Create_keyval() k = MPI.Win.Free_keyval(k) except (NotImplementedError, MPI.Exception): unittest.disable(TestWinAttr, 'mpi-win-attr') if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_buffer.py000066400000000000000000000273361475341043600166230ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest try: import array except ImportError: array = None class TestBuffer(unittest.TestCase): def testNewEmpty(self): buffer = MPI.buffer buf = buffer() self.assertEqual(buf.address, 0) self.assertIsNone(buf.obj) self.assertEqual(buf.nbytes, 0) self.assertFalse(buf.readonly) self.assertEqual(buf.format, 'B') self.assertEqual(buf.itemsize, 1) self.assertEqual(len(buf), 0) buf[:] = 0 buf[:] = buffer() m = memoryview(buf) self.assertEqual(m.format, 'B') self.assertEqual(m.itemsize, 1) self.assertEqual(m.ndim, 1) self.assertIs(m.readonly, False) self.assertEqual(m.shape, (0,)) self.assertEqual(m.strides, (1,)) self.assertEqual(m.tobytes(), b"") self.assertEqual(m.tolist(), []) buf.release() self.assertEqual(buf.address, 0) self.assertEqual(buf.nbytes, 0) self.assertFalse(buf.readonly) def testNewBad(self): buffer = MPI.buffer for obj in (None, 0, 0.0, [], (), []): self.assertRaises(TypeError, buffer, obj) def testNewBytes(self): buffer = MPI.buffer obj = b"abc" buf = buffer(obj) self.assertEqual(buf.obj, obj) self.assertEqual(buf.nbytes, len(obj)) self.assertIs(buf.readonly, True) with self.assertRaises(TypeError): buf[:] = 0 def testNewBytearray(self): buffer = MPI.buffer obj = bytearray([1,2,3]) buf = buffer(obj) self.assertEqual(buf.obj, obj) self.assertEqual(buf.nbytes, len(obj)) self.assertFalse(buf.readonly) with self.assertRaises(ValueError): buf[0:1] = buf[1:3] @unittest.skipIf(array is None, 'array') def testNewArray(self): buffer = MPI.buffer obj = array.array('i', [1,2,3]) buf = buffer(obj) self.assertEqual(buf.obj, obj) self.assertEqual(buf.nbytes, len(obj)*obj.itemsize) self.assertFalse(buf.readonly) def testAllocate(self): buffer = MPI.buffer for size in (0, 1, 2): buf = buffer.allocate(size) self.assertEqual(buf.nbytes, size) self.assertNotEqual(buf.address, 0) view = memoryview(buf.obj) self.assertEqual(buf.nbytes, view.nbytes) for clear in (False, True): buf = buffer.allocate(1024, clear) self.assertEqual(buf.nbytes, 1024) self.assertNotEqual(buf.address, 0) if clear: self.assertEqual(buf[0], 0) self.assertEqual(buf[-1], 0) self.assertRaises(TypeError, buffer.allocate, None) self.assertRaises(ValueError, buffer.allocate, -1) def testFromBufferBad(self): buffer = MPI.buffer for obj in (None, 0, 0.0, [], (), []): self.assertRaises(TypeError, buffer.frombuffer, obj) def testFromBufferBytes(self): buffer = MPI.buffer buf = buffer.frombuffer(b"abc", readonly=True) self.assertNotEqual(buf.address, 0) self.assertEqual(type(buf.obj), bytes) self.assertEqual(buf.obj, b"abc") self.assertEqual(buf.nbytes, 3) self.assertTrue (buf.readonly) self.assertEqual(buf.format, 'B') self.assertEqual(buf.itemsize, 1) self.assertEqual(len(buf), 3) m = memoryview(buf) self.assertEqual(m.format, 'B') self.assertEqual(m.itemsize, 1) self.assertEqual(m.ndim, 1) self.assertTrue 
(m.readonly) self.assertEqual(m.shape, (3,)) self.assertEqual(m.strides, (1,)) self.assertEqual(m.tobytes(), b"abc") self.assertEqual(m.tolist(), [ord(c) for c in "abc"]) buf.release() self.assertEqual(buf.address, 0) self.assertEqual(buf.nbytes, 0) self.assertFalse(buf.readonly) @unittest.skipIf(array is None, 'array') def testFromBufferArrayRO(self): buffer = MPI.buffer obj = array.array('B', [1,2,3]) buf = buffer.frombuffer(obj, readonly=True) self.assertNotEqual(buf.address, 0) self.assertEqual(type(buf.obj), array.array) self.assertEqual(buf.nbytes, 3) self.assertTrue (buf.readonly) self.assertEqual(buf.format, 'B') self.assertEqual(buf.itemsize, 1) self.assertEqual(len(buf), 3) m = memoryview(buf) self.assertEqual(m.format, 'B') self.assertEqual(m.itemsize, 1) self.assertEqual(m.ndim, 1) self.assertTrue (m.readonly) self.assertEqual(m.shape, (3,)) self.assertEqual(m.strides, (1,)) self.assertEqual(m.tobytes(), b"\1\2\3") self.assertEqual(m.tolist(), [1,2,3]) buf.release() self.assertEqual(buf.address, 0) self.assertEqual(buf.nbytes, 0) self.assertFalse(buf.readonly) @unittest.skipIf(array is None, 'array') def testFromBufferArrayRW(self): buffer = MPI.buffer obj = array.array('B', [1,2,3]) buf = buffer.frombuffer(obj, readonly=False) self.assertNotEqual(buf.address, 0) self.assertEqual(buf.nbytes, 3) self.assertFalse(buf.readonly) self.assertEqual(len(buf), 3) m = memoryview(buf) self.assertEqual(m.format, 'B') self.assertEqual(m.itemsize, 1) self.assertEqual(m.ndim, 1) self.assertFalse(m.readonly) self.assertEqual(m.shape, (3,)) self.assertEqual(m.strides, (1,)) self.assertEqual(m.tobytes(), b"\1\2\3") self.assertEqual(m.tolist(), [1,2,3]) buf[:] = 1 self.assertEqual(obj, array.array('B', [1]*3)) buf[1:] = array.array('B', [7]*2) self.assertEqual(obj, array.array('B', [1,7,7])) buf[1:2] = array.array('B', [8]*1) self.assertEqual(obj, array.array('B', [1,8,7])) buf.release() self.assertEqual(buf.address, 0) self.assertEqual(buf.nbytes, 0) self.assertFalse(buf.readonly) @unittest.skipIf(array is None, 'array') def testFromAddress(self): buffer = MPI.buffer obj = array.array('B', [1,2,3]) addr, size = obj.buffer_info() nbytes = size * obj.itemsize buf = buffer.fromaddress(addr, nbytes, readonly=False) self.assertNotEqual(buf.address, 0) self.assertEqual(buf.nbytes, 3) self.assertFalse(buf.readonly) self.assertEqual(len(buf), 3) m = memoryview(buf) self.assertEqual(m.format, 'B') self.assertEqual(m.itemsize, 1) self.assertEqual(m.ndim, 1) self.assertFalse(m.readonly) self.assertEqual(m.shape, (3,)) self.assertEqual(m.strides, (1,)) self.assertEqual(m.tobytes(), b"\1\2\3") self.assertEqual(m.tolist(), [1,2,3]) buf[:] = 1 self.assertEqual(obj, array.array('B', [1]*3)) buf[1:] = array.array('B', [7]*2) self.assertEqual(obj, array.array('B', [1,7,7])) buf[1:2] = array.array('B', [8]*1) self.assertEqual(obj, array.array('B', [1,8,7])) buf.release() self.assertEqual(buf.address, 0) self.assertEqual(buf.nbytes, 0) self.assertFalse(buf.readonly) with self.assertRaises(ValueError): buffer.fromaddress(addr, -1) with self.assertRaises(ValueError): buffer.fromaddress(0, 1) def testToReadonly(self): buffer = MPI.buffer obj = bytearray(b"abc") buf1 = buffer.frombuffer(obj) buf2 = buf1.toreadonly() self.assertFalse(buf1.readonly) self.assertTrue (buf2.readonly) self.assertEqual(buf1.address, buf2.address) self.assertEqual(buf1.obj, buf2.obj) self.assertEqual(type(buf1.obj), type(buf2.obj)) self.assertEqual(buf1.nbytes, buf2.nbytes) def testCast(self): buffer = MPI.buffer buf = buffer.allocate(2 * 3 * 
4) mem = buf.cast('i') for i in range(2 * 3): mem[i] = i mem = buf.cast('i', (2, 3)) for i in range(2): for j in range(3): self.assertEqual(mem[i, j], 3 * i + j) mem = buf.cast('i', (3, 2)) for i in range(3): for j in range(2): self.assertEqual(mem[i, j], 2 * i + j) def testSequence(self): n = 16 try: mem = MPI.Alloc_mem(n, MPI.INFO_NULL) except NotImplementedError: self.skipTest('mpi-alloc_mem') try: self.assertIs(type(mem), MPI.buffer) self.assertNotEqual(mem.address, 0) self.assertEqual(mem.nbytes, n) self.assertFalse(mem.readonly) self.assertEqual(len(mem), n) def delitem(): del mem[n] def getitem1(): return mem[n] def getitem2(): return mem[::2] def getitem3(): return mem[None] def setitem1(): mem[n] = 0 def setitem2(): mem[::2] = 0 def setitem3(): mem[None] = 0 self.assertRaises(Exception, delitem) self.assertRaises(IndexError, getitem1) self.assertRaises(IndexError, getitem2) self.assertRaises(TypeError, getitem3) self.assertRaises(IndexError, setitem1) self.assertRaises(IndexError, setitem2) self.assertRaises(TypeError, setitem3) for i in range(n): mem[i] = i for i in range(n): self.assertEqual(mem[i], i) mem[:] = 0 for i in range(-n, 0): mem[i] = abs(i) for i in range(-n, 0): self.assertEqual(mem[i], abs(i)) mem[:] = 0 for i in range(n): self.assertEqual(mem[i], 0) mem[:] = 255 for i in range(n): self.assertEqual(mem[i], 255) mem[:n//2] = 1 mem[n//2:] = 0 for i in range(n//2): self.assertEqual(mem[i], 1) for i in range(n//2, n): self.assertEqual(mem[i], 0) mem[:] = 0 mem[1:5] = b"abcd" mem[10:13] = b"xyz" self.assertEqual(mem[0], 0) for i, c in enumerate("abcd"): self.assertEqual(mem[1+i], ord(c)) for i in range(5, 10): self.assertEqual(mem[i], 0) for i, c in enumerate("xyz"): self.assertEqual(mem[10+i], ord(c)) for i in range(13, n): self.assertEqual(mem[i], 0) self.assertEqual(mem[1:5].tobytes(), b"abcd") self.assertEqual(mem[10:13].tobytes(), b"xyz") finally: MPI.Free_mem(mem) self.assertEqual(mem.address, 0) self.assertEqual(mem.nbytes, 0) self.assertFalse(mem.readonly) def testBuffering(self): buf = MPI.Alloc_mem((1<<16)+MPI.BSEND_OVERHEAD) MPI.Attach_buffer(buf) try: with self.catchNotImplementedError(4,1): MPI.Flush_buffer() with self.catchNotImplementedError(4,1): MPI.Iflush_buffer().Wait() finally: oldbuf = MPI.Detach_buffer() self.assertEqual(oldbuf.address, buf.address) self.assertEqual(oldbuf.nbytes, buf.nbytes) MPI.Free_mem(buf) if MPI.BUFFER_AUTOMATIC != 0: MPI.Attach_buffer(MPI.BUFFER_AUTOMATIC) bufauto = MPI.Detach_buffer() self.assertEqual(bufauto, MPI.BUFFER_AUTOMATIC) def testAttachBufferReadonly(self): buf = MPI.buffer(b"abc") self.assertRaises(BufferError, MPI.Attach_buffer, buf) try: MPI.buffer except AttributeError: unittest.disable(TestBuffer, 'mpi4py-buffer') if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_cco_buf.py000066400000000000000000000672431475341043600167530ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import arrayimpl from functools import reduce prod = lambda sequence,start=1: reduce(lambda x, y: x*y, sequence, start) def skip_op(typecode, op): if typecode in '?': return True if typecode in 'FDG': if op in (MPI.MAX, MPI.MIN): return True return False def maxvalue(a): try: typecode = a.typecode except AttributeError: typecode = a.dtype.char if typecode == ('f'): return 1e30 elif typecode == ('d'): return 1e300 else: return 2 ** (a.itemsize * 7) - 1 class BaseTestCCOBuf: COMM = MPI.COMM_NULL def testBarrier(self): self.COMM.Barrier() def testBcast(self): size = self.COMM.Get_size() rank = 
self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) if rank == root: buf = array(root, typecode, root) else: buf = array( -1, typecode, root) self.COMM.Bcast(buf.as_mpi(), root=root) for value in buf: self.assertEqual(value, check) def testGather(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) sbuf = array(root, typecode, root+1) if rank == root: rbuf = array(-1, typecode, (size,root+1)) else: rbuf = array([], typecode) self.COMM.Gather(sbuf.as_mpi(), rbuf.as_mpi(), root=root) if rank == root: for value in rbuf.flat: self.assertEqual(value, check) def testScatter(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) rbuf = array(-1, typecode, size) if rank == root: sbuf = array(root, typecode, (size, size)) else: sbuf = array([], typecode) self.COMM.Scatter(sbuf.as_mpi(), rbuf.as_mpi(), root=root) for value in rbuf: self.assertEqual(value, check) def testAllgather(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) sbuf = array(root, typecode, root+1) rbuf = array( -1, typecode, (size, root+1)) self.COMM.Allgather(sbuf.as_mpi(), rbuf.as_mpi()) for value in rbuf.flat: self.assertEqual(value, check) def testAlltoall(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) sbuf = array(root, typecode, (size, root+1)) rbuf = array( -1, typecode, (size, root+1)) self.COMM.Alltoall(sbuf.as_mpi(), rbuf.as_mpi_c(root+1)) for value in rbuf.flat: self.assertEqual(value, check) def testReduce(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue for root in range(size): sbuf = array(range(size), typecode) rbuf = array(-1, typecode, size) self.COMM.Reduce(sbuf.as_mpi(), rbuf.as_mpi(), op, root) max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if rank != root: check = arrayimpl.scalar(-1) self.assertEqual(value, check) continue if op == MPI.SUM: if (i * size) < max_val: self.assertAlmostEqual(value, i*size) elif op == MPI.PROD: if (i ** size) < max_val: self.assertAlmostEqual(value, i**size) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) def testAllreduce(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): if skip_op(typecode, op): continue sbuf = array(range(size), typecode) rbuf = array(0, typecode, size) self.COMM.Allreduce(sbuf.as_mpi(), rbuf.as_mpi(), op) max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: if (i * size) < max_val: self.assertAlmostEqual(value, i*size) elif op == MPI.PROD: if (i ** size) < max_val: self.assertAlmostEqual(value, i**size) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) def testReduceScatter(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): if skip_op(typecode, op): continue rcnt = list(range(1,size+1)) sbuf = 
array([rank+1]*sum(rcnt), typecode) rbuf = array(-1, typecode, rank+1) self.COMM.Reduce_scatter(sbuf.as_mpi(), rbuf.as_mpi(), None, op) max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: redval = sum(range(size))+size if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.PROD: redval = prod(range(1,size+1)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.MAX: self.assertEqual(value, size) elif op == MPI.MIN: self.assertEqual(value, 1) rbuf = array(-1, typecode, rank+1) self.COMM.Reduce_scatter(sbuf.as_mpi(), rbuf.as_mpi(), rcnt, op) max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: redval = sum(range(size))+size if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.PROD: redval = prod(range(1,size+1)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.MAX: self.assertEqual(value, size) elif op == MPI.MIN: self.assertEqual(value, 1) def testReduceScatterBlock(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): if skip_op(typecode, op): continue for rcnt in range(1, size+1): sbuf = array([rank]*rcnt*size, typecode) rbuf = array(-1, typecode, rcnt) if op == MPI.PROD: sbuf = array([rank+1]*rcnt*size, typecode) self.COMM.Reduce_scatter_block(sbuf.as_mpi(), rbuf.as_mpi(), op) max_val = maxvalue(rbuf) v_sum = (size*(size-1))/2 v_prod = 1 for i in range(1,size+1): v_prod *= i v_max = size-1 v_min = 0 for i, value in enumerate(rbuf): if op == MPI.SUM: if v_sum <= max_val: self.assertAlmostEqual(value, v_sum) elif op == MPI.PROD: if v_prod <= max_val: self.assertAlmostEqual(value, v_prod) elif op == MPI.MAX: self.assertEqual(value, v_max) elif op == MPI.MIN: self.assertEqual(value, v_min) def testScan(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() # -- for array, typecode in arrayimpl.loop(): for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue sbuf = array(range(size), typecode) rbuf = array(0, typecode, size) self.COMM.Scan(sbuf.as_mpi(), rbuf.as_mpi(), op) max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: if (i * (rank + 1)) < max_val: self.assertAlmostEqual(value, i * (rank + 1)) elif op == MPI.PROD: if (i ** (rank + 1)) < max_val: self.assertAlmostEqual(value, i ** (rank + 1)) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) def testExscan(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue sbuf = array(range(size), typecode) rbuf = array(0, typecode, size) try: self.COMM.Exscan(sbuf.as_mpi(), rbuf.as_mpi(), op) except NotImplementedError: self.skipTest('mpi-exscan') if rank == 1: for i, value in enumerate(rbuf): self.assertEqual(value, i) elif rank > 1: max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: if (i * rank) < max_val: self.assertAlmostEqual(value, i * rank) elif op == MPI.PROD: if (i ** rank) < max_val: self.assertAlmostEqual(value, i ** rank) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) def testBcastTypeIndexed(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): datatype = array.TypeMap[typecode] for root in range(size): # if rank == root: buf = array(range(10), 
typecode).as_raw() else: buf = array(-1, typecode, 10).as_raw() indices = list(range(0, len(buf), 2)) newtype = datatype.Create_indexed_block(1, indices) newtype.Commit() newbuf = (buf, 1, newtype) self.COMM.Bcast(newbuf, root=root) newtype.Free() if rank != root: for i, value in enumerate(buf): check = arrayimpl.scalar(-1 if (i % 2) else i) self.assertEqual(value, check) # if rank == root: buf = array(range(10), typecode).as_raw() else: buf = array(-1, typecode, 10).as_raw() indices = list(range(1, len(buf), 2)) newtype = datatype.Create_indexed_block(1, indices) newtype.Commit() newbuf = (buf, 1, newtype) self.COMM.Bcast(newbuf, root) newtype.Free() if rank != root: for i, value in enumerate(buf): check = arrayimpl.scalar(-1 if not (i % 2) else i) self.assertEqual(value, check) class BaseTestCCOBufInplace: def testGather(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) count = root+3 if rank == root: sbuf = MPI.IN_PLACE buf = array(-1, typecode, (size, count)) s, e = rank*count, (rank+1)*count for i in range(s, e): buf.flat[i] = check rbuf = buf.as_mpi() else: buf = array(root, typecode, count) sbuf = buf.as_mpi() rbuf = None self.COMM.Gather(sbuf, rbuf, root=root) for value in buf.flat: self.assertEqual(value, check) if rank == root: sbuf = None self.COMM.Gather(sbuf, rbuf, root=root) for value in buf.flat: self.assertEqual(value, check) @unittest.skipMPI('msmpi(==10.0.0)') def testScatter(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) for count in range(1, 10): if rank == root: buf = array(root, typecode, (size, count)) sbuf = buf.as_mpi() rbuf = MPI.IN_PLACE else: buf = array(-1, typecode, count) sbuf = None rbuf = buf.as_mpi() self.COMM.Scatter(sbuf, rbuf, root=root) for value in buf.flat: self.assertEqual(value, check) if rank == root: rbuf = None self.COMM.Scatter(sbuf, rbuf, root=root) for value in buf.flat: self.assertEqual(value, check) def testAllgather(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for count in range(1, 10): check = arrayimpl.scalar(count) buf = array(-1, typecode, (size, count)) s, e = rank*count, (rank+1)*count for i in range(s, e): buf.flat[i] = check self.COMM.Allgather(MPI.IN_PLACE, buf.as_mpi()) for value in buf.flat: self.assertEqual(value, check) self.COMM.Allgather(None, buf.as_mpi()) for value in buf.flat: self.assertEqual(value, check) def testReduce(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue for root in range(size): if rank == root: buf = array(range(size), typecode) sbuf = MPI.IN_PLACE rbuf = buf.as_mpi() else: buf = array(range(size), typecode) buf2 = array(range(size), typecode) sbuf = buf.as_mpi() rbuf = buf2.as_mpi() self.COMM.Reduce(sbuf, rbuf, op, root) if rank == root: max_val = maxvalue(buf) for i, value in enumerate(buf): if op == MPI.SUM: if (i * size) < max_val: self.assertAlmostEqual(value, i*size) elif op == MPI.PROD: if (i ** size) < max_val: self.assertAlmostEqual(value, i**size) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) def testAllreduce(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for op in (MPI.SUM, 
MPI.MAX, MPI.MIN, MPI.PROD): if skip_op(typecode, op): continue buf = array(range(size), typecode) sbuf = MPI.IN_PLACE rbuf = buf.as_mpi() self.COMM.Allreduce(sbuf, rbuf, op) max_val = maxvalue(buf) for i, value in enumerate(buf): if op == MPI.SUM: if (i * size) < max_val: self.assertAlmostEqual(value, i*size) elif op == MPI.PROD: if (i ** size) < max_val: self.assertAlmostEqual(value, i**size) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) def testReduceScatterBlock(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): # one of the ranks would fail as of OpenMPI 4.1.1 if unittest.is_mpi_gpu('openmpi', array): continue for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): if skip_op(typecode, op): continue for rcnt in range(size): if op == MPI.PROD: rbuf = array([rank+1]*rcnt*size, typecode) else: rbuf = array([rank]*rcnt*size, typecode) self.COMM.Reduce_scatter_block(MPI.IN_PLACE, rbuf.as_mpi(), op) max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if i >= rcnt: if op == MPI.PROD: self.assertEqual(value, rank+1) else: self.assertEqual(value, rank) else: if op == MPI.SUM: redval = sum(range(size)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.PROD: redval = prod(range(1,size+1)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.MAX: self.assertEqual(value, size-1) elif op == MPI.MIN: self.assertEqual(value, 0) def testReduceScatter(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): if skip_op(typecode, op): continue rcnt = list(range(1, size+1)) if op == MPI.PROD: rbuf = array([rank+1]*sum(rcnt), typecode) else: rbuf = array([rank]*sum(rcnt), typecode) self.COMM.Reduce_scatter(MPI.IN_PLACE, rbuf.as_mpi(), rcnt, op) max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if i >= rcnt[rank]: if op == MPI.PROD: self.assertEqual(value, rank+1) else: self.assertEqual(value, rank) else: if op == MPI.SUM: redval = sum(range(size)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.PROD: redval = prod(range(1,size+1)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.MAX: self.assertEqual(value, size-1) elif op == MPI.MIN: self.assertEqual(value, 0) @unittest.skipMPI('openmpi(<=1.8.4)') def testScan(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() # -- for array, typecode in arrayimpl.loop(): for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue buf = array(range(size), typecode) self.COMM.Scan(MPI.IN_PLACE, buf.as_mpi(), op) max_val = maxvalue(buf) for i, value in enumerate(buf): if op == MPI.SUM: if (i * (rank + 1)) < max_val: self.assertAlmostEqual(value, i * (rank + 1)) elif op == MPI.PROD: if (i ** (rank + 1)) < max_val: self.assertAlmostEqual(value, i ** (rank + 1)) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) @unittest.skipMPI('msmpi(<=4.2.0)') @unittest.skipMPI('openmpi(<=1.8.4)') def testExscan(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue buf = array(range(size), typecode) try: self.COMM.Exscan(MPI.IN_PLACE, buf.as_mpi(), op) except NotImplementedError: self.skipTest('mpi-exscan') if rank == 1: for i, value in enumerate(buf): self.assertEqual(value, i) elif 
rank > 1: max_val = maxvalue(buf) for i, value in enumerate(buf): if op == MPI.SUM: if (i * rank) < max_val: self.assertAlmostEqual(value, i * rank) elif op == MPI.PROD: if (i ** rank) < max_val: self.assertAlmostEqual(value, i ** rank) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) class TestReduceLocal(unittest.TestCase): def testReduceLocal(self): for array, typecode in arrayimpl.loop(): # segfault as of OpenMPI 4.1.1 if unittest.is_mpi_gpu('openmpi', array): continue for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue size = 5 sbuf = array(range(1,size+1), typecode) rbuf = array(range(0,size+0), typecode) try: op.Reduce_local(sbuf.as_mpi(), rbuf.as_mpi()) except NotImplementedError: self.skipTest('mpi-op-reduce_local') for i, value in enumerate(rbuf): self.assertEqual(sbuf[i], i+1) if op == MPI.SUM: self.assertAlmostEqual(value, i+(i+1)) elif op == MPI.PROD: self.assertAlmostEqual(value, i*(i+1)) elif op == MPI.MAX: self.assertEqual(value, i+1) elif op == MPI.MIN: self.assertEqual(value, i) def testReduceLocalBadCount(self): for array, typecode in arrayimpl.loop(): for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): sbuf = array(range(3), typecode) rbuf = array(range(3), typecode) def f(): op.Reduce_local(sbuf.as_mpi_c(2), rbuf.as_mpi_c(3)) self.assertRaises(ValueError, f) def f(): op.Reduce_local([sbuf.as_raw(), 1, MPI.INT], [rbuf.as_raw(), 1, MPI.SHORT]) self.assertRaises(ValueError, f) class TestCCOBufSelf(BaseTestCCOBuf, unittest.TestCase): COMM = MPI.COMM_SELF class TestCCOBufWorld(BaseTestCCOBuf, unittest.TestCase): COMM = MPI.COMM_WORLD @unittest.skipMPI('MPICH1') @unittest.skipMPI('LAM/MPI') @unittest.skipIf(MPI.IN_PLACE == MPI.BOTTOM, 'mpi-in-place') class TestCCOBufInplaceSelf(BaseTestCCOBufInplace, unittest.TestCase): COMM = MPI.COMM_SELF @unittest.skipMPI('MPICH1') @unittest.skipMPI('LAM/MPI') @unittest.skipIf(MPI.IN_PLACE == MPI.BOTTOM, 'mpi-in-place') class TestCCOBufInplaceWorld(BaseTestCCOBufInplace, unittest.TestCase): COMM = MPI.COMM_WORLD @unittest.skipMPI('mvapich', MPI.COMM_WORLD.Get_size() > 1) def testReduceScatter(self): super().testReduceScatter() class TestCCOBufSelfDup(TestCCOBufSelf): def setUp(self): self.COMM = MPI.COMM_SELF.Dup() def tearDown(self): self.COMM.Free() @unittest.skipMPI('openmpi(<1.4.0)', MPI.Query_thread() > MPI.THREAD_SINGLE) class TestCCOBufWorldDup(TestCCOBufWorld): def setUp(self): self.COMM = MPI.COMM_WORLD.Dup() def tearDown(self): self.COMM.Free() if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_cco_buf_inter.py000066400000000000000000000242141475341043600201430ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import arrayimpl def skip_op(typecode, op): if typecode in '?': return True if typecode in 'FDG': if op in (MPI.MAX, MPI.MIN): return True return False def maxvalue(a): try: typecode = a.typecode except AttributeError: typecode = a.dtype.char if typecode == ('f'): return 1e30 elif typecode == ('d'): return 1e300 else: return 2 ** (a.itemsize * 7) - 1 @unittest.skipMPI('openmpi(<1.6.0)') @unittest.skipMPI('msmpi', MPI.COMM_WORLD.Get_size() >= 3) @unittest.skipMPI('MPICH1') @unittest.skipIf(MPI.ROOT == MPI.PROC_NULL, 'mpi-root') @unittest.skipIf(MPI.COMM_WORLD.Get_size() < 2, 'mpi-world-size<2') class BaseTestCCOBufInter: BASECOMM = MPI.COMM_NULL INTRACOMM = MPI.COMM_NULL INTERCOMM = MPI.COMM_NULL def setUp(self): size = self.BASECOMM.Get_size() rank = self.BASECOMM.Get_rank() if rank < size // 2: 
self.COLOR = 0 self.LOCAL_LEADER = 0 self.REMOTE_LEADER = size // 2 else: self.COLOR = 1 self.LOCAL_LEADER = 0 self.REMOTE_LEADER = 0 self.INTRACOMM = self.BASECOMM.Split(self.COLOR, key=0) Create_intercomm = MPI.Intracomm.Create_intercomm self.INTERCOMM = Create_intercomm( self.INTRACOMM, self.LOCAL_LEADER, self.BASECOMM, self.REMOTE_LEADER, ) def tearDown(self): self.INTRACOMM.Free() self.INTERCOMM.Free() def testBarrier(self): self.INTERCOMM.Barrier() def testBcast(self): comm = self.INTERCOMM rank = comm.Get_rank() size = comm.Get_size() rsize = comm.Get_remote_size() for array, typecode in arrayimpl.loop(): for color in (0, 1): if self.COLOR == color: for root in range(size): if root == rank: buf = array(root, typecode, root+color) comm.Bcast(buf.as_mpi(), root=MPI.ROOT) else: comm.Bcast(None, root=MPI.PROC_NULL) else: for root in range(rsize): buf = array(-1, typecode, root+color) comm.Bcast(buf.as_mpi(), root=root) check = arrayimpl.scalar(root) for value in buf: self.assertEqual(value, check) def testGather(self): comm = self.INTERCOMM rank = comm.Get_rank() size = comm.Get_size() rsize = comm.Get_remote_size() for array, typecode in arrayimpl.loop(): for color in (0, 1): if self.COLOR == color: for root in range(size): if root == rank: rbuf = array(-1, typecode, (rsize, root+color)) comm.Gather(None, rbuf.as_mpi(), root=MPI.ROOT) check = arrayimpl.scalar(root) for value in rbuf.flat: self.assertEqual(value, check) else: comm.Gather(None, None, root=MPI.PROC_NULL) else: for root in range(rsize): sbuf = array(root, typecode, root+color) comm.Gather(sbuf.as_mpi(), None, root=root) def testScatter(self): comm = self.INTERCOMM rank = comm.Get_rank() size = comm.Get_size() rsize = comm.Get_remote_size() for array, typecode in arrayimpl.loop(): for color in (0, 1): if self.COLOR == color: for root in range(size): if root == rank: sbuf = array(root, typecode, (rsize, root+color)) comm.Scatter(sbuf.as_mpi(), None, root=MPI.ROOT) else: comm.Scatter(None, None, root=MPI.PROC_NULL) else: for root in range(rsize): rbuf = array(root, typecode, root+color) comm.Scatter(None, rbuf.as_mpi(), root=root) check = arrayimpl.scalar(root) for value in rbuf: self.assertEqual(value, check) def testAllgather(self): comm = self.INTERCOMM rank = comm.Get_rank() size = comm.Get_size() rsize = comm.Get_remote_size() for array, typecode in arrayimpl.loop(): for color in (0, 1): if self.COLOR == color: for n in range(size): sbuf = array( n, typecode, color) rbuf = array(-1, typecode, (rsize, n+color)) comm.Allgather(sbuf.as_mpi(), rbuf.as_mpi()) check = arrayimpl.scalar(n) for value in rbuf.flat: self.assertEqual(value, check) else: for n in range(rsize): sbuf = array( n, typecode, n+color) rbuf = array(-1, typecode, (rsize, color)) comm.Allgather(sbuf.as_mpi(), rbuf.as_mpi()) check = arrayimpl.scalar(n) for value in rbuf.flat: self.assertEqual(value, check) def testAlltoall(self): comm = self.INTERCOMM rank = comm.Get_rank() size = comm.Get_size() rsize = comm.Get_remote_size() for array, typecode in arrayimpl.loop(): for color in (0, 1): if self.COLOR == color: for n in range(size): sbuf = array( n, typecode, (rsize, (n+1)*color)) rbuf = array(-1, typecode, (rsize, n+3*color)) comm.Alltoall(sbuf.as_mpi(), rbuf.as_mpi()) check = arrayimpl.scalar(n) for value in rbuf.flat: self.assertEqual(value, check) else: for n in range(rsize): sbuf = array( n, typecode, (rsize, n+3*color)) rbuf = array(-1, typecode, (rsize, (n+1)*color)) comm.Alltoall(sbuf.as_mpi(), rbuf.as_mpi()) check = arrayimpl.scalar(n) for value in 
rbuf.flat: self.assertEqual(value, check) @unittest.skipMPI('mvapich', MPI.COMM_WORLD.Get_size() > 2) def testReduce(self): comm = self.INTERCOMM rank = comm.Get_rank() lsize = comm.Get_size() rsize = comm.Get_remote_size() for array, typecode in arrayimpl.loop(): for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue for color in (0, 1): if self.COLOR == color: for root in range(lsize): if root == rank: rbuf = array(-1, typecode, rsize) comm.Reduce( None, rbuf.as_mpi(), op=op, root=MPI.ROOT, ) max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: if (i * rsize) < max_val: self.assertAlmostEqual(value, i*rsize) elif op == MPI.PROD: if (i ** rsize) < max_val: self.assertAlmostEqual(value, i**rsize) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) else: comm.Reduce( None, None, op=op, root=MPI.PROC_NULL, ) else: for root in range(rsize): sbuf = array(range(lsize), typecode) comm.Reduce( sbuf.as_mpi(), None, op=op, root=root, ) def testAllreduce(self): comm = self.INTERCOMM rank = comm.Get_rank() size = comm.Get_size() rsize = comm.Get_remote_size() for array, typecode in arrayimpl.loop(): for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue sbuf = array(range(5), typecode) rbuf = array([-1] * 5, typecode) comm.Allreduce(sbuf.as_mpi(), rbuf.as_mpi(), op) max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: if (i * rsize) < max_val: self.assertAlmostEqual(value, i*rsize) elif op == MPI.PROD: if (i ** rsize) < max_val: self.assertAlmostEqual(value, i**rsize) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) class TestCCOBufInter(BaseTestCCOBufInter, unittest.TestCase): BASECOMM = MPI.COMM_WORLD class TestCCOBufInterDup(TestCCOBufInter): def setUp(self): self.BASECOMM = self.BASECOMM.Dup() super().setUp() def tearDown(self): self.BASECOMM.Free() super().tearDown() if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_cco_nb_buf.py000066400000000000000000000665501475341043600174320ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import arrayimpl from functools import reduce prod = lambda sequence,start=1: reduce(lambda x, y: x*y, sequence, start) def skip_op(typecode, op): if typecode in '?': return True if typecode in 'FDG': if op in (MPI.MAX, MPI.MIN): return True return False def maxvalue(a): try: typecode = a.typecode except AttributeError: typecode = a.dtype.char if typecode == ('f'): return 1e30 elif typecode == ('d'): return 1e300 else: return 2 ** (a.itemsize * 7) - 1 @unittest.skipMPI('msmpi(<8.1.0)') class BaseTestCCOBuf: COMM = MPI.COMM_NULL def testBarrier(self): self.COMM.Ibarrier().Wait() def testBcast(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) if rank == root: buf = array(root, typecode, root) else: buf = array( -1, typecode, root) self.COMM.Ibcast(buf.as_mpi(), root=root).Wait() for value in buf: self.assertEqual(value, check) def testGather(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) sbuf = array(root, typecode, root+1) if rank == root: rbuf = array(-1, typecode, (size,root+1)) else: rbuf = array([], typecode) self.COMM.Igather(sbuf.as_mpi(), rbuf.as_mpi(), root=root).Wait() if rank == root: for value in rbuf.flat: 
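# A minimal sketch, not part of the mpi4py sources, of the nonblocking
# collective pattern used throughout test_cco_nb_buf.py: the call returns a
# Request immediately and Wait() completes it. Assumes numpy; any process
# count works.
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
buf = np.full(4, 7 if rank == 0 else -1, dtype='i')
req = comm.Ibcast(buf, root=0)   # nonblocking broadcast, returns a Request
req.Wait()                       # completion point; buf now holds 7 everywhere
assert (buf == 7).all()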
self.assertEqual(value, check) def testScatter(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) rbuf = array(-1, typecode, size) if rank == root: sbuf = array(root, typecode, (size, size)) else: sbuf = array([], typecode) self.COMM.Iscatter(sbuf.as_mpi(), rbuf.as_mpi(), root=root).Wait() for value in rbuf: self.assertEqual(value, check) def testAllgather(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) sbuf = array(root, typecode, root+1) rbuf = array( -1, typecode, (size, root+1)) self.COMM.Iallgather(sbuf.as_mpi(), rbuf.as_mpi()).Wait() for value in rbuf.flat: self.assertEqual(value, check) def testAlltoall(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) sbuf = array(root, typecode, (size, root+1)) rbuf = array( -1, typecode, (size, root+1)) self.COMM.Ialltoall(sbuf.as_mpi(), rbuf.as_mpi_c(root+1)).Wait() for value in rbuf.flat: self.assertEqual(value, check) def testReduce(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): # segfault as of OpenMPI 4.1.1 if unittest.is_mpi_gpu('openmpi', array): continue for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue for root in range(size): sbuf = array(range(size), typecode) rbuf = array(-1, typecode, size) self.COMM.Ireduce(sbuf.as_mpi(), rbuf.as_mpi(), op, root).Wait() max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if rank != root: check = arrayimpl.scalar(-1) self.assertEqual(value, check) continue if op == MPI.SUM: if (i * size) < max_val: self.assertAlmostEqual(value, i*size) elif op == MPI.PROD: if (i ** size) < max_val: self.assertAlmostEqual(value, i**size) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) def testAllreduce(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): # segfault as of OpenMPI 4.1.1 if unittest.is_mpi_gpu('openmpi', array): continue for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): if skip_op(typecode, op): continue sbuf = array(range(size), typecode) rbuf = array(0, typecode, size) self.COMM.Iallreduce(sbuf.as_mpi(), rbuf.as_mpi(), op).Wait() max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: if (i * size) < max_val: self.assertAlmostEqual(value, i*size) elif op == MPI.PROD: if (i ** size) < max_val: self.assertAlmostEqual(value, i**size) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) @unittest.skipMPI('openmpi(<=1.8.3)') def testReduceScatter(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): # segfault as of OpenMPI 4.1.1 if unittest.is_mpi_gpu('openmpi', array): continue for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): if skip_op(typecode, op): continue rcnt = list(range(1,size+1)) sbuf = array([rank+1]*sum(rcnt), typecode) rbuf = array(-1, typecode, rank+1) self.COMM.Ireduce_scatter(sbuf.as_mpi(), rbuf.as_mpi(), None, op).Wait() max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: redval = sum(range(size))+size if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.PROD: redval = prod(range(1,size+1)) if redval < max_val: 
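# A minimal sketch, not part of the mpi4py sources, of the Reduce_scatter_block
# semantics checked above: the element-wise reduction of all send buffers is
# split into equal blocks, one block per rank. Assumes numpy; any process
# count works.
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
size, rank = comm.Get_size(), comm.Get_rank()
sbuf = np.full(size, rank, dtype='i')        # element i is destined for rank i
rbuf = np.full(1, -1, dtype='i')
comm.Reduce_scatter_block(sbuf, rbuf, op=MPI.SUM)
assert rbuf[0] == sum(range(size))           # every rank contributed its rank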
self.assertAlmostEqual(value, redval) elif op == MPI.MAX: self.assertEqual(value, size) elif op == MPI.MIN: self.assertEqual(value, 1) rbuf = array(-1, typecode, rank+1) self.COMM.Ireduce_scatter(sbuf.as_mpi(), rbuf.as_mpi(), rcnt, op).Wait() max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: redval = sum(range(size))+size if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.PROD: redval = prod(range(1,size+1)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.MAX: self.assertEqual(value, size) elif op == MPI.MIN: self.assertEqual(value, 1) def testReduceScatterBlock(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): # segfault as of OpenMPI 4.1.1 if unittest.is_mpi_gpu('openmpi', array): continue for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): if skip_op(typecode, op): continue for rcnt in range(1, size+1): sbuf = array([rank]*rcnt*size, typecode) rbuf = array(-1, typecode, rcnt) if op == MPI.PROD: sbuf = array([rank+1]*rcnt*size, typecode) self.COMM.Ireduce_scatter_block(sbuf.as_mpi(), rbuf.as_mpi(), op).Wait() max_val = maxvalue(rbuf) v_sum = (size*(size-1))/2 v_prod = 1 for i in range(1,size+1): v_prod *= i v_max = size-1 v_min = 0 for i, value in enumerate(rbuf): if op == MPI.SUM: if v_sum <= max_val: self.assertAlmostEqual(value, v_sum) elif op == MPI.PROD: if v_prod <= max_val: self.assertAlmostEqual(value, v_prod) elif op == MPI.MAX: self.assertEqual(value, v_max) elif op == MPI.MIN: self.assertEqual(value, v_min) def testScan(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() # -- for array, typecode in arrayimpl.loop(): # segfault as of OpenMPI 4.1.1 if unittest.is_mpi_gpu('openmpi', array): continue for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue sbuf = array(range(size), typecode) rbuf = array(0, typecode, size) self.COMM.Iscan(sbuf.as_mpi(), rbuf.as_mpi(), op).Wait() max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: if (i * (rank + 1)) < max_val: self.assertAlmostEqual(value, i * (rank + 1)) elif op == MPI.PROD: if (i ** (rank + 1)) < max_val: self.assertAlmostEqual(value, i ** (rank + 1)) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) @unittest.skipMPI('openmpi(<=1.8.1)') def testExscan(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): # segfault as of OpenMPI 4.1.1 if unittest.is_mpi_gpu('openmpi', array): continue for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue sbuf = array(range(size), typecode) rbuf = array(0, typecode, size) self.COMM.Iexscan(sbuf.as_mpi(), rbuf.as_mpi(), op).Wait() if rank == 1: for i, value in enumerate(rbuf): self.assertEqual(value, i) elif rank > 1: max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: if (i * rank) < max_val: self.assertAlmostEqual(value, i * rank) elif op == MPI.PROD: if (i ** rank) < max_val: self.assertAlmostEqual(value, i ** rank) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) def testBcastTypeIndexed(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): datatype = array.TypeMap[typecode] for root in range(size): # if rank == root: buf = array(range(10), typecode).as_raw() else: buf = array(-1, typecode, 10).as_raw() indices = list(range(0, len(buf), 2)) newtype = 
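# A minimal sketch, not part of the mpi4py sources, of the Scan/Exscan
# semantics the tests above verify (here with the blocking calls): Scan
# includes the calling rank's own contribution, Exscan excludes it, and rank
# 0's Exscan result is undefined, which is why the tests only check ranks >= 1.
# Assumes numpy.
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
sbuf = np.array([rank], dtype='i')
incl = np.zeros(1, dtype='i')
excl = np.zeros(1, dtype='i')
comm.Scan(sbuf, incl, op=MPI.SUM)      # 0 + 1 + ... + rank
comm.Exscan(sbuf, excl, op=MPI.SUM)    # 0 + 1 + ... + (rank - 1)
assert incl[0] == sum(range(rank + 1))
if rank > 0:
    assert excl[0] == sum(range(rank))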
datatype.Create_indexed_block(1, indices) newtype.Commit() newbuf = (buf, 1, newtype) self.COMM.Ibcast(newbuf, root=root).Wait() newtype.Free() if rank != root: for i, value in enumerate(buf): check = arrayimpl.scalar(-1 if (i % 2) else i) self.assertEqual(value, check) # if rank == root: buf = array(range(10), typecode).as_raw() else: buf = array(-1, typecode, 10).as_raw() indices = list(range(1, len(buf), 2)) newtype = datatype.Create_indexed_block(1, indices) newtype.Commit() newbuf = (buf, 1, newtype) self.COMM.Ibcast(newbuf, root).Wait() newtype.Free() if rank != root: for i, value in enumerate(buf): check = arrayimpl.scalar(-1 if not (i % 2) else i) self.assertEqual(value, check) @unittest.skipMPI('msmpi(<8.1.0)') class BaseTestCCOBufInplace: def testGather(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) count = root+3 if rank == root: sbuf = MPI.IN_PLACE buf = array(-1, typecode, (size, count)) #buf.flat[(rank*count):((rank+1)*count)] = \ # array(root, typecode, count) s, e = rank*count, (rank+1)*count for i in range(s, e): buf.flat[i] = check rbuf = buf.as_mpi() else: buf = array(root, typecode, count) sbuf = buf.as_mpi() rbuf = None self.COMM.Igather(sbuf, rbuf, root=root).Wait() for value in buf.flat: self.assertEqual(value, check) if rank == root: sbuf = None self.COMM.Igather(sbuf, rbuf, root=root).Wait() for value in buf.flat: self.assertEqual(value, check) @unittest.skipMPI('msmpi(==10.0.0)') def testScatter(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) for count in range(1, 10): if rank == root: buf = array(root, typecode, (size, count)) sbuf = buf.as_mpi() rbuf = MPI.IN_PLACE else: buf = array(-1, typecode, count) sbuf = None rbuf = buf.as_mpi() self.COMM.Iscatter(sbuf, rbuf, root=root).Wait() for value in buf.flat: self.assertEqual(value, check) if rank == root: rbuf = None self.COMM.Iscatter(sbuf, rbuf, root=root).Wait() for value in buf.flat: self.assertEqual(value, check) def testAllgather(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for count in range(1, 10): check = arrayimpl.scalar(count) buf = array(-1, typecode, (size, count)) s, e = rank*count, (rank+1)*count for i in range(s, e): buf.flat[i] = check self.COMM.Iallgather(MPI.IN_PLACE, buf.as_mpi()).Wait() for value in buf.flat: self.assertEqual(value, check) self.COMM.Iallgather(None, buf.as_mpi()).Wait() for value in buf.flat: self.assertEqual(value, check) def testReduce(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): # segfault as of OpenMPI 4.1.1 if unittest.is_mpi_gpu('openmpi', array): continue for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue for root in range(size): if rank == root: buf = array(range(size), typecode) sbuf = MPI.IN_PLACE rbuf = buf.as_mpi() else: buf = array(range(size), typecode) buf2 = array(range(size), typecode) sbuf = buf.as_mpi() rbuf = buf2.as_mpi() self.COMM.Ireduce(sbuf, rbuf, op, root).Wait() if rank == root: max_val = maxvalue(buf) for i, value in enumerate(buf): if op == MPI.SUM: if (i * size) < max_val: self.assertAlmostEqual(value, i*size) elif op == MPI.PROD: if (i ** size) < max_val: self.assertAlmostEqual(value, i**size) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: 
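# A minimal sketch, not part of the mpi4py sources, of the indexed-block
# datatype trick used by testBcastTypeIndexed above: only the even-indexed
# elements take part in the broadcast, the rest stay untouched. Assumes numpy.
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
buf = np.arange(10, dtype='i') if rank == 0 else np.full(10, -1, dtype='i')
newtype = MPI.INT.Create_indexed_block(1, list(range(0, 10, 2))).Commit()
comm.Bcast([buf, 1, newtype], root=0)   # touches indices 0, 2, 4, 6, 8 only
newtype.Free()
if rank != 0:
    for i, value in enumerate(buf):
        assert value == (i if i % 2 == 0 else -1)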
self.assertEqual(value, i) def testAllreduce(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): # segfault as of OpenMPI 4.1.1 if unittest.is_mpi_gpu('openmpi', array): continue for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): if skip_op(typecode, op): continue buf = array(range(size), typecode) sbuf = MPI.IN_PLACE rbuf = buf.as_mpi() self.COMM.Iallreduce(sbuf, rbuf, op).Wait() max_val = maxvalue(buf) for i, value in enumerate(buf): if op == MPI.SUM: if (i * size) < max_val: self.assertAlmostEqual(value, i*size) elif op == MPI.PROD: if (i ** size) < max_val: self.assertAlmostEqual(value, i**size) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) @unittest.skipMPI('openmpi(<=1.8.6)') def testReduceScatterBlock(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): # segfault as of OpenMPI 4.1.1 if unittest.is_mpi_gpu('openmpi', array): continue for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): if skip_op(typecode, op): continue for rcnt in range(size): if op == MPI.PROD: rbuf = array([rank+1]*rcnt*size, typecode) else: rbuf = array([rank]*rcnt*size, typecode) self.COMM.Ireduce_scatter_block(MPI.IN_PLACE, rbuf.as_mpi(), op).Wait() max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if i >= rcnt: if op == MPI.PROD: self.assertEqual(value, rank+1) else: self.assertEqual(value, rank) else: if op == MPI.SUM: redval = sum(range(size)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.PROD: redval = prod(range(1,size+1)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.MAX: self.assertEqual(value, size-1) elif op == MPI.MIN: self.assertEqual(value, 0) @unittest.skipMPI('openmpi(<=1.8.6)') def testReduceScatter(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): # segfault as of OpenMPI 4.1.1 if unittest.is_mpi_gpu('openmpi', array): continue for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): if skip_op(typecode, op): continue rcnt = list(range(1, size+1)) if op == MPI.PROD: rbuf = array([rank+1]*sum(rcnt), typecode) else: rbuf = array([rank]*sum(rcnt), typecode) self.COMM.Ireduce_scatter(MPI.IN_PLACE, rbuf.as_mpi(), rcnt, op).Wait() max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if i >= rcnt[rank]: if op == MPI.PROD: self.assertEqual(value, rank+1) else: self.assertEqual(value, rank) else: if op == MPI.SUM: redval = sum(range(size)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.PROD: redval = prod(range(1,size+1)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.MAX: self.assertEqual(value, size-1) elif op == MPI.MIN: self.assertEqual(value, 0) @unittest.skipMPI('openmpi(<=1.8.4)') def testScan(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() # -- for array, typecode in arrayimpl.loop(): # segfault as of OpenMPI 4.1.1 if unittest.is_mpi_gpu('openmpi', array): continue for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue buf = array(range(size), typecode) self.COMM.Iscan(MPI.IN_PLACE, buf.as_mpi(), op).Wait() max_val = maxvalue(buf) for i, value in enumerate(buf): if op == MPI.SUM: if (i * (rank + 1)) < max_val: self.assertAlmostEqual(value, i * (rank + 1)) elif op == MPI.PROD: if (i ** (rank + 1)) < max_val: self.assertAlmostEqual(value, i ** (rank + 1)) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, 
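# A minimal sketch, not part of the mpi4py sources, of the MPI.IN_PLACE
# pattern the "Inplace" test classes exercise: the receive buffer doubles as
# the send buffer, so no separate send array is needed. Assumes numpy.
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
size, rank = comm.Get_size(), comm.Get_rank()
buf = np.full(3, rank, dtype='i')
comm.Allreduce(MPI.IN_PLACE, buf, op=MPI.SUM)
assert (buf == sum(range(size))).all()   # every element is 0 + 1 + ... + size-1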
i) @unittest.skipMPI('openmpi(<=1.8.4)') def testExscan(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): # segfault as of OpenMPI 4.1.1 if unittest.is_mpi_gpu('openmpi', array): continue for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue buf = array(range(size), typecode) try: self.COMM.Iexscan(MPI.IN_PLACE, buf.as_mpi(), op).Wait() except NotImplementedError: self.skipTest('mpi-exscan') if rank == 1: for i, value in enumerate(buf): self.assertEqual(value, i) elif rank > 1: max_val = maxvalue(buf) for i, value in enumerate(buf): if op == MPI.SUM: if (i * rank) < max_val: self.assertAlmostEqual(value, i * rank) elif op == MPI.PROD: if (i ** rank) < max_val: self.assertAlmostEqual(value, i ** rank) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) class TestCCOBufSelf(BaseTestCCOBuf, unittest.TestCase): COMM = MPI.COMM_SELF class TestCCOBufWorld(BaseTestCCOBuf, unittest.TestCase): COMM = MPI.COMM_WORLD class TestCCOBufInplaceSelf(BaseTestCCOBufInplace, unittest.TestCase): COMM = MPI.COMM_SELF class TestCCOBufInplaceWorld(BaseTestCCOBufInplace, unittest.TestCase): COMM = MPI.COMM_WORLD @unittest.skipMPI('mvapich(<3.0.0)', MPI.COMM_WORLD.Get_size() > 1) def testReduceScatter(self): super().testReduceScatter() class TestCCOBufSelfDup(TestCCOBufSelf): def setUp(self): self.COMM = MPI.COMM_SELF.Dup() def tearDown(self): self.COMM.Free() class TestCCOBufWorldDup(TestCCOBufWorld): def setUp(self): self.COMM = MPI.COMM_WORLD.Dup() def tearDown(self): self.COMM.Free() try: MPI.COMM_SELF.Ibarrier().Wait() except NotImplementedError: unittest.disable(BaseTestCCOBuf, 'mpi-nbc') unittest.disable(BaseTestCCOBufInplace, 'mpi-nbc') if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_cco_nb_vec.py000066400000000000000000000460551475341043600174310ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import arrayimpl def maxvalue(a): try: typecode = a.typecode except AttributeError: typecode = a.dtype.char if typecode == ('f'): return 1e30 elif typecode == ('d'): return 1e300 else: return 2 ** (a.itemsize * 7) - 1 @unittest.skipMPI('msmpi(<8.1.0)') @unittest.skipMPI('openmpi(<2.0.0)') class BaseTestCCOVec: COMM = MPI.COMM_NULL def testGatherv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check_a = arrayimpl.scalar(root) check_b = arrayimpl.scalar(-1) for count in range(size): sbuf = array(root, typecode, count) rbuf = array( -1, typecode, size*size) counts = [count] * size displs = list(range(0, size*size, size)) recvbuf = rbuf.as_mpi_v(counts, displs) if rank != root: recvbuf=None self.COMM.Igatherv(sbuf.as_mpi(), recvbuf, root).Wait() if recvbuf is not None: for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, check_a) for vb in b: self.assertEqual(vb, check_b) def testGatherv2(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check_a = arrayimpl.scalar(root) check_b = arrayimpl.scalar(-1) for count in range(size): sbuf = array(root, typecode, size) rbuf = array( -1, typecode, size*size) sendbuf = sbuf.as_mpi_c(count) recvbuf = rbuf.as_mpi_v(count, size) if rank != root: recvbuf=None self.COMM.Igatherv(sendbuf, recvbuf, root).Wait() if recvbuf is not None: for i in range(size): row = rbuf[i*size:(i+1)*size] a, b 
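# A minimal sketch, not part of the mpi4py sources, of Gatherv with explicit
# counts and displacements, the message form exercised by the vector
# collective tests above: rank i contributes i+1 elements. Assumes numpy.
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
size, rank = comm.Get_size(), comm.Get_rank()
sbuf = np.full(rank + 1, rank, dtype='i')
counts = [i + 1 for i in range(size)]
displs = [sum(counts[:i]) for i in range(size)]
if rank == 0:
    rbuf = np.full(sum(counts), -1, dtype='i')
    recvmsg = [rbuf, counts, displs, MPI.INT]
else:
    recvmsg = None
comm.Gatherv(sbuf, recvmsg, root=0)
if rank == 0:
    for i in range(size):
        assert (rbuf[displs[i]:displs[i] + counts[i]] == i).all()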
= row[:count], row[count:] for va in a: self.assertEqual(va, check_a) for vb in b: self.assertEqual(vb, check_b) def testGatherv3(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) for count in range(size+1): # sbuf = array(root, typecode, count).as_raw() rbuf = array( -1, typecode, count*size).as_raw() sendbuf = sbuf recvbuf = [rbuf, count] if rank != root: recvbuf=None self.COMM.Igatherv(sendbuf, recvbuf, root).Wait() if recvbuf is not None: for v in rbuf: self.assertEqual(v, check) # sbuf = array(root, typecode, count).as_raw() if rank == root: rbuf = array( -1, typecode, count*size).as_raw() else: rbuf = None self.COMM.Gatherv(sbuf, rbuf, root) self.COMM.Barrier() if rank == root: for v in rbuf: self.assertEqual(v, check) def testScatterv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) for count in range(size): sbuf = array(root, typecode, size*size) rbuf = array( -1, typecode, count) counts = [count] * size displs = list(range(0, size*size, size)) sendbuf = sbuf.as_mpi_v(counts, displs) if rank != root: sendbuf = None self.COMM.Iscatterv(sendbuf, rbuf.as_mpi(), root).Wait() for vr in rbuf: self.assertEqual(vr, check) def testScatterv2(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check_a = arrayimpl.scalar(root) check_b = arrayimpl.scalar(-1) for count in range(size): sbuf = array(root, typecode, size*size) rbuf = array( -1, typecode, size) sendbuf = sbuf.as_mpi_v(count, size) recvbuf = rbuf.as_mpi_c(count) if rank != root: sendbuf = None self.COMM.Iscatterv(sendbuf, recvbuf, root).Wait() a, b = rbuf[:count], rbuf[count:] for va in a: self.assertEqual(va, check_a) for vb in b: self.assertEqual(vb, check_b) def testScatterv3(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) for count in range(size+1): # sbuf = array(root, typecode, count*size).as_raw() rbuf = array( -1, typecode, count).as_raw() sendbuf = [sbuf, count] recvbuf = rbuf if rank != root: sendbuf = None self.COMM.Iscatterv(sendbuf, recvbuf, root).Wait() for v in rbuf: self.assertEqual(v, check) # if rank == root: sbuf = array(root, typecode, count*size).as_raw() else: sbuf = None rbuf = array( -1, typecode, count).as_raw() self.COMM.Scatterv(sbuf, rbuf, root) for v in rbuf: self.assertEqual(v, check) def testAllgatherv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check_a = arrayimpl.scalar(root) check_b = arrayimpl.scalar(-1) for count in range(size): sbuf = array(root, typecode, count) rbuf = array( -1, typecode, size*size) counts = [count] * size displs = list(range(0, size*size, size)) sendbuf = sbuf.as_mpi() recvbuf = rbuf.as_mpi_v(counts, displs) self.COMM.Iallgatherv(sendbuf, recvbuf).Wait() for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, check_a) for vb in b: self.assertEqual(vb, check_b) def testAllgatherv2(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check_a = arrayimpl.scalar(root) check_b = arrayimpl.scalar(-1) for count in range(size): sbuf = array(root, typecode, size) 
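# A minimal sketch, not part of the mpi4py sources, of the matching Scatterv
# call: the root splits one buffer into unequal pieces described by counts
# and displacements. Assumes numpy.
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
size, rank = comm.Get_size(), comm.Get_rank()
counts = [i + 1 for i in range(size)]
displs = [sum(counts[:i]) for i in range(size)]
if rank == 0:
    sbuf = np.concatenate([np.full(c, i, dtype='i') for i, c in enumerate(counts)])
    sendmsg = [sbuf, counts, displs, MPI.INT]
else:
    sendmsg = None
rbuf = np.full(counts[rank], -1, dtype='i')
comm.Scatterv(sendmsg, rbuf, root=0)
assert (rbuf == rank).all()   # rank i receives i+1 copies of the value i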
rbuf = array( -1, typecode, size*size) sendbuf = sbuf.as_mpi_c(count) recvbuf = rbuf.as_mpi_v(count, size) self.COMM.Iallgatherv(sendbuf, recvbuf).Wait() for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, check_a) for vb in b: self.assertEqual(vb, check_b) def testAllgatherv3(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) for count in range(size+1): # sbuf = array(root, typecode, count).as_raw() rbuf = array( -1, typecode, count*size).as_raw() sendbuf = sbuf recvbuf = [rbuf, count] self.COMM.Iallgatherv(sendbuf, recvbuf).Wait() for v in rbuf: self.assertEqual(v, check) # sbuf = array(root, typecode, count).as_raw() rbuf = array( -1, typecode, count*size).as_raw() self.COMM.Iallgatherv(sbuf, rbuf).Wait() for v in rbuf: self.assertEqual(v, check) def testAlltoallv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check_a = arrayimpl.scalar(root) check_b = arrayimpl.scalar(-1) for count in range(size): sbuf = array(root, typecode, size*size) rbuf = array( -1, typecode, size*size) counts = [count] * size displs = list(range(0, size*size, size)) sendbuf = sbuf.as_mpi_v(counts, displs) recvbuf = rbuf.as_mpi_v(counts, displs) self.COMM.Ialltoallv(sendbuf, recvbuf).Wait() for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, check_a) for vb in b: self.assertEqual(vb, check_b) def testAlltoallv2(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check_a = arrayimpl.scalar(root) check_b = arrayimpl.scalar(-1) for count in range(size): sbuf = array(root, typecode, size*size) rbuf = array( -1, typecode, size*size) sendbuf = sbuf.as_mpi_v(count, size) recvbuf = rbuf.as_mpi_v(count, size) self.COMM.Ialltoallv(sendbuf, recvbuf).Wait() for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, check_a) for vb in b: self.assertEqual(vb, check_b) def testAlltoallv3(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) for count in range(size+1): # sbuf = array(root, typecode, count*size).as_raw() rbuf = array( -1, typecode, count*size).as_raw() sendbuf = [sbuf, count] recvbuf = [rbuf, count] self.COMM.Ialltoallv(sendbuf, recvbuf).Wait() for v in rbuf: self.assertEqual(v, check) # sbuf = array(root, typecode, count*size).as_raw() rbuf = array( -1, typecode, count*size).as_raw() self.COMM.Ialltoallv(sbuf, rbuf).Wait() for v in rbuf: self.assertEqual(v, check) def testAlltoallw(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for n in range(1, size+1): check = arrayimpl.scalar(n) sbuf = array( n, typecode, (size, n)) rbuf = array(-1, typecode, (size, n)) sdt, rdt = sbuf.mpidtype, rbuf.mpidtype sdsp = list(range(0, size*n*sdt.extent, n*sdt.extent)) rdsp = list(range(0, size*n*rdt.extent, n*rdt.extent)) smsg = (sbuf.as_raw(), ([n]*size, sdsp), [sdt]*size) rmsg = (rbuf.as_raw(), ([n]*size, rdsp), [rdt]*size) try: self.COMM.Ialltoallw(smsg, rmsg).Wait() except NotImplementedError: self.skipTest('mpi-alltoallw') for value in rbuf.flat: self.assertEqual(value, check) @unittest.skipMPI('openmpi(<4.1.7)') def 
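# A minimal sketch, not part of the mpi4py sources, of the Alltoallw message
# layout used above: per-peer counts, byte displacements, and datatypes.
# Assumes numpy; any process count works.
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
size, rank = comm.Get_size(), comm.Get_rank()
sbuf = np.full(size, rank, dtype='i')          # send my rank to every peer
rbuf = np.full(size, -1, dtype='i')
ext = MPI.INT.extent
counts = [1] * size
displs = [i * ext for i in range(size)]        # displacements are in bytes
types = [MPI.INT] * size
smsg = (sbuf, (counts, displs), types)
rmsg = (rbuf, (counts, displs), types)
comm.Alltoallw(smsg, rmsg)
assert (rbuf == np.arange(size)).all()         # slot i holds peer i's rank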
testAlltoallwBottom(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for n in range(1, size+1): check = arrayimpl.scalar(n) sbuf = array( n, typecode, (size, n)) rbuf = array(-1, typecode, (size, n)) saddr = MPI.Get_address(sbuf.as_raw()) raddr = MPI.Get_address(rbuf.as_raw()) sdt, rdt = sbuf.mpidtype, rbuf.mpidtype stypes = [ MPI.Datatype.Create_struct([n], [saddr+d], [sdt]).Commit() for d in list(range(0, size*n*sdt.extent, n*sdt.extent)) ] rtypes = [ MPI.Datatype.Create_struct([n], [raddr+d], [sdt]).Commit() for d in list(range(0, size*n*rdt.extent, n*rdt.extent)) ] smsg = (MPI.BOTTOM, ([1]*size, [0]*size), stypes) rmsg = (MPI.BOTTOM, ([1]*size, [0]*size), rtypes) try: self.COMM.Ialltoallw(smsg, rmsg).Wait() except NotImplementedError: self.skipTest('mpi-alltoallw') finally: for t in stypes: t.Free() for t in rtypes: t.Free() for value in rbuf.flat: self.assertEqual(value, check) @unittest.skipMPI('msmpi(<8.1.0)') @unittest.skipMPI('openmpi(<2.0.0)') class BaseTestCCOVecInplace: COMM = MPI.COMM_NULL def testAlltoallv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for count in range(size): rbuf = array(-1, typecode, size*size) counts = [count] * size displs = list(range(0, size*size, size)) for i in range(size): for j in range(count): rbuf[i*size+j] = rank recvbuf = rbuf.as_mpi_v(counts, displs) self.COMM.Ialltoallv(MPI.IN_PLACE, recvbuf).Wait() for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: check_a = arrayimpl.scalar(i) self.assertEqual(va, check_a) for vb in b: check_b = arrayimpl.scalar(-1) self.assertEqual(vb, check_b) def testAlltoallw(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for count in range(size): rbuf = array(-1, typecode, size*size) for i in range(size): for j in range(count): rbuf[i*size+j] = rank rdt = rbuf.mpidtype rdsp = list(range(0, size*size*rdt.extent, size*rdt.extent)) rmsg = (rbuf.as_raw(), ([count]*size, rdsp), [rdt]*size) try: self.COMM.Ialltoallw(MPI.IN_PLACE, rmsg).Wait() except NotImplementedError: self.skipTest('mpi-ialltoallw') for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: check_a = arrayimpl.scalar(i) self.assertEqual(va, check_a) for vb in b: check_b = arrayimpl.scalar(-1) self.assertEqual(vb, check_b) def testAlltoallw2(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for count in range(size): rbuf = array(-1, typecode, size*size) for i in range(size): for j in range(count): rbuf[i*size+j] = rank rdt = rbuf.mpidtype rdsp = list(range(0, size*size*rdt.extent, size*rdt.extent)) rmsg = (rbuf.as_raw(), [count]*size, rdsp, [rdt]*size) try: self.COMM.Ialltoallw(MPI.IN_PLACE, rmsg).Wait() except NotImplementedError: self.skipTest('mpi-ialltoallw') for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: check_a = arrayimpl.scalar(i) self.assertEqual(va, check_a) for vb in b: check_b = arrayimpl.scalar(-1) self.assertEqual(vb, check_b) class TestCCOVecSelf(BaseTestCCOVec, unittest.TestCase): COMM = MPI.COMM_SELF class TestCCOVecWorld(BaseTestCCOVec, unittest.TestCase): COMM = MPI.COMM_WORLD class TestCCOVecSelfDup(TestCCOVecSelf): def setUp(self): self.COMM = MPI.COMM_SELF.Dup() def tearDown(self): self.COMM.Free() class TestCCOVecWorldDup(TestCCOVecWorld): def setUp(self): self.COMM = 
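# A minimal sketch, not part of the mpi4py sources, of the MPI.BOTTOM idiom
# from testAlltoallwBottom above: absolute addresses are baked into a struct
# datatype, so the buffer argument itself is MPI.BOTTOM. Assumes numpy; a
# single process suffices (self send/receive over COMM_SELF).
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_SELF
src = np.full(1, 42, dtype='i')
dst = np.full(1, -1, dtype='i')
stype = MPI.Datatype.Create_struct([1], [MPI.Get_address(src)], [MPI.INT]).Commit()
rtype = MPI.Datatype.Create_struct([1], [MPI.Get_address(dst)], [MPI.INT]).Commit()
comm.Sendrecv([MPI.BOTTOM, 1, stype], dest=0,
              recvbuf=[MPI.BOTTOM, 1, rtype], source=0)
stype.Free()
rtype.Free()
assert dst[0] == 42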
MPI.COMM_WORLD.Dup() def tearDown(self): self.COMM.Free() class TestCCOVecInplaceSelf(BaseTestCCOVecInplace, unittest.TestCase): COMM = MPI.COMM_SELF class TestCCOVecInplaceWorld(BaseTestCCOVecInplace, unittest.TestCase): COMM = MPI.COMM_WORLD try: MPI.COMM_SELF.Ibarrier().Wait() except NotImplementedError: unittest.disable(BaseTestCCOVec, 'mpi-nbc') if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_cco_ngh_buf.py000066400000000000000000000225541475341043600176030ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import arrayimpl def create_topo_comms(comm): size = comm.Get_size() rank = comm.Get_rank() # Cartesian n = int(size**1/2.0) m = int(size**1/3.0) if m*m*m == size: dims = [m, m, m] elif n*n == size: dims = [n, n] else: dims = [size] periods = [True] * len(dims) yield comm.Create_cart(dims, periods=periods) # Graph index, edges = [0], [] for i in range(size): pos = index[-1] index.append(pos+2) edges.append((i-1)%size) edges.append((i+1)%size) yield comm.Create_graph(index, edges) # Dist Graph sources = [(rank-2)%size, (rank-1)%size] destinations = [(rank+1)%size, (rank+2)%size] yield comm.Create_dist_graph_adjacent(sources, destinations) def get_neighbors_count(comm): topo = comm.Get_topology() if topo == MPI.CART: ndim = comm.Get_dim() return 2*ndim, 2*ndim if topo == MPI.GRAPH: rank = comm.Get_rank() nneighbors = comm.Get_neighbors_count(rank) return nneighbors, nneighbors if topo == MPI.DIST_GRAPH: indeg, outdeg, w = comm.Get_dist_neighbors_count() return indeg, outdeg return 0, 0 def have_feature(): cartcomm = MPI.COMM_SELF.Create_cart([1], periods=[0]) try: cartcomm.neighbor_allgather(None) return True except NotImplementedError: return False finally: cartcomm.Free() @unittest.skipIf(not have_feature(), 'mpi-neighbor') class BaseTestCCONghBuf: COMM = MPI.COMM_NULL def testNeighborAllgather(self): for comm in create_topo_comms(self.COMM): rsize, ssize = get_neighbors_count(comm) for array, typecode in arrayimpl.loop(): if unittest.is_mpi_gpu('openmpi', array): # segfault as of OpenMPI 4.1.1; TODO(leofang): why? 
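# A minimal sketch, not part of the mpi4py sources, of a neighborhood
# collective on the kind of topology built by create_topo_comms above: a
# periodic 1-D Cartesian grid, so every rank has a left and a right neighbor.
# Assumes numpy and an MPI implementation with neighborhood collectives.
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
cart = comm.Create_cart([comm.Get_size()], periods=[True])
rank = cart.Get_rank()
sbuf = np.full(1, rank, dtype='i')
rbuf = np.full(2, -1, dtype='i')          # one slot per neighbor
cart.Neighbor_allgather(sbuf, rbuf)       # collect both neighbors' ranks
left, right = cart.Shift(0, 1)            # (source, destination) of a +1 shift
assert set(rbuf.tolist()) <= {left, right}
cart.Free()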
if array.backend == 'numba': continue for v in range(3): check = arrayimpl.scalar(v) sbuf = array( v, typecode, 3) rbuf = array(-1, typecode, (rsize, 3)) comm.Neighbor_allgather(sbuf.as_mpi(), rbuf.as_mpi()) for value in rbuf.flat: self.assertEqual(value, check) sbuf = array( v, typecode, 3) rbuf = array(-1, typecode, (rsize, 3)) comm.Neighbor_allgatherv(sbuf.as_mpi_c(3), rbuf.as_mpi_c(3)) for value in rbuf.flat: self.assertEqual(value, check) sbuf = array( v, typecode, 3) rbuf = array(-1, typecode, (rsize, 3)) comm.Ineighbor_allgather(sbuf.as_mpi(), rbuf.as_mpi()).Wait() for value in rbuf.flat: self.assertEqual(value, check) sbuf = array( v, typecode, 3) rbuf = array(-1, typecode, (rsize, 3)) comm.Ineighbor_allgatherv(sbuf.as_mpi_c(3), rbuf.as_mpi_c(3)).Wait() for value in rbuf.flat: self.assertEqual(value, check) comm.Free() def testNeighborAlltoall(self): for comm in create_topo_comms(self.COMM): rsize, ssize = get_neighbors_count(comm) for array, typecode in arrayimpl.loop(): for v in range(3): check = arrayimpl.scalar(v) sbuf = array( v, typecode, (ssize, 3)) rbuf = array(-1, typecode, (rsize, 3)) comm.Neighbor_alltoall(sbuf.as_mpi(), rbuf.as_mpi_c(3)) for value in rbuf.flat: self.assertEqual(value, check) sbuf = array( v, typecode, (ssize, 3)) rbuf = array(-1, typecode, (rsize, 3)) comm.Neighbor_alltoall(sbuf.as_mpi(), rbuf.as_mpi()) for value in rbuf.flat: self.assertEqual(value, check) sbuf = array( v, typecode, (ssize, 3)) rbuf = array(-1, typecode, (rsize, 3)) comm.Neighbor_alltoallv(sbuf.as_mpi_c(3), rbuf.as_mpi_c(3)) for value in rbuf.flat: self.assertEqual(value, check) sbuf = array( v, typecode, (ssize, 3)) rbuf = array(-1, typecode, (rsize, 3)) comm.Ineighbor_alltoall(sbuf.as_mpi(), rbuf.as_mpi()).Wait() for value in rbuf.flat: self.assertEqual(value, check) sbuf = array( v, typecode, (ssize, 3)) rbuf = array(-1, typecode, (rsize, 3)) comm.Ineighbor_alltoallv(sbuf.as_mpi_c(3), rbuf.as_mpi_c(3)).Wait() for value in rbuf.flat: self.assertEqual(value, check) comm.Free() def testNeighborAlltoallw(self): size = self.COMM.Get_size() for comm in create_topo_comms(self.COMM): rsize, ssize = get_neighbors_count(comm) for array, typecode in arrayimpl.loop(): for n in range(1, 4): for v in range(3): check = arrayimpl.scalar(v) sbuf = array( v, typecode, (ssize, n)) rbuf = array(-1, typecode, (rsize, n)) sdt, rdt = sbuf.mpidtype, rbuf.mpidtype sdsp = list(range(0, ssize*n*sdt.extent, n*sdt.extent)) rdsp = list(range(0, rsize*n*rdt.extent, n*rdt.extent)) smsg = [sbuf.as_raw(), ([n]*ssize, sdsp), [sdt]*ssize] rmsg = (rbuf.as_raw(), ([n]*rsize, rdsp), [rdt]*rsize) try: comm.Neighbor_alltoallw(smsg, rmsg) except NotImplementedError: self.skipTest('mpi-neighbor_alltoallw') for value in rbuf.flat: self.assertEqual(value, check) check = arrayimpl.scalar(v+1) smsg[0] = array(v+1, typecode, (ssize, n)).as_raw() try: comm.Ineighbor_alltoallw(smsg, rmsg).Wait() except NotImplementedError: self.skipTest('mpi-ineighbor_alltoallw') for value in rbuf.flat: self.assertEqual(value, check) comm.Free() def testNeighborAlltoallwBottom(self): size = self.COMM.Get_size() for comm in create_topo_comms(self.COMM): rsize, ssize = get_neighbors_count(comm) for array, typecode in arrayimpl.loop(): if unittest.is_mpi_gpu('openmpi', array): continue for n in range(1,4): for v in range(3): check = arrayimpl.scalar(v) sbuf = array( v, typecode, (ssize, n)) rbuf = array(-1, typecode, (rsize, n)) saddr = MPI.Get_address(sbuf.as_raw()) raddr = MPI.Get_address(rbuf.as_raw()) sdt, rdt = sbuf.mpidtype, rbuf.mpidtype sdsp = 
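# A minimal sketch, not part of the mpi4py sources, of the adjacent
# distributed graph that create_topo_comms also yields: each rank receives
# from the two ranks before it and sends to the two ranks after it (mod size).
from mpi4py import MPI

comm = MPI.COMM_WORLD
size, rank = comm.Get_size(), comm.Get_rank()
sources = [(rank - 2) % size, (rank - 1) % size]
destinations = [(rank + 1) % size, (rank + 2) % size]
graph = comm.Create_dist_graph_adjacent(sources, destinations)
indeg, outdeg, weighted = graph.Get_dist_neighbors_count()
assert (indeg, outdeg) == (2, 2)
graph.Free()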
list(range(0, ssize*n*sdt.extent, n*sdt.extent)) rdsp = list(range(0, rsize*n*rdt.extent, n*rdt.extent)) sdsp = [saddr + d for d in sdsp] rdsp = [raddr + d for d in rdsp] smsg = [MPI.BOTTOM, ([n]*ssize, sdsp), [sdt]*ssize] rmsg = (MPI.BOTTOM, ([n]*rsize, rdsp), [rdt]*rsize) try: comm.Neighbor_alltoallw(smsg, rmsg) except NotImplementedError: self.skipTest('mpi-neighbor_alltoallw') for value in rbuf.flat: self.assertEqual(value, check) check = arrayimpl.scalar(v+1) sbuf.flat[:] = array(v+1, typecode, (ssize, n)).flat try: comm.Ineighbor_alltoallw(smsg, rmsg).Wait() except NotImplementedError: self.skipTest('mpi-neighbor_alltoallw') for value in rbuf.flat: self.assertEqual(value, check) comm.Free() class TestCCONghBufSelf(BaseTestCCONghBuf, unittest.TestCase): COMM = MPI.COMM_SELF class TestCCONghBufWorld(BaseTestCCONghBuf, unittest.TestCase): COMM = MPI.COMM_WORLD class TestCCONghBufSelfDup(TestCCONghBufSelf): def setUp(self): self.COMM = MPI.COMM_SELF.Dup() def tearDown(self): self.COMM.Free() class TestCCONghBufWorldDup(TestCCONghBufWorld): def setUp(self): self.COMM = MPI.COMM_WORLD.Dup() def tearDown(self): self.COMM.Free() name, version = MPI.get_vendor() if name == 'Open MPI': if version < (1,8,4): _create_topo_comms = create_topo_comms def create_topo_comms(comm): for c in _create_topo_comms(comm): if c.size * 2 < sum(c.degrees): c.Free(); continue yield c if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_cco_ngh_obj.py000066400000000000000000000065501475341043600175770ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest _basic = [ None, True, False, -7, 0, 7, 2**31, -2**63+1, 2**63-1, -2.17, 0.0, 3.14, 1+2j, 2-3j, 'mpi4py', ] messages = list(_basic) messages += [ list(_basic), tuple(_basic), {f'k{k}': v for k, v in enumerate(_basic)}, ] messages = messages + [messages] def create_topo_comms(comm): size = comm.Get_size() rank = comm.Get_rank() # Cartesian n = int(size**1/2.0) m = int(size**1/3.0) if m*m*m == size: dims = [m, m, m] elif n*n == size: dims = [n, n] else: dims = [size] periods = [True] * len(dims) yield comm.Create_cart(dims, periods=periods) # Graph index, edges = [0], [] for i in range(size): pos = index[-1] index.append(pos+2) edges.append((i-1)%size) edges.append((i+1)%size) yield comm.Create_graph(index, edges) # Dist Graph sources = [(rank-2)%size, (rank-1)%size] destinations = [(rank+1)%size, (rank+2)%size] yield comm.Create_dist_graph_adjacent(sources, destinations) def get_neighbors_count(comm): topo = comm.Get_topology() if topo == MPI.CART: ndim = comm.Get_dim() return 2*ndim, 2*ndim if topo == MPI.GRAPH: rank = comm.Get_rank() nneighbors = comm.Get_neighbors_count(rank) return nneighbors, nneighbors if topo == MPI.DIST_GRAPH: indeg, outdeg, w = comm.Get_dist_neighbors_count() return indeg, outdeg return 0, 0 def have_feature(): cartcomm = MPI.COMM_SELF.Create_cart([1], periods=[0]) try: cartcomm.neighbor_allgather(None) return True except NotImplementedError: return False finally: cartcomm.Free() @unittest.skipIf(not have_feature(), 'mpi-neighbor') class BaseTestCCONghObj: COMM = MPI.COMM_NULL @unittest.skipMPI('openmpi(<2.2.0)') def testNeighborAllgather(self): for comm in create_topo_comms(self.COMM): rsize, ssize = get_neighbors_count(comm) for smess in messages: rmess = comm.neighbor_allgather(smess) self.assertEqual(rmess, [smess] * rsize) comm.Free() def testNeighborAlltoall(self): for comm in create_topo_comms(self.COMM): rsize, ssize = get_neighbors_count(comm) for smess in messages: rmess = 
comm.neighbor_alltoall([smess] * ssize) self.assertEqual(rmess, [smess] * rsize) comm.Free() class TestCCONghObjSelf(BaseTestCCONghObj, unittest.TestCase): COMM = MPI.COMM_SELF class TestCCONghObjWorld(BaseTestCCONghObj, unittest.TestCase): COMM = MPI.COMM_WORLD class TestCCONghObjSelfDup(TestCCONghObjSelf): def setUp(self): self.COMM = MPI.COMM_SELF.Dup() def tearDown(self): self.COMM.Free() class TestCCONghObjWorldDup(TestCCONghObjWorld): def setUp(self): self.COMM = MPI.COMM_WORLD.Dup() def tearDown(self): self.COMM.Free() name, version = MPI.get_vendor() if name == 'Open MPI': if version < (1,8,4): _create_topo_comms = create_topo_comms def create_topo_comms(comm): for c in _create_topo_comms(comm): if c.size * 2 < sum(c.degrees): c.Free(); continue yield c if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_cco_obj.py000066400000000000000000000173301475341043600167410ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest from functools import reduce cumsum = lambda seq: reduce(lambda x, y: x+y, seq, 0) cumprod = lambda seq: reduce(lambda x, y: x*y, seq, 1) _basic = [ None, True, False, -7, 0, 7, 2**31, -2**63+1, 2**63-1, -2.17, 0.0, 3.14, 1+2j, 2-3j, 'mpi4py', ] messages = list(_basic) messages += [ list(_basic), tuple(_basic), {f'k{k}': v for k, v in enumerate(_basic)}, ] class BaseTestCCOObj: COMM = MPI.COMM_NULL def testBarrier(self): self.COMM.barrier() def testBcast(self): for smess in messages: for root in range(self.COMM.Get_size()): rmess = self.COMM.bcast(smess, root=root) self.assertEqual(smess, rmess) def testGather(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for smess in messages + [messages]: for root in range(size): rmess = self.COMM.gather(smess, root=root) if rank == root: self.assertEqual(rmess, [smess] * size) else: self.assertIsNone(rmess) def testScatter(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for smess in messages + [messages]: for root in range(size): if rank == root: rmess = self.COMM.scatter([smess] * size, root=root) else: rmess = self.COMM.scatter(None, root=root) self.assertEqual(rmess, smess) def testAllgather(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for smess in messages + [messages]: rmess = self.COMM.allgather(smess) self.assertEqual(rmess, [smess] * size) def testAlltoall(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for smess in messages + [messages]: rmess = self.COMM.alltoall([smess] * size) self.assertEqual(rmess, [smess] * size) def testReduce(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for root in range(size): for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN, MPI.MAXLOC, MPI.MINLOC, MPI.REPLACE, MPI.NO_OP): if op == MPI.OP_NULL: continue if op in (MPI.MAXLOC, MPI.MINLOC): sendobj = (rank, rank) else: sendobj = rank value = self.COMM.reduce(sendobj, op=op, root=root) if rank != root: self.assertIsNone(value) else: if op == MPI.SUM: self.assertEqual(value, cumsum(range(size))) elif op == MPI.PROD: self.assertEqual(value, cumprod(range(size))) elif op == MPI.MAX: self.assertEqual(value, size-1) elif op == MPI.MIN: self.assertEqual(value, 0) elif op == MPI.MAXLOC: self.assertEqual(value[0], size-1) self.assertEqual(value[1], size-1) elif op == MPI.MINLOC: self.assertEqual(value[0], 0) self.assertEqual(value[1], 0) elif op == MPI.REPLACE: self.assertEqual(value, size-1) elif op == MPI.NO_OP: self.assertEqual(value, 0) badroots = {-size, size} for root in badroots: with self.assertRaises(MPI.Exception) as cm: 
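# A minimal sketch, not part of the mpi4py sources, of the lowercase,
# pickle-based collectives covered by the "Obj" tests above: they move
# arbitrary Python objects and need no buffer arguments.
from mpi4py import MPI

comm = MPI.COMM_WORLD
size, rank = comm.Get_size(), comm.Get_rank()
data = {'rank': rank, 'msg': 'mpi4py'}
gathered = comm.allgather(data)            # one entry per process
assert gathered[rank] == data
total = comm.allreduce(rank, op=MPI.SUM)   # reduces plain Python ints
assert total == sum(range(size))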
self.COMM.reduce(None, op=MPI.NO_OP, root=root) ierr = cm.exception.Get_error_class() self.assertEqual(ierr, MPI.ERR_ROOT) def testAllreduce(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN, MPI.MAXLOC, MPI.MINLOC, MPI.REPLACE, MPI.NO_OP): if op == MPI.OP_NULL: continue if op in (MPI.MAXLOC, MPI.MINLOC): sendobj = (rank, rank) else: sendobj = rank value = self.COMM.allreduce(sendobj, op) if op == MPI.SUM: self.assertEqual(value, cumsum(range(size))) elif op == MPI.PROD: self.assertEqual(value, cumprod(range(size))) elif op == MPI.MAX: self.assertEqual(value, size-1) elif op == MPI.MIN: self.assertEqual(value, 0) elif op == MPI.MAXLOC: self.assertEqual(value[1], size-1) elif op == MPI.MINLOC: self.assertEqual(value[1], 0) elif op == MPI.REPLACE: self.assertEqual(value, size-1) elif op == MPI.NO_OP: self.assertEqual(value, 0) def testScan(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() # -- sscan = self.COMM.scan(size, op=MPI.SUM) self.assertEqual(sscan, cumsum([size]*(rank+1))) # -- rscan = self.COMM.scan(rank, op=MPI.SUM) self.assertEqual(rscan, cumsum(range(rank+1))) # -- minloc = self.COMM.scan((rank, rank), op=MPI.MINLOC) maxloc = self.COMM.scan((rank, rank), op=MPI.MAXLOC) self.assertEqual(minloc, (0, 0)) self.assertEqual(maxloc, (rank, rank)) # -- if MPI.REPLACE != MPI.OP_NULL: rscan = self.COMM.scan(rank, op=MPI.REPLACE) self.assertEqual(rscan, rank) # -- if MPI.NO_OP != MPI.OP_NULL: rscan = self.COMM.scan(rank, op=MPI.NO_OP) self.assertEqual(rscan, 0) def testExscan(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() # -- sscan = self.COMM.exscan(size, op=MPI.SUM) if rank == 0: self.assertIsNone(sscan) else: self.assertEqual(sscan, cumsum([size]*(rank))) # -- rscan = self.COMM.exscan(rank, op=MPI.SUM) if rank == 0: self.assertIsNone(sscan) else: self.assertEqual(rscan, cumsum(range(rank))) # -- minloc = self.COMM.exscan((rank, rank), op=MPI.MINLOC) maxloc = self.COMM.exscan((rank, rank), op=MPI.MAXLOC) if rank == 0: self.assertIsNone(minloc) self.assertIsNone(maxloc) else: self.assertEqual(minloc, (0, 0)) self.assertEqual(maxloc, (rank-1, rank-1)) # -- if MPI.REPLACE != MPI.OP_NULL: rscan = self.COMM.exscan(rank, op=MPI.REPLACE) if rank == 0: self.assertIsNone(rscan) else: self.assertEqual(rscan, rank-1) # -- if MPI.NO_OP != MPI.OP_NULL: rscan = self.COMM.exscan(rank, op=MPI.NO_OP) if rank == 0: self.assertIsNone(rscan) else: self.assertEqual(rscan, 0) class TestCCOObjSelf(BaseTestCCOObj, unittest.TestCase): COMM = MPI.COMM_SELF class TestCCOObjWorld(BaseTestCCOObj, unittest.TestCase): COMM = MPI.COMM_WORLD class TestCCOObjSelfDup(TestCCOObjSelf): def setUp(self): self.COMM = MPI.COMM_SELF.Dup() def tearDown(self): self.COMM.Free() @unittest.skipMPI('openmpi(<1.4.0)', MPI.Query_thread() > MPI.THREAD_SINGLE) class TestCCOObjWorldDup(TestCCOObjWorld): def setUp(self): self.COMM = MPI.COMM_WORLD.Dup() def tearDown(self): self.COMM.Free() if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_cco_obj_inter.py000066400000000000000000000173051475341043600201440ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest from functools import reduce cumsum = lambda seq: reduce(lambda x, y: x+y, seq, 0) cumprod = lambda seq: reduce(lambda x, y: x*y, seq, 1) _basic = [ None, True, False, -7, 0, 7, 2**31, -2**63+1, 2**63-1, -2.17, 0.0, 3.14, 1+2j, 2-3j, 'mpi4py', ] messages = list(_basic) messages += [ list(_basic), tuple(_basic), {f'k{k}': v for k, v in enumerate(_basic)}, 
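# A minimal sketch, not part of the mpi4py sources, of the MAXLOC/MINLOC
# convention used by the object-level reductions above: the send object is a
# (value, location) pair and the result carries the winning value together
# with where it came from.
from mpi4py import MPI

comm = MPI.COMM_WORLD
size, rank = comm.Get_size(), comm.Get_rank()
value, location = comm.allreduce((rank, rank), op=MPI.MAXLOC)
assert value == size - 1 and location == size - 1
value, location = comm.allreduce((rank, rank), op=MPI.MINLOC)
assert value == 0 and location == 0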
] @unittest.skipMPI('openmpi(<1.6.0)') @unittest.skipMPI('msmpi', MPI.COMM_WORLD.Get_size() >= 3) @unittest.skipMPI('MPICH1') @unittest.skipIf(MPI.ROOT == MPI.PROC_NULL, 'mpi-root') @unittest.skipIf(MPI.COMM_WORLD.Get_size() < 2, 'mpi-world-size<2') class BaseTestCCOObjInter: BASECOMM = MPI.COMM_NULL INTRACOMM = MPI.COMM_NULL INTERCOMM = MPI.COMM_NULL def setUp(self): size = self.BASECOMM.Get_size() rank = self.BASECOMM.Get_rank() if rank < size // 2: self.COLOR = 0 self.LOCAL_LEADER = 0 self.REMOTE_LEADER = size // 2 else: self.COLOR = 1 self.LOCAL_LEADER = 0 self.REMOTE_LEADER = 0 self.INTRACOMM = self.BASECOMM.Split(self.COLOR, key=0) Create_intercomm = MPI.Intracomm.Create_intercomm self.INTERCOMM = Create_intercomm(self.INTRACOMM, self.LOCAL_LEADER, self.BASECOMM, self.REMOTE_LEADER) def tearDown(self): self.INTRACOMM.Free() self.INTERCOMM.Free() @unittest.skipMPI('MPICH2(<1.0.8)') def testBarrier(self): self.INTERCOMM.barrier() def testBcast(self): rank = self.INTERCOMM.Get_rank() size = self.INTERCOMM.Get_size() rsize = self.INTERCOMM.Get_remote_size() for smess in messages + [messages]: for color in [0, 1]: if self.COLOR == color: for root in range(size): if root == rank: rmess = self.INTERCOMM.bcast(smess, root=MPI.ROOT) else: rmess = self.INTERCOMM.bcast(None, root=MPI.PROC_NULL) self.assertIsNone(rmess) else: for root in range(rsize): rmess = self.INTERCOMM.bcast(None, root=root) self.assertEqual(rmess, smess) def testGather(self): rank = self.INTERCOMM.Get_rank() size = self.INTERCOMM.Get_size() rsize = self.INTERCOMM.Get_remote_size() for smess in messages + [messages]: for color in [0, 1]: if self.COLOR == color: for root in range(size): if root == rank: rmess = self.INTERCOMM.gather(smess, root=MPI.ROOT) self.assertEqual(rmess, [smess] * rsize) else: rmess = self.INTERCOMM.gather(None, root=MPI.PROC_NULL) self.assertIsNone(rmess) else: for root in range(rsize): rmess = self.INTERCOMM.gather(smess, root=root) self.assertIsNone(rmess) @unittest.skipMPI('msmpi(<8.0.0)') def testScatter(self): rank = self.INTERCOMM.Get_rank() size = self.INTERCOMM.Get_size() rsize = self.INTERCOMM.Get_remote_size() for smess in messages + [messages]: for color in [0, 1]: if self.COLOR == color: for root in range(size): if root == rank: rmess = self.INTERCOMM.scatter([smess] * rsize, root=MPI.ROOT) else: rmess = self.INTERCOMM.scatter(None, root=MPI.PROC_NULL) self.assertIsNone(rmess) else: for root in range(rsize): rmess = self.INTERCOMM.scatter(None, root=root) self.assertEqual(rmess, smess) @unittest.skipMPI('MPICH2(<1.0.8)') def testAllgather(self): rank = self.INTERCOMM.Get_rank() size = self.INTERCOMM.Get_size() rsize = self.INTERCOMM.Get_remote_size() for smess in messages + [messages]: rmess = self.INTERCOMM.allgather(smess) self.assertEqual(rmess, [smess] * rsize) def testAlltoall(self): rank = self.INTERCOMM.Get_rank() size = self.INTERCOMM.Get_size() rsize = self.INTERCOMM.Get_remote_size() for smess in messages + [messages]: rmess = self.INTERCOMM.alltoall([smess] * rsize) self.assertEqual(rmess, [smess] * rsize) def testReduce(self): rank = self.INTERCOMM.Get_rank() size = self.INTERCOMM.Get_size() rsize = self.INTERCOMM.Get_remote_size() for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): for color in [0, 1]: if self.COLOR == color: for root in range(size): if root == rank: value = self.INTERCOMM.reduce(None, op=op, root=MPI.ROOT) if op == MPI.SUM: self.assertEqual(value, cumsum(range(rsize))) elif op == MPI.PROD: self.assertEqual(value, cumprod(range(rsize))) elif op == MPI.MAX: 
self.assertEqual(value, rsize-1) elif op == MPI.MIN: self.assertEqual(value, 0) else: value = self.INTERCOMM.reduce(None, op=op, root=MPI.PROC_NULL) self.assertIsNone(value) else: for root in range(rsize): value = self.INTERCOMM.reduce(rank, op=op, root=root) self.assertIsNone(value) badroots = {-3, -2, -1, rsize}.difference({MPI.ROOT, MPI.PROC_NULL}) for root in badroots: with self.assertRaises(MPI.Exception) as cm: self.INTERCOMM.reduce(None, op=MPI.NO_OP, root=root) ierr = cm.exception.Get_error_class() self.assertEqual(ierr, MPI.ERR_ROOT) @unittest.skipMPI('MPICH2(<1.0.8)') def testAllreduce(self): rank = self.INTERCOMM.Get_rank() size = self.INTERCOMM.Get_size() rsize = self.INTERCOMM.Get_remote_size() for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): value = self.INTERCOMM.allreduce(rank, op) if op == MPI.SUM: self.assertEqual(value, cumsum(range(rsize))) elif op == MPI.PROD: self.assertEqual(value, cumprod(range(rsize))) elif op == MPI.MAX: self.assertEqual(value, rsize-1) elif op == MPI.MIN: self.assertEqual(value, 0) class TestCCOObjInter(BaseTestCCOObjInter, unittest.TestCase): BASECOMM = MPI.COMM_WORLD class TestCCOObjInterDup(TestCCOObjInter): def setUp(self): self.BASECOMM = self.BASECOMM.Dup() super().setUp() def tearDown(self): self.BASECOMM.Free() super().tearDown() class TestCCOObjInterDupDup(TestCCOObjInterDup): BASECOMM = MPI.COMM_WORLD INTERCOMM_ORIG = MPI.COMM_NULL def setUp(self): super().setUp() self.INTERCOMM_ORIG = self.INTERCOMM self.INTERCOMM = self.INTERCOMM.Dup() def tearDown(self): super().tearDown() self.INTERCOMM_ORIG.Free() if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_cco_pr_buf.py000066400000000000000000000655761475341043600174630ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import arrayimpl from functools import reduce prod = lambda sequence,start=1: reduce(lambda x, y: x*y, sequence, start) def skip_op(typecode, op): if typecode in '?': return True if typecode in 'FDG': if op in (MPI.MAX, MPI.MIN): return True return False def maxvalue(a): try: typecode = a.typecode except AttributeError: typecode = a.dtype.char if typecode == ('f'): return 1e30 elif typecode == ('d'): return 1e300 else: return 2 ** (a.itemsize * 7) - 1 def StartWaitFree(request): request.Start() request.Wait() request.Free() class BaseTestCCOBuf: COMM = MPI.COMM_NULL def testBarrier(self): StartWaitFree( self.COMM.Barrier_init() ) def testBcast(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) if rank == root: buf = array(root, typecode, root) else: buf = array( -1, typecode, root) StartWaitFree( self.COMM.Bcast_init(buf.as_mpi(), root=root) ) for value in buf: self.assertEqual(value, check) def testGather(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) sbuf = array(root, typecode, root+1) if rank == root: rbuf = array(-1, typecode, (size,root+1)) else: rbuf = array([], typecode) StartWaitFree( self.COMM.Gather_init(sbuf.as_mpi(), rbuf.as_mpi(), root=root) ) if rank == root: for value in rbuf.flat: self.assertEqual(value, check) def testScatter(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) rbuf = array(-1, typecode, size) if rank == root: sbuf = array(root, typecode, (size, size)) else: sbuf = 
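# A minimal sketch, not part of the mpi4py sources, of the persistent
# collective pattern behind StartWaitFree above: initialize the operation
# once, then Start/Wait as many times as needed, and Free the request at the
# end. Assumes numpy and an MPI-4 implementation with persistent collectives.
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
buf = np.zeros(1, dtype='i')
if rank == 0:
    buf[0] = 42
req = comm.Bcast_init(buf, root=0)   # persistent request, reusable
for _ in range(3):
    req.Start()
    req.Wait()
req.Free()
assert buf[0] == 42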
array([], typecode) StartWaitFree( self.COMM.Scatter_init(sbuf.as_mpi(), rbuf.as_mpi(), root=root) ) for value in rbuf: self.assertEqual(value, check) def testAllgather(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) sbuf = array(root, typecode, root+1) rbuf = array( -1, typecode, (size, root+1)) StartWaitFree( self.COMM.Allgather_init(sbuf.as_mpi(), rbuf.as_mpi()) ) for value in rbuf.flat: self.assertEqual(value, check) def testAlltoall(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) sbuf = array(root, typecode, (size, root+1)) rbuf = array( -1, typecode, (size, root+1)) StartWaitFree( self.COMM.Alltoall_init(sbuf.as_mpi(), rbuf.as_mpi_c(root+1)) ) for value in rbuf.flat: self.assertEqual(value, check) def testReduce(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue for root in range(size): sbuf = array(range(size), typecode) rbuf = array(-1, typecode, size) StartWaitFree( self.COMM.Reduce_init(sbuf.as_mpi(), rbuf.as_mpi(), op, root) ) max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if rank != root: check = arrayimpl.scalar(-1) self.assertEqual(value, check) continue if op == MPI.SUM: if (i * size) < max_val: self.assertAlmostEqual(value, i*size) elif op == MPI.PROD: if (i ** size) < max_val: self.assertAlmostEqual(value, i**size) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) def testAllreduce(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): if skip_op(typecode, op): continue sbuf = array(range(size), typecode) rbuf = array(0, typecode, size) StartWaitFree( self.COMM.Allreduce_init(sbuf.as_mpi(), rbuf.as_mpi(), op) ) max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: if (i * size) < max_val: self.assertAlmostEqual(value, i*size) elif op == MPI.PROD: if (i ** size) < max_val: self.assertAlmostEqual(value, i**size) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) def testReduceScatter(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): if skip_op(typecode, op): continue rcnt = list(range(1,size+1)) sbuf = array([rank+1]*sum(rcnt), typecode) rbuf = array(-1, typecode, rank+1) StartWaitFree( self.COMM.Reduce_scatter_init(sbuf.as_mpi(), rbuf.as_mpi(), None, op) ) max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: redval = sum(range(size))+size if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.PROD: redval = prod(range(1,size+1)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.MAX: self.assertEqual(value, size) elif op == MPI.MIN: self.assertEqual(value, 1) rbuf = array(-1, typecode, rank+1) StartWaitFree( self.COMM.Reduce_scatter_init(sbuf.as_mpi(), rbuf.as_mpi(), rcnt, op) ) max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: redval = sum(range(size))+size if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.PROD: redval = prod(range(1,size+1)) if redval < max_val: 
self.assertAlmostEqual(value, redval) elif op == MPI.MAX: self.assertEqual(value, size) elif op == MPI.MIN: self.assertEqual(value, 1) def testReduceScatterBlock(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): if skip_op(typecode, op): continue for rcnt in range(1, size+1): sbuf = array([rank]*rcnt*size, typecode) rbuf = array(-1, typecode, rcnt) if op == MPI.PROD: sbuf = array([rank+1]*rcnt*size, typecode) StartWaitFree( self.COMM.Reduce_scatter_block_init(sbuf.as_mpi(), rbuf.as_mpi(), op) ) max_val = maxvalue(rbuf) v_sum = (size*(size-1))/2 v_prod = 1 for i in range(1,size+1): v_prod *= i v_max = size-1 v_min = 0 for i, value in enumerate(rbuf): if op == MPI.SUM: if v_sum <= max_val: self.assertAlmostEqual(value, v_sum) elif op == MPI.PROD: if v_prod <= max_val: self.assertAlmostEqual(value, v_prod) elif op == MPI.MAX: self.assertEqual(value, v_max) elif op == MPI.MIN: self.assertEqual(value, v_min) def testScan(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() # -- for array, typecode in arrayimpl.loop(): for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue sbuf = array(range(size), typecode) rbuf = array(0, typecode, size) StartWaitFree( self.COMM.Scan_init(sbuf.as_mpi(), rbuf.as_mpi(), op) ) max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: if (i * (rank + 1)) < max_val: self.assertAlmostEqual(value, i * (rank + 1)) elif op == MPI.PROD: if (i ** (rank + 1)) < max_val: self.assertAlmostEqual(value, i ** (rank + 1)) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) def testExscan(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue sbuf = array(range(size), typecode) rbuf = array(0, typecode, size) StartWaitFree( self.COMM.Exscan_init(sbuf.as_mpi(), rbuf.as_mpi(), op) ) if rank == 1: for i, value in enumerate(rbuf): self.assertEqual(value, i) elif rank > 1: max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: if (i * rank) < max_val: self.assertAlmostEqual(value, i * rank) elif op == MPI.PROD: if (i ** rank) < max_val: self.assertAlmostEqual(value, i ** rank) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) def testBcastTypeIndexed(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): datatype = array.TypeMap[typecode] for root in range(size): # if rank == root: buf = array(range(10), typecode).as_raw() else: buf = array(-1, typecode, 10).as_raw() indices = list(range(0, len(buf), 2)) newtype = datatype.Create_indexed_block(1, indices) newtype.Commit() newbuf = (buf, 1, newtype) StartWaitFree( self.COMM.Bcast_init(newbuf, root=root) ) newtype.Free() if rank != root: for i, value in enumerate(buf): check = arrayimpl.scalar(-1 if (i % 2) else i) self.assertEqual(value, check) # if rank == root: buf = array(range(10), typecode).as_raw() else: buf = array(-1, typecode, 10).as_raw() indices = list(range(1, len(buf), 2)) newtype = datatype.Create_indexed_block(1, indices) newtype.Commit() newbuf = (buf, 1, newtype) StartWaitFree( self.COMM.Bcast_init(newbuf, root) ) newtype.Free() if rank != root: for i, value in enumerate(buf): check = arrayimpl.scalar(-1 if not (i % 2) else i) self.assertEqual(value, check) class 
BaseTestCCOBufInplace: def testGather(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) count = root+3 if rank == root: sbuf = MPI.IN_PLACE buf = array(-1, typecode, (size, count)) s, e = rank*count, (rank+1)*count for i in range(s, e): buf.flat[i] = check rbuf = buf.as_mpi() else: buf = array(root, typecode, count) sbuf = buf.as_mpi() rbuf = None StartWaitFree( self.COMM.Gather_init(sbuf, rbuf, root=root) ) for value in buf.flat: self.assertEqual(value, check) if rank == root: sbuf = None StartWaitFree( self.COMM.Gather_init(sbuf, rbuf, root=root) ) for value in buf.flat: self.assertEqual(value, check) def testScatter(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) for count in range(1, 10): if rank == root: buf = array(root, typecode, (size, count)) sbuf = buf.as_mpi() rbuf = MPI.IN_PLACE else: buf = array(-1, typecode, count) sbuf = None rbuf = buf.as_mpi() StartWaitFree( self.COMM.Scatter_init(sbuf, rbuf, root=root) ) for value in buf.flat: self.assertEqual(value, check) if rank == root: rbuf = None StartWaitFree( self.COMM.Scatter_init(sbuf, rbuf, root=root) ) for value in buf.flat: self.assertEqual(value, check) def testAllgather(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for count in range(1, 10): check = arrayimpl.scalar(count) buf = array(-1, typecode, (size, count)) s, e = rank*count, (rank+1)*count for i in range(s, e): buf.flat[i] = check StartWaitFree( self.COMM.Allgather_init(MPI.IN_PLACE, buf.as_mpi()) ) for value in buf.flat: self.assertEqual(value, check) StartWaitFree( self.COMM.Allgather_init(None, buf.as_mpi()) ) for value in buf.flat: self.assertEqual(value, check) def testReduce(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue for root in range(size): if rank == root: buf = array(range(size), typecode) sbuf = MPI.IN_PLACE rbuf = buf.as_mpi() else: buf = array(range(size), typecode) buf2 = array(range(size), typecode) sbuf = buf.as_mpi() rbuf = buf2.as_mpi() StartWaitFree( self.COMM.Reduce_init(sbuf, rbuf, op, root) ) if rank == root: max_val = maxvalue(buf) for i, value in enumerate(buf): if op == MPI.SUM: if (i * size) < max_val: self.assertAlmostEqual(value, i*size) elif op == MPI.PROD: if (i ** size) < max_val: self.assertAlmostEqual(value, i**size) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) def testAllreduce(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): if skip_op(typecode, op): continue buf = array(range(size), typecode) sbuf = MPI.IN_PLACE rbuf = buf.as_mpi() StartWaitFree( self.COMM.Allreduce_init(sbuf, rbuf, op) ) max_val = maxvalue(buf) for i, value in enumerate(buf): if op == MPI.SUM: if (i * size) < max_val: self.assertAlmostEqual(value, i*size) elif op == MPI.PROD: if (i ** size) < max_val: self.assertAlmostEqual(value, i**size) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) def testReduceScatterBlock(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for op in (MPI.SUM, MPI.MAX, MPI.MIN, 
MPI.PROD): if skip_op(typecode, op): continue for rcnt in range(size): if op == MPI.PROD: rbuf = array([rank+1]*rcnt*size, typecode) else: rbuf = array([rank]*rcnt*size, typecode) StartWaitFree( self.COMM.Reduce_scatter_block_init(MPI.IN_PLACE, rbuf.as_mpi(), op) ) max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if i >= rcnt: if op == MPI.PROD: self.assertEqual(value, rank+1) else: self.assertEqual(value, rank) else: if op == MPI.SUM: redval = sum(range(size)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.PROD: redval = prod(range(1,size+1)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.MAX: self.assertEqual(value, size-1) elif op == MPI.MIN: self.assertEqual(value, 0) def testReduceScatter(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): if skip_op(typecode, op): continue rcnt = list(range(1, size+1)) if op == MPI.PROD: rbuf = array([rank+1]*sum(rcnt), typecode) else: rbuf = array([rank]*sum(rcnt), typecode) StartWaitFree( self.COMM.Reduce_scatter_init(MPI.IN_PLACE, rbuf.as_mpi(), rcnt, op) ) max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if i >= rcnt[rank]: if op == MPI.PROD: self.assertEqual(value, rank+1) else: self.assertEqual(value, rank) else: if op == MPI.SUM: redval = sum(range(size)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.PROD: redval = prod(range(1,size+1)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.MAX: self.assertEqual(value, size-1) elif op == MPI.MIN: self.assertEqual(value, 0) @unittest.skipMPI('openmpi(<=1.8.4)') def testScan(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() # -- for array, typecode in arrayimpl.loop(): for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue buf = array(range(size), typecode) StartWaitFree( self.COMM.Scan_init(MPI.IN_PLACE, buf.as_mpi(), op) ) max_val = maxvalue(buf) for i, value in enumerate(buf): if op == MPI.SUM: if (i * (rank + 1)) < max_val: self.assertAlmostEqual(value, i * (rank + 1)) elif op == MPI.PROD: if (i ** (rank + 1)) < max_val: self.assertAlmostEqual(value, i ** (rank + 1)) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) def testExscan(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue buf = array(range(size), typecode) StartWaitFree( self.COMM.Exscan_init(MPI.IN_PLACE, buf.as_mpi(), op) ) if rank == 1: for i, value in enumerate(buf): self.assertEqual(value, i) elif rank > 1: max_val = maxvalue(buf) for i, value in enumerate(buf): if op == MPI.SUM: if (i * rank) < max_val: self.assertAlmostEqual(value, i * rank) elif op == MPI.PROD: if (i ** rank) < max_val: self.assertAlmostEqual(value, i ** rank) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) class TestCCOBufSelf(BaseTestCCOBuf, unittest.TestCase): COMM = MPI.COMM_SELF class TestCCOBufWorld(BaseTestCCOBuf, unittest.TestCase): COMM = MPI.COMM_WORLD class TestCCOBufInplaceSelf(BaseTestCCOBufInplace, unittest.TestCase): COMM = MPI.COMM_SELF class TestCCOBufInplaceWorld(BaseTestCCOBufInplace, unittest.TestCase): COMM = MPI.COMM_WORLD class TestCCOBufSelfDup(TestCCOBufSelf): def setUp(self): self.COMM = MPI.COMM_SELF.Dup() def tearDown(self): self.COMM.Free() class 
TestCCOBufWorldDup(TestCCOBufWorld): def setUp(self): self.COMM = MPI.COMM_WORLD.Dup() def tearDown(self): self.COMM.Free() try: StartWaitFree( MPI.COMM_SELF.Barrier_init() ) except NotImplementedError: unittest.disable(BaseTestCCOBuf, 'mpi-coll-persist') unittest.disable(BaseTestCCOBufInplace, 'mpi-coll-persist') if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_cco_pr_ngh_buf.py000066400000000000000000000127161475341043600203030ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import arrayimpl def create_topo_comms(comm): size = comm.Get_size() rank = comm.Get_rank() # Cartesian n = int(size**1/2.0) m = int(size**1/3.0) if m*m*m == size: dims = [m, m, m] elif n*n == size: dims = [n, n] else: dims = [size] periods = [True] * len(dims) yield comm.Create_cart(dims, periods=periods) # Graph index, edges = [0], [] for i in range(size): pos = index[-1] index.append(pos+2) edges.append((i-1)%size) edges.append((i+1)%size) yield comm.Create_graph(index, edges) # Dist Graph sources = [(rank-2)%size, (rank-1)%size] destinations = [(rank+1)%size, (rank+2)%size] yield comm.Create_dist_graph_adjacent(sources, destinations) def get_neighbors_count(comm): topo = comm.Get_topology() if topo == MPI.CART: ndim = comm.Get_dim() return 2*ndim, 2*ndim if topo == MPI.GRAPH: rank = comm.Get_rank() nneighbors = comm.Get_neighbors_count(rank) return nneighbors, nneighbors if topo == MPI.DIST_GRAPH: indeg, outdeg, w = comm.Get_dist_neighbors_count() return indeg, outdeg return 0, 0 def StartWaitFree(request): request.Start() request.Wait() request.Free() class BaseTestCCONghBuf: COMM = MPI.COMM_NULL def testNeighborAllgather(self): for comm in create_topo_comms(self.COMM): rsize, ssize = get_neighbors_count(comm) for array, typecode in arrayimpl.loop(): for v in range(3): check = arrayimpl.scalar(v) sbuf = array( v, typecode, 3) rbuf = array(-1, typecode, (rsize, 3)) StartWaitFree( comm.Neighbor_allgather_init(sbuf.as_mpi(), rbuf.as_mpi()) ) for value in rbuf.flat: self.assertEqual(value, check) sbuf = array( v, typecode, 3) rbuf = array(-1, typecode, (rsize, 3)) StartWaitFree( comm.Neighbor_allgatherv_init(sbuf.as_mpi_c(3), rbuf.as_mpi_c(3)) ) for value in rbuf.flat: self.assertEqual(value, check) comm.Free() def testNeighborAlltoall(self): for comm in create_topo_comms(self.COMM): rsize, ssize = get_neighbors_count(comm) for array, typecode in arrayimpl.loop(): for v in range(3): check = arrayimpl.scalar(v) sbuf = array( v, typecode, (ssize, 3)) rbuf = array(-1, typecode, (rsize, 3)) StartWaitFree( comm.Neighbor_alltoall_init(sbuf.as_mpi(), rbuf.as_mpi_c(3)) ) for value in rbuf.flat: self.assertEqual(value, check) sbuf = array( v, typecode, (ssize, 3)) rbuf = array(-1, typecode, (rsize, 3)) StartWaitFree( comm.Neighbor_alltoall_init(sbuf.as_mpi(), rbuf.as_mpi()) ) for value in rbuf.flat: self.assertEqual(value, check) sbuf = array( v, typecode, (ssize, 3)) rbuf = array(-1, typecode, (rsize, 3)) StartWaitFree( comm.Neighbor_alltoallv_init(sbuf.as_mpi_c(3), rbuf.as_mpi_c(3)) ) for value in rbuf.flat: self.assertEqual(value, check) comm.Free() def testNeighborAlltoallw(self): size = self.COMM.Get_size() for comm in create_topo_comms(self.COMM): rsize, ssize = get_neighbors_count(comm) for array, typecode in arrayimpl.loop(): for n in range(1,4): for v in range(3): check = arrayimpl.scalar(v) sbuf = array( v, typecode, (ssize, n)) rbuf = array(-1, typecode, (rsize, n)) sdt, rdt = sbuf.mpidtype, rbuf.mpidtype sdsp = list(range(0, ssize*n*sdt.extent, n*sdt.extent)) 
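                        # The smsg/rmsg tuples built next follow the buffer spec used
                        # throughout these tests: (buffer, (counts, byte_displacements),
                        # datatypes), with one count/displacement/datatype per neighbor.
                        # sdsp above (and rdsp below) hold the byte offset of each
                        # neighbor's n-element block within the flat send/receive buffer.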
rdsp = list(range(0, rsize*n*rdt.extent, n*rdt.extent)) smsg = [sbuf.as_raw(), ([n]*ssize, sdsp), [sdt]*ssize] rmsg = (rbuf.as_raw(), ([n]*rsize, rdsp), [rdt]*rsize) StartWaitFree( comm.Neighbor_alltoallw_init(smsg, rmsg) ) for value in rbuf.flat: self.assertEqual(value, check) comm.Free() class TestCCONghBufSelf(BaseTestCCONghBuf, unittest.TestCase): COMM = MPI.COMM_SELF class TestCCONghBufWorld(BaseTestCCONghBuf, unittest.TestCase): COMM = MPI.COMM_WORLD class TestCCONghBufSelfDup(TestCCONghBufSelf): def setUp(self): self.COMM = MPI.COMM_SELF.Dup() def tearDown(self): self.COMM.Free() class TestCCONghBufWorldDup(TestCCONghBufWorld): def setUp(self): self.COMM = MPI.COMM_WORLD.Dup() def tearDown(self): self.COMM.Free() try: StartWaitFree( MPI.COMM_SELF.Barrier_init() ) except NotImplementedError: unittest.disable(BaseTestCCONghBuf, 'mpi-coll-persist') if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_cco_pr_vec.py000066400000000000000000000473131475341043600174510ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import arrayimpl def maxvalue(a): try: typecode = a.typecode except AttributeError: typecode = a.dtype.char if typecode == ('f'): return 1e30 elif typecode == ('d'): return 1e300 else: return 2 ** (a.itemsize * 7) - 1 def StartWaitFree(request): request.Start() request.Wait() request.Free() class BaseTestCCOVec: COMM = MPI.COMM_NULL def testGatherv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check_a = arrayimpl.scalar(root) check_b = arrayimpl.scalar(-1) for count in range(size): sbuf = array(root, typecode, count) rbuf = array( -1, typecode, size*size) counts = [count] * size displs = list(range(0, size*size, size)) recvbuf = rbuf.as_mpi_v(counts, displs) if rank != root: recvbuf=None StartWaitFree( self.COMM.Gatherv_init(sbuf.as_mpi(), recvbuf, root) ) if recvbuf is not None: for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, check_a) for vb in b: self.assertEqual(vb, check_b) def testGatherv2(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check_a = arrayimpl.scalar(root) check_b = arrayimpl.scalar(-1) for count in range(size): sbuf = array(root, typecode, size) rbuf = array( -1, typecode, size*size) sendbuf = sbuf.as_mpi_c(count) recvbuf = rbuf.as_mpi_v(count, size) if rank != root: recvbuf=None StartWaitFree( self.COMM.Gatherv_init(sendbuf, recvbuf, root) ) if recvbuf is not None: for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, check_a) for vb in b: self.assertEqual(vb, check_b) def testGatherv3(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) for count in range(size+1): # sbuf = array(root, typecode, count).as_raw() rbuf = array( -1, typecode, count*size).as_raw() sendbuf = sbuf recvbuf = [rbuf, count] if rank != root: recvbuf=None StartWaitFree( self.COMM.Gatherv_init(sendbuf, recvbuf, root) ) if recvbuf is not None: for v in rbuf: self.assertEqual(v, check) # sbuf = array(root, typecode, count).as_raw() if rank == root: rbuf = array( -1, typecode, count*size).as_raw() else: rbuf = None StartWaitFree( self.COMM.Gatherv_init(sbuf, rbuf, root) ) if rank == root: for v in rbuf: self.assertEqual(v, check) def 
testScatterv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) for count in range(size): sbuf = array(root, typecode, size*size) rbuf = array( -1, typecode, count) counts = [count] * size displs = list(range(0, size*size, size)) sendbuf = sbuf.as_mpi_v(counts, displs) if rank != root: sendbuf = None StartWaitFree( self.COMM.Scatterv_init(sendbuf, rbuf.as_mpi(), root) ) for vr in rbuf: self.assertEqual(vr, check) def testScatterv2(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check_a = arrayimpl.scalar(root) check_b = arrayimpl.scalar(-1) for count in range(size): sbuf = array(root, typecode, size*size) rbuf = array( -1, typecode, size) sendbuf = sbuf.as_mpi_v(count, size) recvbuf = rbuf.as_mpi_c(count) if rank != root: sendbuf = None StartWaitFree( self.COMM.Scatterv_init(sendbuf, recvbuf, root) ) a, b = rbuf[:count], rbuf[count:] for va in a: self.assertEqual(va, check_a) for vb in b: self.assertEqual(vb, check_b) def testScatterv3(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) for count in range(size+1): # sbuf = array(root, typecode, count*size).as_raw() rbuf = array( -1, typecode, count).as_raw() sendbuf = [sbuf, count] recvbuf = rbuf if rank != root: sendbuf = None StartWaitFree( self.COMM.Scatterv_init(sendbuf, recvbuf, root) ) for v in rbuf: self.assertEqual(v, check) # if rank == root: sbuf = array(root, typecode, count*size).as_raw() else: sbuf = None rbuf = array( -1, typecode, count).as_raw() StartWaitFree( self.COMM.Scatterv_init(sbuf, rbuf, root) ) for v in rbuf: self.assertEqual(v, check) def testAllgatherv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check_a = arrayimpl.scalar(root) check_b = arrayimpl.scalar(-1) for count in range(size): sbuf = array(root, typecode, count) rbuf = array( -1, typecode, size*size) counts = [count] * size displs = list(range(0, size*size, size)) sendbuf = sbuf.as_mpi() recvbuf = rbuf.as_mpi_v(counts, displs) StartWaitFree( self.COMM.Allgatherv_init(sendbuf, recvbuf) ) for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, check_a) for vb in b: self.assertEqual(vb, check_b) def testAllgatherv2(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check_a = arrayimpl.scalar(root) check_b = arrayimpl.scalar(-1) for count in range(size): sbuf = array(root, typecode, size) rbuf = array( -1, typecode, size*size) sendbuf = sbuf.as_mpi_c(count) recvbuf = rbuf.as_mpi_v(count, size) StartWaitFree( self.COMM.Allgatherv_init(sendbuf, recvbuf) ) for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, check_a) for vb in b: self.assertEqual(vb, check_b) def testAllgatherv3(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) for count in range(size+1): # sbuf = array(root, typecode, count).as_raw() rbuf = array( -1, typecode, count*size).as_raw() sendbuf = sbuf recvbuf = [rbuf, count] StartWaitFree( self.COMM.Allgatherv_init(sendbuf, recvbuf) ) for v in rbuf: 
self.assertEqual(v, check) # sbuf = array(root, typecode, count).as_raw() rbuf = array( -1, typecode, count*size).as_raw() self.COMM.Allgatherv(sbuf, rbuf) for v in rbuf: self.assertEqual(v, check) def testAlltoallv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check_a = arrayimpl.scalar(root) check_b = arrayimpl.scalar(-1) for count in range(size): sbuf = array(root, typecode, size*size) rbuf = array( -1, typecode, size*size) counts = [count] * size displs = list(range(0, size*size, size)) sendbuf = sbuf.as_mpi_v(counts, displs) recvbuf = rbuf.as_mpi_v(counts, displs) StartWaitFree( self.COMM.Alltoallv_init(sendbuf, recvbuf) ) for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, check_a) for vb in b: self.assertEqual(vb, check_b) def testAlltoallv2(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check_a = arrayimpl.scalar(root) check_b = arrayimpl.scalar(-1) for count in range(size): sbuf = array(root, typecode, size*size) rbuf = array( -1, typecode, size*size) sendbuf = sbuf.as_mpi_v(count, size) recvbuf = rbuf.as_mpi_v(count, size) StartWaitFree( self.COMM.Alltoallv_init(sendbuf, recvbuf) ) for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, check_a) for vb in b: self.assertEqual(vb, check_b) def testAlltoallv3(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) for count in range(size+1): # sbuf = array(root, typecode, count*size).as_raw() rbuf = array( -1, typecode, count*size).as_raw() sendbuf = [sbuf, count] recvbuf = [rbuf, count] StartWaitFree( self.COMM.Alltoallv_init(sendbuf, recvbuf) ) for v in rbuf: self.assertEqual(v, check) # sbuf = array(root, typecode, count*size).as_raw() rbuf = array( -1, typecode, count*size).as_raw() StartWaitFree( self.COMM.Alltoallv_init(sbuf, rbuf) ) for v in rbuf: self.assertEqual(v, check) def testAlltoallw(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for n in range(1, size+1): check = arrayimpl.scalar(n) sbuf = array( n, typecode, (size, n)) rbuf = array(-1, typecode, (size, n)) sdt, rdt = sbuf.mpidtype, rbuf.mpidtype sdsp = list(range(0, size*n*sdt.extent, n*sdt.extent)) rdsp = list(range(0, size*n*rdt.extent, n*rdt.extent)) smsg = (sbuf.as_raw(), ([n]*size, sdsp), [sdt]*size) rmsg = (rbuf.as_raw(), ([n]*size, rdsp), [rdt]*size) StartWaitFree( self.COMM.Alltoallw_init(smsg, rmsg) ) for value in rbuf.flat: self.assertEqual(value, check) def testAlltoallwBottom(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for n in range(1, size+1): check = arrayimpl.scalar(n) sbuf = array( n, typecode, (size, n)) rbuf = array(-1, typecode, (size, n)) saddr = MPI.Get_address(sbuf.as_raw()) raddr = MPI.Get_address(rbuf.as_raw()) sdt, rdt = sbuf.mpidtype, rbuf.mpidtype stypes = [ MPI.Datatype.Create_struct([n], [saddr+d], [sdt]).Commit() for d in list(range(0, size*n*sdt.extent, n*sdt.extent)) ] rtypes = [ MPI.Datatype.Create_struct([n], [raddr+d], [sdt]).Commit() for d in list(range(0, size*n*rdt.extent, n*rdt.extent)) ] smsg = (MPI.BOTTOM, ([1]*size, [0]*size), stypes) rmsg = (MPI.BOTTOM, ([1]*size, [0]*size), rtypes) try: StartWaitFree( 
self.COMM.Alltoallw_init(smsg, rmsg) ) finally: for t in stypes: t.Free() for t in rtypes: t.Free() for value in rbuf.flat: self.assertEqual(value, check) @unittest.skipMPI('msmpi(<8.1.0)') @unittest.skipMPI('openmpi(<1.8.0)') @unittest.skipIf(MPI.BOTTOM == MPI.IN_PLACE, 'mpi-in-place') class BaseTestCCOVecInplace: COMM = MPI.COMM_NULL def testAlltoallv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for count in range(size): rbuf = array(-1, typecode, size*size) counts = [count] * size displs = list(range(0, size*size, size)) for i in range(size): for j in range(count): rbuf[i*size+j] = rank recvbuf = rbuf.as_mpi_v(counts, displs) StartWaitFree( self.COMM.Alltoallv_init(MPI.IN_PLACE, recvbuf) ) for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: check_a = arrayimpl.scalar(i) self.assertEqual(va, check_a) for vb in b: check_b = arrayimpl.scalar(-1) self.assertEqual(vb, check_b) def testAlltoallw(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for count in range(size): rbuf = array(-1, typecode, size*size) for i in range(size): for j in range(count): rbuf[i*size+j] = rank rdt = rbuf.mpidtype rdsp = list(range(0, size*size*rdt.extent, size*rdt.extent)) rmsg = (rbuf.as_raw(), ([count]*size, rdsp), [rdt]*size) StartWaitFree( self.COMM.Alltoallw_init(MPI.IN_PLACE, rmsg) ) for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: check_a = arrayimpl.scalar(i) self.assertEqual(va, check_a) for vb in b: check_b = arrayimpl.scalar(-1) self.assertEqual(vb, check_b) def testAlltoallw2(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for count in range(size): rbuf = array(-1, typecode, size*size) for i in range(size): for j in range(count): rbuf[i*size+j] = rank rdt = rbuf.mpidtype rdsp = list(range(0, size*size*rdt.extent, size*rdt.extent)) rmsg = (rbuf.as_raw(), [count]*size, rdsp, [rdt]*size) StartWaitFree( self.COMM.Alltoallw_init(MPI.IN_PLACE, rmsg) ) for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: check_a = arrayimpl.scalar(i) self.assertEqual(va, check_a) for vb in b: check_b = arrayimpl.scalar(-1) self.assertEqual(vb, check_b) class TestCCOVecSelf(BaseTestCCOVec, unittest.TestCase): COMM = MPI.COMM_SELF class TestCCOVecWorld(BaseTestCCOVec, unittest.TestCase): COMM = MPI.COMM_WORLD class TestCCOVecSelfDup(TestCCOVecSelf): def setUp(self): self.COMM = MPI.COMM_SELF.Dup() def tearDown(self): self.COMM.Free() class TestCCOVecWorldDup(TestCCOVecWorld): def setUp(self): self.COMM = MPI.COMM_WORLD.Dup() def tearDown(self): self.COMM.Free() class TestCCOVecInplaceSelf(BaseTestCCOVecInplace, unittest.TestCase): COMM = MPI.COMM_SELF class TestCCOVecInplaceWorld(BaseTestCCOVecInplace, unittest.TestCase): COMM = MPI.COMM_WORLD try: StartWaitFree( MPI.COMM_SELF.Barrier_init() ) except NotImplementedError: unittest.disable(BaseTestCCOVec, 'mpi-coll-persist') unittest.disable(BaseTestCCOVecInplace, 'mpi-coll-persist') if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_cco_vec.py000066400000000000000000000460301475341043600167430ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import arrayimpl def maxvalue(a): try: typecode = a.typecode except AttributeError: typecode = a.dtype.char if typecode == ('f'): return 1e30 elif typecode == ('d'): return 1e300 else: return 
2 ** (a.itemsize * 7) - 1 class BaseTestCCOVec: COMM = MPI.COMM_NULL def testGatherv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check_a = arrayimpl.scalar(root) check_b = arrayimpl.scalar(-1) for count in range(size): sbuf = array(root, typecode, count) rbuf = array( -1, typecode, size*size) counts = [count] * size displs = list(range(0, size*size, size)) recvbuf = rbuf.as_mpi_v(counts, displs) if rank != root: recvbuf=None self.COMM.Barrier() self.COMM.Gatherv(sbuf.as_mpi(), recvbuf, root) self.COMM.Barrier() if recvbuf is not None: for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, check_a) for vb in b: self.assertEqual(vb, check_b) def testGatherv2(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check_a = arrayimpl.scalar(root) check_b = arrayimpl.scalar(-1) for count in range(size): sbuf = array(root, typecode, size) rbuf = array( -1, typecode, size*size) sendbuf = sbuf.as_mpi_c(count) recvbuf = rbuf.as_mpi_v(count, size) if rank != root: recvbuf=None self.COMM.Barrier() self.COMM.Gatherv(sendbuf, recvbuf, root) self.COMM.Barrier() if recvbuf is not None: for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, check_a) for vb in b: self.assertEqual(vb, check_b) def testGatherv3(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) for count in range(size+1): # sbuf = array(root, typecode, count).as_raw() rbuf = array( -1, typecode, count*size).as_raw() sendbuf = sbuf recvbuf = [rbuf, count] if rank != root: recvbuf=None self.COMM.Barrier() self.COMM.Gatherv(sendbuf, recvbuf, root) self.COMM.Barrier() if recvbuf is not None: for v in rbuf: self.assertEqual(v, check) # sbuf = array(root, typecode, count).as_raw() if rank == root: rbuf = array( -1, typecode, count*size).as_raw() else: rbuf = None self.COMM.Gatherv(sbuf, rbuf, root) self.COMM.Barrier() if rank == root: for v in rbuf: self.assertEqual(v, check) def testScatterv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) for count in range(size): sbuf = array(root, typecode, size*size) rbuf = array( -1, typecode, count) counts = [count] * size displs = list(range(0, size*size, size)) sendbuf = sbuf.as_mpi_v(counts, displs) if rank != root: sendbuf = None self.COMM.Scatterv(sendbuf, rbuf.as_mpi(), root) for vr in rbuf: self.assertEqual(vr, check) def testScatterv2(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check_a = arrayimpl.scalar(root) check_b = arrayimpl.scalar(-1) for count in range(size): sbuf = array(root, typecode, size*size) rbuf = array( -1, typecode, size) sendbuf = sbuf.as_mpi_v(count, size) recvbuf = rbuf.as_mpi_c(count) if rank != root: sendbuf = None self.COMM.Scatterv(sendbuf, recvbuf, root) a, b = rbuf[:count], rbuf[count:] for va in a: self.assertEqual(va, check_a) for vb in b: self.assertEqual(vb, check_b) def testScatterv3(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) for count in range(size+1): # sbuf = array(root, 
typecode, count*size).as_raw() rbuf = array( -1, typecode, count).as_raw() sendbuf = [sbuf, count] recvbuf = rbuf if rank != root: sendbuf = None self.COMM.Scatterv(sendbuf, recvbuf, root) for v in rbuf: self.assertEqual(v, check) # if rank == root: sbuf = array(root, typecode, count*size).as_raw() else: sbuf = None rbuf = array( -1, typecode, count).as_raw() self.COMM.Scatterv(sbuf, rbuf, root) for v in rbuf: self.assertEqual(v, check) def testAllgatherv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check_a = arrayimpl.scalar(root) check_b = arrayimpl.scalar(-1) for count in range(size): sbuf = array(root, typecode, count) rbuf = array( -1, typecode, size*size) counts = [count] * size displs = list(range(0, size*size, size)) sendbuf = sbuf.as_mpi() recvbuf = rbuf.as_mpi_v(counts, displs) self.COMM.Allgatherv(sendbuf, recvbuf) for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, check_a) for vb in b: self.assertEqual(vb, check_b) def testAllgatherv2(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check_a = arrayimpl.scalar(root) check_b = arrayimpl.scalar(-1) for count in range(size): sbuf = array(root, typecode, size) rbuf = array( -1, typecode, size*size) sendbuf = sbuf.as_mpi_c(count) recvbuf = rbuf.as_mpi_v(count, size) self.COMM.Allgatherv(sendbuf, recvbuf) for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, check_a) for vb in b: self.assertEqual(vb, check_b) def testAllgatherv3(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) for count in range(size+1): # sbuf = array(root, typecode, count).as_raw() rbuf = array( -1, typecode, count*size).as_raw() sendbuf = sbuf recvbuf = [rbuf, count] self.COMM.Allgatherv(sendbuf, recvbuf) for v in rbuf: self.assertEqual(v, check) # sbuf = array(root, typecode, count).as_raw() rbuf = array( -1, typecode, count*size).as_raw() self.COMM.Allgatherv(sbuf, rbuf) for v in rbuf: self.assertEqual(v, check) def testAlltoallv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check_a = arrayimpl.scalar(root) check_b = arrayimpl.scalar(-1) for count in range(size): sbuf = array(root, typecode, size*size) rbuf = array( -1, typecode, size*size) counts = [count] * size displs = list(range(0, size*size, size)) sendbuf = sbuf.as_mpi_v(counts, displs) recvbuf = rbuf.as_mpi_v(counts, displs) self.COMM.Alltoallv(sendbuf, recvbuf) for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, check_a) for vb in b: self.assertEqual(vb, check_b) def testAlltoallv2(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check_a = arrayimpl.scalar(root) check_b = arrayimpl.scalar(-1) for count in range(size): sbuf = array(root, typecode, size*size) rbuf = array( -1, typecode, size*size) sendbuf = sbuf.as_mpi_v(count, size) recvbuf = rbuf.as_mpi_v(count, size) self.COMM.Alltoallv(sendbuf, recvbuf) for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, check_a) for vb in b: self.assertEqual(vb, check_b) def 
testAlltoallv3(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for root in range(size): check = arrayimpl.scalar(root) for count in range(size+1): # sbuf = array(root, typecode, count*size).as_raw() rbuf = array( -1, typecode, count*size).as_raw() sendbuf = [sbuf, count] recvbuf = [rbuf, count] self.COMM.Alltoallv(sendbuf, recvbuf) for v in rbuf: self.assertEqual(v, check) # sbuf = array(root, typecode, count*size).as_raw() rbuf = array( -1, typecode, count*size).as_raw() self.COMM.Alltoallv(sbuf, rbuf) for v in rbuf: self.assertEqual(v, check) def testAlltoallw(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for n in range(1, size+1): check = arrayimpl.scalar(n) sbuf = array( n, typecode, (size, n)) rbuf = array(-1, typecode, (size, n)) sdt, rdt = sbuf.mpidtype, rbuf.mpidtype sdsp = list(range(0, size*n*sdt.extent, n*sdt.extent)) rdsp = list(range(0, size*n*rdt.extent, n*rdt.extent)) smsg = (sbuf.as_raw(), ([n]*size, sdsp), [sdt]*size) rmsg = (rbuf.as_raw(), ([n]*size, rdsp), [rdt]*size) try: self.COMM.Alltoallw(smsg, rmsg) except NotImplementedError: self.skipTest('mpi-alltoallw') for value in rbuf.flat: self.assertEqual(value, check) def testAlltoallwBottom(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): if unittest.is_mpi_gpu('openmpi', array): continue for n in range(1, size+1): check = arrayimpl.scalar(n) sbuf = array( n, typecode, (size, n)) rbuf = array(-1, typecode, (size, n)) saddr = MPI.Get_address(sbuf.as_raw()) raddr = MPI.Get_address(rbuf.as_raw()) sdt, rdt = sbuf.mpidtype, rbuf.mpidtype stypes = [ MPI.Datatype.Create_struct([n], [saddr+d], [sdt]).Commit() for d in list(range(0, size*n*sdt.extent, n*sdt.extent)) ] rtypes = [ MPI.Datatype.Create_struct([n], [raddr+d], [sdt]).Commit() for d in list(range(0, size*n*rdt.extent, n*rdt.extent)) ] smsg = (MPI.BOTTOM, ([1]*size, [0]*size), stypes) rmsg = (MPI.BOTTOM, ([1]*size, [0]*size), rtypes) try: self.COMM.Alltoallw(smsg, rmsg) except NotImplementedError: self.skipTest('mpi-alltoallw') finally: for t in stypes: t.Free() for t in rtypes: t.Free() for value in rbuf.flat: self.assertEqual(value, check) @unittest.skipMPI('msmpi(<8.1.0)') @unittest.skipMPI('openmpi(<1.8.0)') @unittest.skipIf(MPI.BOTTOM == MPI.IN_PLACE, 'mpi-in-place') class BaseTestCCOVecInplace: COMM = MPI.COMM_NULL def testAlltoallv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for count in range(size): rbuf = array(-1, typecode, size*size) counts = [count] * size displs = list(range(0, size*size, size)) for i in range(size): for j in range(count): rbuf[i*size+j] = rank recvbuf = rbuf.as_mpi_v(counts, displs) self.COMM.Alltoallv(MPI.IN_PLACE, recvbuf) for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: check_a = arrayimpl.scalar(i) self.assertEqual(va, check_a) for vb in b: check_b = arrayimpl.scalar(-1) self.assertEqual(vb, check_b) def testAlltoallw(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for count in range(size): rbuf = array(-1, typecode, size*size) for i in range(size): for j in range(count): rbuf[i*size+j] = rank rdt = rbuf.mpidtype rdsp = list(range(0, size*size*rdt.extent, size*rdt.extent)) rmsg = (rbuf.as_raw(), ([count]*size, rdsp), [rdt]*size) try: self.COMM.Alltoallw(MPI.IN_PLACE, rmsg) except NotImplementedError: 
self.skipTest('mpi-alltoallw') for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: check_a = arrayimpl.scalar(i) self.assertEqual(va, check_a) for vb in b: check_b = arrayimpl.scalar(-1) self.assertEqual(vb, check_b) def testAlltoallw2(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): for count in range(size): rbuf = array(-1, typecode, size*size) for i in range(size): for j in range(count): rbuf[i*size+j] = rank rdt = rbuf.mpidtype rdsp = list(range(0, size*size*rdt.extent, size*rdt.extent)) rmsg = (rbuf.as_raw(), [count]*size, rdsp, [rdt]*size) try: self.COMM.Alltoallw(MPI.IN_PLACE, rmsg) except NotImplementedError: self.skipTest('mpi-alltoallw') for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: check_a = arrayimpl.scalar(i) self.assertEqual(va, check_a) for vb in b: check_b = arrayimpl.scalar(-1) self.assertEqual(vb, check_b) class TestCCOVecSelf(BaseTestCCOVec, unittest.TestCase): COMM = MPI.COMM_SELF class TestCCOVecWorld(BaseTestCCOVec, unittest.TestCase): COMM = MPI.COMM_WORLD class TestCCOVecSelfDup(TestCCOVecSelf): def setUp(self): self.COMM = MPI.COMM_SELF.Dup() def tearDown(self): self.COMM.Free() class TestCCOVecWorldDup(TestCCOVecWorld): def setUp(self): self.COMM = MPI.COMM_WORLD.Dup() def tearDown(self): self.COMM.Free() class TestCCOVecInplaceSelf(BaseTestCCOVecInplace, unittest.TestCase): COMM = MPI.COMM_SELF class TestCCOVecInplaceWorld(BaseTestCCOVecInplace, unittest.TestCase): COMM = MPI.COMM_WORLD if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_cco_vec_inter.py000066400000000000000000000171621475341043600201500ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import arrayimpl @unittest.skipMPI('openmpi(<1.6.0)') @unittest.skipMPI('MPICH1') @unittest.skipIf(MPI.ROOT == MPI.PROC_NULL, 'mpi-root') @unittest.skipIf(MPI.COMM_WORLD.Get_size() < 2, 'mpi-world-size<2') class BaseTestCCOVecInter: BASECOMM = MPI.COMM_NULL INTRACOMM = MPI.COMM_NULL INTERCOMM = MPI.COMM_NULL def setUp(self): size = self.BASECOMM.Get_size() rank = self.BASECOMM.Get_rank() if rank < size // 2: self.COLOR = 0 self.LOCAL_LEADER = 0 self.REMOTE_LEADER = size // 2 else: self.COLOR = 1 self.LOCAL_LEADER = 0 self.REMOTE_LEADER = 0 self.INTRACOMM = self.BASECOMM.Split(self.COLOR, key=0) Create_intercomm = MPI.Intracomm.Create_intercomm self.INTERCOMM = Create_intercomm( self.INTRACOMM, self.LOCAL_LEADER, self.BASECOMM, self.REMOTE_LEADER, ) def tearDown(self): self.INTRACOMM.Free() self.INTERCOMM.Free() def testGatherv(self): comm = self.INTERCOMM rank = comm.Get_rank() size = comm.Get_size() rsize = comm.Get_remote_size() for array, typecode in arrayimpl.loop(): for color in (0, 1): if self.COLOR == color: for root in range(size): if root == rank: rbuf = array(-1, typecode, (rsize, root+color)) comm.Gatherv(None, rbuf.as_mpi(), root=MPI.ROOT) check = arrayimpl.scalar(root) for value in rbuf.flat: self.assertEqual(value, check) else: comm.Gatherv(None, None, root=MPI.PROC_NULL) else: for root in range(rsize): sbuf = array(root, typecode, root+color) comm.Gatherv(sbuf.as_mpi(), None, root=root) def testScatterv(self): comm = self.INTERCOMM rank = comm.Get_rank() size = comm.Get_size() rsize = comm.Get_remote_size() for array, typecode in arrayimpl.loop(): for color in (0, 1): if self.COLOR == color: for root in range(size): if root == rank: sbuf = array(root, typecode, (rsize, root+color)) 
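                            # Intercommunicator rooted collectives, as exercised by the
                            # Scatterv calls that follow: the single root process passes
                            # root=MPI.ROOT, the remaining processes in the root's group
                            # pass root=MPI.PROC_NULL, and every process in the remote
                            # group passes the root's rank within the other group.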
comm.Scatterv(sbuf.as_mpi(), None, root=MPI.ROOT) else: comm.Scatterv(None, None, root=MPI.PROC_NULL) else: for root in range(rsize): rbuf = array(root, typecode, root+color) comm.Scatterv(None, rbuf.as_mpi(), root=root) check = arrayimpl.scalar(root) for value in rbuf: self.assertEqual(value, check) def testAllgatherv(self): comm = self.INTERCOMM rank = comm.Get_rank() size = comm.Get_size() rsize = comm.Get_remote_size() for array, typecode in arrayimpl.loop(): for color in (0, 1): if self.COLOR == color: for n in range(size): sbuf = array( n, typecode, color) rbuf = array(-1, typecode, (rsize, n+color)) comm.Allgatherv(sbuf.as_mpi(), rbuf.as_mpi()) check = arrayimpl.scalar(n) for value in rbuf.flat: self.assertEqual(value, check) else: for n in range(rsize): sbuf = array( n, typecode, n+color) rbuf = array(-1, typecode, (rsize, color)) comm.Allgatherv(sbuf.as_mpi(), rbuf.as_mpi()) check = arrayimpl.scalar(n) for value in rbuf.flat: self.assertEqual(value, check) def testAlltoallv(self): comm = self.INTERCOMM rank = comm.Get_rank() size = comm.Get_size() rsize = comm.Get_remote_size() for array, typecode in arrayimpl.loop(): for color in (0, 1): if self.COLOR == color: for n in range(size): sbuf = array( n, typecode, (rsize, (n+1)*color)) rbuf = array(-1, typecode, (rsize, n+3*color)) comm.Alltoallv(sbuf.as_mpi(), rbuf.as_mpi()) check = arrayimpl.scalar(n) for value in rbuf.flat: self.assertEqual(value, check) else: for n in range(rsize): sbuf = array( n, typecode, (rsize, n+3*color)) rbuf = array(-1, typecode, (rsize, (n+1)*color)) comm.Alltoallv(sbuf.as_mpi(), rbuf.as_mpi()) check = arrayimpl.scalar(n) for value in rbuf.flat: self.assertEqual(value, check) def testAlltoallw(self): comm = self.INTERCOMM rank = comm.Get_rank() size = comm.Get_size() rsize = comm.Get_remote_size() for array, typecode in arrayimpl.loop(): for color in (0, 1): if self.COLOR == color: for n in range(size): sbuf = array( n, typecode, (rsize, (n+1)*color)) rbuf = array(-1, typecode, (rsize, n+3*color)) comm.Alltoallv(sbuf.as_mpi(), rbuf.as_mpi()) check = arrayimpl.scalar(n) for value in rbuf.flat: self.assertEqual(value, check) else: for n in range(rsize): sbuf = array( n, typecode, (rsize, n+3*color)) rbuf = array(-1, typecode, (rsize, (n+1)*color)) comm.Alltoallv(sbuf.as_mpi(), rbuf.as_mpi()) check = arrayimpl.scalar(n) for value in rbuf.flat: self.assertEqual(value, check) def testAlltoallw(self): comm = self.INTERCOMM size = comm.Get_size() rsize = comm.Get_remote_size() for array, typecode in arrayimpl.loop(): for n in range(5): check = arrayimpl.scalar(n) sbuf = array( n, typecode, (rsize, n)) rbuf = array(-1, typecode, (rsize, n)) sdt, rdt = sbuf.mpidtype, rbuf.mpidtype sex, rex = sdt.extent, rdt.extent sdsp = [i*n*sex for i in range(rsize)] rdsp = [i*n*rex for i in range(rsize)] smsg = (sbuf.as_raw(), ([n]*rsize, sdsp), [sdt]*rsize) rmsg = (rbuf.as_raw(), ([n]*rsize, rdsp), [rdt]*rsize) try: comm.Alltoallw(smsg, rmsg) except NotImplementedError: self.skipTest('mpi-alltoallw') for value in rbuf.flat: self.assertEqual(value, check) class TestCCOVecInter(BaseTestCCOVecInter, unittest.TestCase): BASECOMM = MPI.COMM_WORLD class TestCCOVecInterDup(TestCCOVecInter): def setUp(self): self.BASECOMM = self.BASECOMM.Dup() super().setUp() def tearDown(self): self.BASECOMM.Free() super().tearDown() if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_cffi.py000066400000000000000000000043161475341043600162520ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest try: import 
cffi
except ImportError:
    cffi = None


@unittest.skipIf(cffi is None, 'cffi')
class TestCFFI(unittest.TestCase):

    mpitypes = [
        MPI.Datatype, MPI.Request, MPI.Info, MPI.Errhandler,
        MPI.Session, MPI.Group, MPI.Win, MPI.Op,
        MPI.File, MPI.Message, MPI.Comm,
    ]

    objects = [
        MPI.DATATYPE_NULL,
        MPI.INT, MPI.DOUBLE,
        MPI.REQUEST_NULL,
        MPI.INFO_NULL, MPI.INFO_ENV,
        MPI.ERRHANDLER_NULL, MPI.ERRORS_RETURN, MPI.ERRORS_ARE_FATAL,
        MPI.GROUP_NULL, MPI.GROUP_EMPTY,
        MPI.WIN_NULL,
        MPI.OP_NULL, MPI.SUM, MPI.MIN, MPI.MAX,
        MPI.FILE_NULL,
        MPI.MESSAGE_NULL, MPI.MESSAGE_NO_PROC,
        MPI.COMM_NULL, MPI.COMM_SELF, MPI.COMM_WORLD,
    ]

    def testHandleAddress(self):
        ffi = cffi.FFI()
        typemap = {
            ffi.sizeof('int'): 'int',
            ffi.sizeof('void*'): 'void*',
        }
        typename = lambda t: t.__name__.rsplit('.', 1)[-1]
        for tp in self.mpitypes:
            handle_t = typemap[MPI._sizeof(tp)]
            mpi_t = 'MPI_' + typename(tp)
            ffi.cdef(f"typedef {handle_t} {mpi_t};")
        for obj in self.objects:
            if isinstance(obj, MPI.Comm):
                mpi_t = 'MPI_Comm'
            else:
                mpi_t = 'MPI_' + typename(type(obj))
            oldobj = obj
            newobj = type(obj)()
            handle_old = ffi.cast(mpi_t+'*', MPI._addressof(oldobj))
            handle_new = ffi.cast(mpi_t+'*', MPI._addressof(newobj))
            handle_new[0] = handle_old[0]
            self.assertEqual(oldobj, newobj)

    def testHandleValue(self):
        ffi = cffi.FFI()
        typemap = {
            ffi.sizeof('uint32_t'): 'uint32_t',
            ffi.sizeof('uint64_t'): 'uint64_t',
        }
        for obj in self.objects:
            uintptr_t = typemap[MPI._sizeof(obj)]
            handle = ffi.cast(uintptr_t+'*', MPI._addressof(obj))[0]
            self.assertEqual(handle, MPI._handleof(obj))


if __name__ == '__main__':
    unittest.main()

mpi4py-4.0.3/test/test_comm.py000066400000000000000000000372441475341043600163020ustar00rootroot00000000000000from mpi4py import MPI
import mpiunittest as unittest


class TestCommNull(unittest.TestCase):

    def testConstructor(self):
        comm = MPI.Comm()
        self.assertEqual(comm, MPI.COMM_NULL)
        self.assertIsNot(comm, MPI.COMM_NULL)
        def construct(): MPI.Comm((1,2,3))
        self.assertRaises(TypeError, construct)

    def testConstructorIntra(self):
        comm_null = MPI.Intracomm()
        self.assertEqual(comm_null, MPI.COMM_NULL)
        self.assertIsNot(comm_null, MPI.COMM_NULL)

    def testConstructorInter(self):
        comm_null = MPI.Intercomm()
        self.assertEqual(comm_null, MPI.COMM_NULL)
        self.assertIsNot(comm_null, MPI.COMM_NULL)

    def testGetName(self):
        name = MPI.COMM_NULL.Get_name()
        self.assertEqual(name, "MPI_COMM_NULL")

    def testPickle(self):
        from pickle import dumps, loads
        comm_null = loads(dumps(MPI.COMM_NULL))
        self.assertIs(comm_null, MPI.COMM_NULL)
        comm_null = loads(dumps(MPI.Comm(MPI.COMM_NULL)))
        self.assertIsNot(comm_null, MPI.COMM_NULL)
        self.assertEqual(comm_null, MPI.COMM_NULL)


class BaseTestComm:

    def testConstructor(self):
        comm = MPI.Comm(self.COMM)
        self.assertEqual(comm, self.COMM)
        self.assertIsNot(comm, self.COMM)

    def testSize(self):
        size = self.COMM.Get_size()
        self.assertGreaterEqual(size, 1)

    def testRank(self):
        size = self.COMM.Get_size()
        rank = self.COMM.Get_rank()
        self.assertGreaterEqual(rank, 0)
        self.assertLess(rank, size)

    def testGroup(self):
        comm = self.COMM
        group = self.COMM.Get_group()
        self.assertEqual(comm.Get_size(), group.Get_size())
        self.assertEqual(comm.Get_rank(), group.Get_rank())
        group.Free()
        self.assertEqual(group, MPI.GROUP_NULL)

    def testCloneFree(self):
        comm = self.COMM.Clone()
        comm.Free()
        self.assertEqual(comm, MPI.COMM_NULL)

    def testCompare(self):
        results = (MPI.IDENT, MPI.CONGRUENT, MPI.SIMILAR, MPI.UNEQUAL)
        ccmp = MPI.Comm.Compare(self.COMM, MPI.COMM_WORLD)
        self.assertIn(ccmp, results)
        ccmp = MPI.Comm.Compare(self.COMM, self.COMM)
        self.assertEqual(ccmp, MPI.IDENT)
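        # Informally: IDENT means the two handles refer to the same communicator,
        # CONGRUENT means distinct communicators with identical groups and rank
        # order (e.g. the Dup compared next), SIMILAR means the same members in a
        # different order, and UNEQUAL covers everything else.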
comm = self.COMM.Dup() ccmp = MPI.Comm.Compare(self.COMM, comm) comm.Free() self.assertEqual(ccmp, MPI.CONGRUENT) def testIsInter(self): is_inter = self.COMM.Is_inter() self.assertIs(type(is_inter), bool) def testGetSetName(self): try: name = self.COMM.Get_name() self.COMM.Set_name("mycomm") self.assertEqual(self.COMM.Get_name(), "mycomm") self.COMM.Set_name(name) self.assertEqual(self.COMM.Get_name(), name) self.COMM.name = self.COMM.name except NotImplementedError: self.skipTest('mpi-comm-name') def testGetParent(self): try: parent = MPI.Comm.Get_parent() self.assertIsInstance(parent, MPI.Intercomm) except NotImplementedError: self.skipTest('mpi-comm-get_parent') def testDupWithInfo(self): info = None self.COMM.Dup(info).Free() info = MPI.INFO_NULL self.COMM.Dup(info).Free() self.COMM.Dup_with_info(info).Free() info = MPI.Info.Create() self.COMM.Dup(info).Free() self.COMM.Dup_with_info(info).Free() info.Free() @unittest.skipMPI('mpich(<=3.1.0)', MPI.Query_thread() > MPI.THREAD_SINGLE) def testIDup(self): try: comm, request = self.COMM.Idup() except NotImplementedError: self.skipTest('mpi-comm-idup') request.Wait() ccmp = MPI.Comm.Compare(self.COMM, comm) comm.Free() self.assertEqual(ccmp, MPI.CONGRUENT) @unittest.skipMPI('mpich(<=3.1.0)', MPI.Query_thread() > MPI.THREAD_SINGLE) def testIDupWithInfo(self): try: comm, request = self.COMM.Idup_with_info(MPI.INFO_NULL) except NotImplementedError: self.skipTest('mpi-comm-idup-info') request.Wait() ccmp = MPI.Comm.Compare(self.COMM, comm) comm.Free() self.assertEqual(ccmp, MPI.CONGRUENT) # new_info = MPI.Info.Create() for info in (None, MPI.INFO_NULL, new_info): comm, request = self.COMM.Idup(info) request.Wait() ccmp = MPI.Comm.Compare(self.COMM, comm) comm.Free() self.assertEqual(ccmp, MPI.CONGRUENT) new_info.Free() def testGetSetInfo(self): info = MPI.Info.Create() self.COMM.Set_info(info) info.Free() info = self.COMM.Get_info() self.COMM.Set_info(info) info.Free() def testCreate(self): group = self.COMM.Get_group() comm = self.COMM.Create(group) ccmp = MPI.Comm.Compare(self.COMM, comm) self.assertEqual(ccmp, MPI.CONGRUENT) ccmp = self.COMM.Compare(comm) self.assertEqual(ccmp, MPI.CONGRUENT) comm.Free() group.Free() @unittest.skipMPI('openmpi(<=1.8.1)') def testCreateGroup(self): group = self.COMM.Get_group() try: comm = self.COMM.Create_group(group) except NotImplementedError: self.assertLess(MPI.VERSION, 3) self.skipTest('mpi-comm-create_group') else: ccmp = MPI.Comm.Compare(self.COMM, comm) comm.Free() self.assertEqual(ccmp, MPI.CONGRUENT) finally: group.Free() def testCreateFromGroup(self): group = self.COMM.Get_group() try: comm = MPI.Intracomm.Create_from_group(group) except NotImplementedError: self.assertLess(MPI.VERSION, 4) self.skipTest('mpi-comm-create_from_group') except MPI.Exception as exc: # openmpi UNSUPPORTED = MPI.ERR_UNSUPPORTED_OPERATION if exc.Get_error_class() != UNSUPPORTED: raise else: ccmp = MPI.Comm.Compare(self.COMM, comm) comm.Free() self.assertEqual(ccmp, MPI.CONGRUENT) finally: group.Free() def testSplit(self): base = self.COMM comm = base.Split(42, 42) self.assertEqual(comm.Get_rank(), base.Get_rank()) self.assertEqual(comm.Get_size(), base.Get_size()) comm.Free() color = base.Get_rank() comm = base.Split(color, 42) self.assertEqual(comm.Get_rank(), 0) self.assertEqual(comm.Get_size(), 1) comm.Free() @unittest.skipMPI('openmpi(==2.0.0)') def testSplitTypeShared(self): try: MPI.COMM_SELF.Split_type(MPI.COMM_TYPE_SHARED).Free() except NotImplementedError: self.skipTest('mpi-comm-split_type') comm = 
self.COMM.Split_type(MPI.UNDEFINED) self.assertEqual(comm, MPI.COMM_NULL) comm = self.COMM.Split_type(MPI.COMM_TYPE_SHARED) self.assertNotEqual(comm, MPI.COMM_NULL) size = self.COMM.Get_size() rank = self.COMM.Get_rank() if size == 1: self.assertEqual(comm.size, 1) self.assertEqual(comm.rank, 0) comm.Free() for root in range(size): if rank == root: split_type = MPI.COMM_TYPE_SHARED else: split_type = MPI.UNDEFINED comm = self.COMM.Split_type(split_type) if rank == root: self.assertNotEqual(comm, MPI.COMM_NULL) self.assertEqual(comm.size, 1) self.assertEqual(comm.rank, 0) comm.Free() else: self.assertEqual(comm, MPI.COMM_NULL) def testSplitTypeHWGuided(self): try: MPI.COMM_SELF.Split_type(MPI.COMM_TYPE_SHARED).Free() except NotImplementedError: self.skipTest('mpi-comm-split_type') if MPI.COMM_TYPE_HW_GUIDED == MPI.UNDEFINED: self.skipTest("mpi-comm-split_type-hw_guided") split_type = MPI.COMM_TYPE_HW_GUIDED # comm = self.COMM.Split_type(split_type) self.assertEqual(comm, MPI.COMM_NULL) comm = self.COMM.Split_type(split_type, info=MPI.INFO_NULL) self.assertEqual(comm, MPI.COMM_NULL) info = MPI.Info.Create() comm = self.COMM.Split_type(split_type, info=info) self.assertEqual(comm, MPI.COMM_NULL) info.Set("foo", "bar") comm = self.COMM.Split_type(split_type, info=info) self.assertEqual(comm, MPI.COMM_NULL) info.Set("mpi_hw_resource_type", "@dont-thread-on-me@") comm = self.COMM.Split_type(split_type, info=info) if unittest.is_mpi('impi(==2021.14.0)'): comm.free() if unittest.is_mpi('impi(==2021.14.1)'): comm.free() self.assertEqual(comm, MPI.COMM_NULL) info.Free() # if unittest.is_mpi('impi'): return restype = "mpi_hw_resource_type" shmem = "mpi_shared_memory" info = MPI.Info.Create() info.Set(restype, shmem) comm = self.COMM.Split_type(split_type, info=info) self.assertNotEqual(comm, MPI.COMM_NULL) self.assertEqual(info.Get(restype), shmem) comm.Free() size = self.COMM.Get_size() rank = self.COMM.Get_rank() for root in range(size): if rank == root: split_type = MPI.COMM_TYPE_HW_GUIDED else: split_type = MPI.UNDEFINED comm = self.COMM.Split_type(split_type, info=info) self.assertEqual(info.Get(restype), shmem) if rank == root: self.assertNotEqual(comm, MPI.COMM_NULL) self.assertEqual(comm.size, 1) self.assertEqual(comm.rank, 0) comm.Free() else: self.assertEqual(comm, MPI.COMM_NULL) info.Free() def testSplitTypeResourceGuided(self): try: MPI.COMM_SELF.Split_type(MPI.COMM_TYPE_SHARED).Free() except NotImplementedError: self.skipTest('mpi-comm-split_type') if MPI.COMM_TYPE_RESOURCE_GUIDED == MPI.UNDEFINED: self.skipTest("mpi-comm-split_type-resource_guided") split_type = MPI.COMM_TYPE_RESOURCE_GUIDED # comm = self.COMM.Split_type(split_type) self.assertEqual(comm, MPI.COMM_NULL) comm = self.COMM.Split_type(split_type, info=MPI.INFO_NULL) self.assertEqual(comm, MPI.COMM_NULL) info = MPI.Info.Create() comm = self.COMM.Split_type(split_type, info=info) self.assertEqual(comm, MPI.COMM_NULL) info.Set("foo", "bar") comm = self.COMM.Split_type(split_type, info=info) self.assertEqual(comm, MPI.COMM_NULL) info.Set("mpi_hw_resource_type", "@dont-thread-on-me@") comm = self.COMM.Split_type(split_type, info=info) if unittest.is_mpi('impi(==2021.14.0)'): comm.free() if unittest.is_mpi('impi(==2021.14.1)'): comm.free() self.assertEqual(comm, MPI.COMM_NULL) info.Free() # restype = "mpi_hw_resource_type" shmem = "mpi_shared_memory" info = MPI.Info.Create() info.Set(restype, shmem) comm = self.COMM.Split_type(split_type, info=info) self.assertNotEqual(comm, MPI.COMM_NULL) self.assertEqual(info.Get(restype), 
shmem) comm.Free() size = self.COMM.Get_size() rank = self.COMM.Get_rank() for root in range(size): if rank == root: split_type = MPI.COMM_TYPE_RESOURCE_GUIDED else: split_type = MPI.UNDEFINED comm = self.COMM.Split_type(split_type, info=info) self.assertEqual(info.Get(restype), shmem) if rank == root: self.assertNotEqual(comm, MPI.COMM_NULL) self.assertEqual(comm.size, 1) self.assertEqual(comm.rank, 0) comm.Free() else: self.assertEqual(comm, MPI.COMM_NULL) info.Free() def testSplitTypeHWUnguided(self): try: MPI.COMM_SELF.Split_type(MPI.COMM_TYPE_SHARED).Free() except NotImplementedError: self.skipTest('mpi-comm-split_type') if MPI.COMM_TYPE_HW_UNGUIDED == MPI.UNDEFINED: self.skipTest("mpi-comm-split_type-hw_unguided") hwcomm = [self.COMM] while len(hwcomm) < 32: rank = hwcomm[-1].Get_rank() info = MPI.Info.Create() comm = hwcomm[-1].Split_type( MPI.COMM_TYPE_HW_UNGUIDED, key=rank, info=info, ) if comm != MPI.COMM_NULL: self.assertTrue(info.Get("mpi_hw_resource_type")) self.assertLess(comm.Get_size(), hwcomm[-1].Get_size()) info.Free() if comm == MPI.COMM_NULL: break hwcomm.append(comm) for comm in hwcomm[1:]: comm.Free() def testBuffering(self): comm = self.COMM.Dup() buf = MPI.Alloc_mem((1<<16)+MPI.BSEND_OVERHEAD) try: with self.catchNotImplementedError(4, 1): comm.Attach_buffer(buf) with self.catchNotImplementedError(4, 1): comm.Flush_buffer() with self.catchNotImplementedError(4, 1): comm.Iflush_buffer().Wait() finally: with self.catchNotImplementedError(4, 1): oldbuf = comm.Detach_buffer() self.assertEqual(oldbuf.address, buf.address) self.assertEqual(oldbuf.nbytes, buf.nbytes) MPI.Free_mem(buf) with self.catchNotImplementedError(4, 1): comm.Attach_buffer(MPI.BUFFER_AUTOMATIC) bufauto = comm.Detach_buffer() self.assertEqual(bufauto, MPI.BUFFER_AUTOMATIC) comm.Free() def testPickle(self): from pickle import dumps, loads COMM = self.COMM if COMM in (MPI.COMM_SELF, MPI.COMM_WORLD): comm = loads(dumps(COMM)) self.assertIs(comm, COMM) comm = loads(dumps(MPI.Intracomm(COMM))) self.assertIsNot(comm, COMM) self.assertEqual(comm, COMM) else: self.assertRaises(ValueError, dumps, COMM) def testPyProps(self): comm = self.COMM self.assertEqual(comm.Get_size(), comm.size) self.assertEqual(comm.Get_rank(), comm.rank) self.assertEqual(comm.Is_intra(), comm.is_intra) self.assertEqual(comm.Is_inter(), comm.is_inter) self.assertEqual(comm.Get_topology(), comm.topology) # group = comm.group self.assertEqual(type(group), MPI.Group) group.Free() # info = comm.info self.assertIs(type(info), MPI.Info) comm.info = info info.Free() class TestCommSelf(BaseTestComm, unittest.TestCase): def setUp(self): self.COMM = MPI.COMM_SELF def testSize(self): size = self.COMM.Get_size() self.assertEqual(size, 1) def testRank(self): rank = self.COMM.Get_rank() self.assertEqual(rank, 0) @unittest.skipMPI('openmpi(<5.0.6)') def testCreateFromGroup(self): super().testCreateFromGroup() class TestCommWorld(BaseTestComm, unittest.TestCase): def setUp(self): self.COMM = MPI.COMM_WORLD class TestCommSelfDup(TestCommSelf): def setUp(self): self.COMM = MPI.COMM_SELF.Dup() def tearDown(self): self.COMM.Free() @unittest.skipMPI('openmpi(<1.4.0)', MPI.Query_thread() > MPI.THREAD_SINGLE) class TestCommWorldDup(TestCommWorld): def setUp(self): self.COMM = MPI.COMM_WORLD.Dup() def tearDown(self): self.COMM.Free() if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_comm_inter.py000066400000000000000000000176451475341043600175100ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest def 
ch3_nemesis(): return 'ch3:nemesis' in MPI.Get_library_version() @unittest.skipIf(MPI.COMM_WORLD.Get_size() < 2, 'mpi-world-size<2') class BaseTestIntercomm: BASECOMM = MPI.COMM_NULL INTRACOMM = MPI.COMM_NULL INTERCOMM = MPI.COMM_NULL def setUp(self): size = self.BASECOMM.Get_size() rank = self.BASECOMM.Get_rank() if rank < size // 2 : self.COLOR = 0 self.LOCAL_LEADER = 0 self.REMOTE_LEADER = size // 2 else: self.COLOR = 1 self.LOCAL_LEADER = 0 self.REMOTE_LEADER = 0 self.INTRACOMM = self.BASECOMM.Split(self.COLOR, key=0) Create_intercomm = MPI.Intracomm.Create_intercomm self.INTERCOMM = Create_intercomm(self.INTRACOMM, self.LOCAL_LEADER, self.BASECOMM, self.REMOTE_LEADER) def testConstructor(self): with self.assertRaises(TypeError): MPI.Intercomm(self.INTRACOMM) with self.assertRaises(TypeError): MPI.Intracomm(self.INTERCOMM) def tearDown(self): self.INTRACOMM.Free() self.INTERCOMM.Free() def testFortran(self): intercomm = self.INTERCOMM fint = intercomm.py2f() newcomm = MPI.Comm.f2py(fint) self.assertEqual(newcomm, intercomm) self.assertIs(type(newcomm), MPI.Intercomm) def testLocalGroupSizeRank(self): intercomm = self.INTERCOMM local_group = intercomm.Get_group() self.assertEqual(local_group.size, intercomm.Get_size()) self.assertEqual(local_group.size, intercomm.size) self.assertEqual(local_group.rank, intercomm.Get_rank()) self.assertEqual(local_group.rank, intercomm.rank) local_group.Free() def testRemoteGroupSize(self): intercomm = self.INTERCOMM remote_group = intercomm.Get_remote_group() self.assertEqual(remote_group.size, intercomm.Get_remote_size()) self.assertEqual(remote_group.size, intercomm.remote_size) remote_group.Free() def testMerge(self): basecomm = self.BASECOMM intercomm = self.INTERCOMM if basecomm.rank < basecomm.size // 2: high = False else: high = True intracomm = intercomm.Merge(high) self.assertEqual(intracomm.size, basecomm.size) self.assertEqual(intracomm.rank, basecomm.rank) intracomm.Free() def testCreateFromGroups(self): lgroup = self.INTERCOMM.Get_group() rgroup = self.INTERCOMM.Get_remote_group() try: Create_from_groups = MPI.Intercomm.Create_from_groups intercomm = Create_from_groups(lgroup, 0, rgroup, 0) except NotImplementedError: self.assertLess(MPI.VERSION, 4) self.skipTest('mpi-comm-create_from_group') except MPI.Exception as exc: UNSUPPORTED = MPI.ERR_UNSUPPORTED_OPERATION if exc.Get_error_class() != UNSUPPORTED: raise else: ccmp = MPI.Comm.Compare(self.INTERCOMM, intercomm) intercomm.Free() self.assertEqual(ccmp, MPI.CONGRUENT) finally: lgroup.Free() rgroup.Free() def testSplit(self): base = self.INTERCOMM comm = base.Split(42, 42) self.assertEqual(comm.Get_rank(), base.Get_rank()) self.assertEqual(comm.Get_size(), base.Get_size()) self.assertEqual(comm.Get_remote_size(), base.Get_remote_size()) comm.Free() color = base.Get_rank() comm = base.Split(color, 42) if comm != MPI.COMM_NULL: self.assertEqual(comm.Get_rank(), 0) self.assertEqual(comm.Get_size(), 1) self.assertEqual(comm.Get_remote_size(), 1) comm.Free() @unittest.skipMPI('msmpi') @unittest.skipMPI('openmpi(<5.0.0)') @unittest.skipMPI('mpich(<4.2.0)', ch3_nemesis()) @unittest.skipMPI('mvapich(<3.0.0)', MPI.COMM_WORLD.Get_size() > 2) def testSplitTypeShared(self): try: comm = self.INTERCOMM.Split_type(MPI.COMM_TYPE_SHARED) except NotImplementedError: self.skipTest('mpi-comm-split_type') if comm != MPI.COMM_NULL: comm.Free() comm = self.INTERCOMM.Split_type(MPI.UNDEFINED) self.assertEqual(comm, MPI.COMM_NULL) def testPyProps(self): comm = self.INTERCOMM # self.assertEqual(comm.rank, 
comm.Get_rank()) self.assertEqual(comm.size, comm.Get_size()) self.assertEqual(comm.remote_size, comm.Get_remote_size()) # group = comm.remote_group self.assertEqual(type(group), MPI.Group) group.Free() # info = comm.info self.assertEqual(type(info), MPI.Info) info.Free() # self.assertTrue(comm.is_inter) self.assertFalse(comm.is_intra) class TestIntercomm(BaseTestIntercomm, unittest.TestCase): BASECOMM = MPI.COMM_WORLD class TestIntercommDup(TestIntercomm): def setUp(self): self.BASECOMM = self.BASECOMM.Dup() super().setUp() def tearDown(self): self.BASECOMM.Free() super().tearDown() class TestIntercommDupDup(TestIntercomm): def setUp(self): super().setUp() INTERCOMM = self.INTERCOMM self.INTERCOMM = self.INTERCOMM.Dup() INTERCOMM.Free() @unittest.skipIf(MPI.COMM_WORLD.Get_size() < 2, 'mpi-world-size<2') class TestIntercommCreateFromGroups(unittest.TestCase): @unittest.skipMPI('openmpi', MPI.COMM_WORLD.Get_size() > 2) def testPair(self): done = True rank = MPI.COMM_WORLD.Get_rank() if rank < 2: world_group = MPI.COMM_WORLD.Get_group() local_group = world_group.Incl([rank]) remote_group = world_group.Incl([1 - rank]) world_group.Free() try: comm = MPI.Intercomm.Create_from_groups( local_group, 0, remote_group, 0, ) self.assertEqual(comm.Get_size(), 1) self.assertEqual(comm.Get_remote_size(), 1) comm.Free() except NotImplementedError: done = False finally: local_group.Free() remote_group.Free() done = MPI.COMM_WORLD.allreduce(done, op=MPI.LAND) if not done: self.assertLess(MPI.VERSION, 4) self.skipTest('mpi-intercomm-create_from_groups') def testHalf(self): done = True size = MPI.COMM_WORLD.Get_size() rank = MPI.COMM_WORLD.Get_rank() world_group = MPI.COMM_WORLD.Get_group() low_group = world_group.Range_incl([(0, size//2-1, 1)]) high_group = world_group.Range_incl([(size//2, size-1, 1)]) world_group.Free() if rank <= size//2-1: local_group, remote_group = low_group, high_group local_leader, remote_leader = 0, high_group.Get_size()-1 else: local_group, remote_group = high_group, low_group local_leader, remote_leader = high_group.Get_size()-1, 0 try: comm = MPI.Intercomm.Create_from_groups( local_group, local_leader, remote_group, remote_leader, ) self.assertEqual(comm.Get_rank(), local_group.Get_rank()) self.assertEqual(comm.Get_size(), local_group.Get_size()) self.assertEqual(comm.Get_remote_size(), remote_group.Get_size()) comm.Free() except NotImplementedError: done = False finally: local_group.Free() remote_group.Free() if not done: self.assertLess(MPI.VERSION, 4) self.skipTest('mpi-intercomm-create_from_groups') if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_comm_topo.py000066400000000000000000000255761475341043600173520ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest class TestTopoConstructor(unittest.TestCase): def testConstructorTopocomm(self): comm = MPI.COMM_SELF with self.assertRaises(TypeError): MPI.Topocomm(comm) def testConstructorCartcomm(self): comm = MPI.COMM_SELF cart = comm.Create_cart([1]) with self.assertRaises(TypeError): MPI.Graphcomm(cart) with self.assertRaises(TypeError): MPI.Distgraphcomm(cart) cart.Free() def testConstructorGraphcomm(self): comm = MPI.COMM_SELF graph = comm.Create_graph([0, 1], [0]) with self.assertRaises(TypeError): MPI.Cartcomm(graph) with self.assertRaises(TypeError): MPI.Distgraphcomm(graph) graph.Free() def testConstructorDistGraphcomm(self): comm = MPI.COMM_SELF try: distgraph = comm.Create_dist_graph([], [], []) except NotImplementedError: self.skipTest('mpi-comm-create_dist_graph') with 
self.assertRaises(TypeError): MPI.Cartcomm(distgraph) with self.assertRaises(TypeError): MPI.Graphcomm(distgraph) distgraph.Free() with self.assertRaises(ValueError): comm.Create_dist_graph_adjacent( [0], [0], MPI.WEIGHTS_EMPTY, MPI.WEIGHTS_EMPTY, ) class BaseTestTopo: COMM = MPI.COMM_NULL def checkFortran(self, oldcomm): fint = oldcomm.py2f() newcomm = MPI.Comm.f2py(fint) self.assertEqual(newcomm, oldcomm) self.assertEqual(type(newcomm), type(oldcomm)) def testCartcomm(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() for ndim in (1, 2, 3, 4, 5): dims = MPI.Compute_dims(size, [0]*ndim) periods = [True] * len(dims) topo = comm.Create_cart(dims, periods=periods) self.assertTrue(topo.is_topo) self.assertEqual(topo.topology, MPI.CART) self.checkFortran(topo) self.assertEqual(topo.dim, len(dims)) self.assertEqual(topo.ndim, len(dims)) coords = topo.coords self.assertEqual(coords, topo.Get_coords(topo.rank)) self.assertEqual(topo.topo, (dims, periods, coords)) neighbors = [] coordinates = topo.Get_coords(topo.rank) for i in range(ndim): for d in (-1, +1): coord = list(coordinates) coord[i] = (coord[i]+d) % dims[i] neigh = topo.Get_cart_rank(coord) self.assertEqual(coord, topo.Get_coords(neigh)) source, dest = topo.Shift(i, d) self.assertEqual(neigh, dest) neighbors.append(neigh) self.assertEqual(topo.indegree, len(neighbors)) self.assertEqual(topo.outdegree, len(neighbors)) self.assertEqual(topo.inedges, neighbors) self.assertEqual(topo.outedges, neighbors) inedges, outedges = topo.inoutedges self.assertEqual(inedges, neighbors) self.assertEqual(outedges, neighbors) if ndim == 1: topo.Free() continue for i in range(ndim): rem_dims = [1]*ndim rem_dims[i] = 0 sub = topo.Sub(rem_dims) if sub != MPI.COMM_NULL: self.assertEqual(sub.dim, ndim-1) dims = topo.dims del dims[i] self.assertEqual(sub.dims, dims) sub.Free() topo.Free() with self.assertRaises(ValueError): topo = comm.Create_cart([comm.Get_size()], []) with self.assertRaises(ValueError): topo = comm.Create_cart([comm.Get_size()], [0, 0]) @unittest.skipMPI('MPI(<2.0)') def testCartcommZeroDim(self): comm = self.COMM topo = comm.Create_cart([]) if topo == MPI.COMM_NULL: return self.assertEqual(topo.dim, 0) self.assertEqual(topo.dims, []) self.assertEqual(topo.periods, []) self.assertEqual(topo.coords, []) rank = topo.Get_cart_rank([]) self.assertEqual(rank, 0) inedges, outedges = topo.inoutedges self.assertEqual(inedges, []) self.assertEqual(outedges, []) topo.Free() def testGraphcomm(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() index, edges = [0], [] for i in range(size): pos = index[-1] index.append(pos+2) edges.append((i-1)%size) edges.append((i+1)%size) topo = comm.Create_graph(index[1:], edges) self.assertTrue(topo.is_topo) self.assertEqual(topo.topology, MPI.GRAPH) self.checkFortran(topo) topo.Free() topo = comm.Create_graph(index, edges) self.assertEqual(topo.dims, (len(index)-1, len(edges))) self.assertEqual(topo.nnodes, len(index)-1) self.assertEqual(topo.nedges, len(edges)) self.assertEqual(topo.index, index[1:]) self.assertEqual(topo.edges, edges) self.assertEqual(topo.topo, (index[1:], edges)) neighbors = edges[index[rank]:index[rank+1]] self.assertEqual(neighbors, topo.neighbors) self.assertEqual(len(neighbors), topo.nneighbors) for rank in range(size): neighs = topo.Get_neighbors(rank) self.assertEqual(neighs, [(rank-1)%size, (rank+1)%size]) self.assertEqual(topo.indegree, len(neighbors)) self.assertEqual(topo.outdegree, len(neighbors)) self.assertEqual(topo.inedges, neighbors) 
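        # The ring graph built above is symmetric: in and out edges both equal this rank's neighbor list.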
self.assertEqual(topo.outedges, neighbors) inedges, outedges = topo.inoutedges self.assertEqual(inedges, neighbors) self.assertEqual(outedges, neighbors) topo.Free() @unittest.skipMPI('msmpi') def testDistgraphcommAdjacent(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() try: topo = comm.Create_dist_graph_adjacent(None, None) topo.Free() except NotImplementedError: self.skipTest('mpi-comm-create_dist_graph_adjacent') # sources = [(rank-2)%size, (rank-1)%size] destinations = [(rank+1)%size, (rank+2)%size] topo = comm.Create_dist_graph_adjacent(sources, destinations) self.assertTrue(topo.is_topo) self.assertEqual(topo.topology, MPI.DIST_GRAPH) self.checkFortran(topo) self.assertEqual(topo.Get_dist_neighbors_count(), (2, 2, False)) self.assertEqual(topo.Get_dist_neighbors(), (sources, destinations, None)) self.assertEqual(topo.indegree, len(sources)) self.assertEqual(topo.outdegree, len(destinations)) self.assertEqual(topo.inedges, sources) self.assertEqual(topo.outedges, destinations) inedges, outedges = topo.inoutedges self.assertEqual(inedges, sources) self.assertEqual(outedges, destinations) topo.Free() # sourceweights = [1, 2] destweights = [3, 4] weights = (sourceweights, destweights) topo = comm.Create_dist_graph_adjacent(sources, destinations, sourceweights, destweights) self.assertEqual(topo.Get_dist_neighbors_count(), (2, 2, True)) self.assertEqual(topo.Get_dist_neighbors(), (sources, destinations, weights)) topo.Free() # topo = comm.Create_dist_graph_adjacent(sources, None, MPI.UNWEIGHTED, None) self.assertEqual(topo.Get_dist_neighbors_count(), (2, 0, False)) self.assertEqual(topo.Get_dist_neighbors(), (sources, [], None)) topo.Free() topo = comm.Create_dist_graph_adjacent(None, destinations, None, MPI.UNWEIGHTED) self.assertEqual(topo.Get_dist_neighbors_count(), (0, 2, False)) self.assertEqual(topo.Get_dist_neighbors(), ([], destinations, None)) topo.Free() if MPI.VERSION < 3: return topo = comm.Create_dist_graph_adjacent([], [], MPI.WEIGHTS_EMPTY, MPI.WEIGHTS_EMPTY) self.assertEqual(topo.Get_dist_neighbors_count(), (0, 0, True)) self.assertEqual(topo.Get_dist_neighbors(), ([], [], ([], []))) topo.Free() @unittest.skipMPI('msmpi') def testDistgraphcomm(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() # try: topo = comm.Create_dist_graph([], [], [], MPI.UNWEIGHTED) topo.Free() except NotImplementedError: self.skipTest('mpi-comm-create_dist_graph') # sources = [rank] degrees = [3] destinations = [(rank-1)%size, rank, (rank+1)%size] topo = comm.Create_dist_graph(sources, degrees, destinations, MPI.UNWEIGHTED) self.assertTrue(topo.is_topo) self.assertEqual(topo.topology, MPI.DIST_GRAPH) self.checkFortran(topo) self.assertEqual(topo.Get_dist_neighbors_count(), (3, 3, False)) topo.Free() weights = list(range(1,4)) topo = comm.Create_dist_graph(sources, degrees, destinations, weights) self.assertEqual(topo.Get_dist_neighbors_count(), (3, 3, True)) topo.Free() def testCartMap(self): comm = self.COMM size = comm.Get_size() for ndim in (1, 2, 3, 4, 5): for periods in (None, True, False): dims = MPI.Compute_dims(size, ndim) topo = comm.Create_cart(dims, periods, reorder=True) rank = comm.Cart_map(dims, periods) self.assertEqual(topo.Get_rank(), rank) topo.Free() def testGraphMap(self): comm = self.COMM size = comm.Get_size() index, edges = [0], [] for i in range(size): pos = index[-1] index.append(pos+2) edges.append((i-1)%size) edges.append((i+1)%size) # Version 1 topo = comm.Create_graph(index, edges, reorder=True) rank = comm.Graph_map(index, 
edges) self.assertEqual(topo.Get_rank(), rank) topo.Free() # Version 2 topo = comm.Create_graph(index[1:], edges, reorder=True) rank = comm.Graph_map(index[1:], edges) self.assertEqual(topo.Get_rank(), rank) topo.Free() class TestTopoSelf(BaseTestTopo, unittest.TestCase): COMM = MPI.COMM_SELF class TestTopoWorld(BaseTestTopo, unittest.TestCase): COMM = MPI.COMM_WORLD class TestTopoSelfDup(TestTopoSelf): def setUp(self): self.COMM = MPI.COMM_SELF.Dup() def tearDown(self): self.COMM.Free() class TestTopoWorldDup(TestTopoWorld): def setUp(self): self.COMM = MPI.COMM_WORLD.Dup() def tearDown(self): self.COMM.Free() if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_ctypes.py000066400000000000000000000032401475341043600166450ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest try: import ctypes except ImportError: ctypes = None @unittest.skipIf(ctypes is None, 'ctypes') class TestCTYPES(unittest.TestCase): objects = [ MPI.DATATYPE_NULL, MPI.INT, MPI.DOUBLE, MPI.REQUEST_NULL, MPI.INFO_NULL, MPI.INFO_ENV, MPI.ERRHANDLER_NULL, MPI.ERRORS_RETURN, MPI.ERRORS_ARE_FATAL, MPI.GROUP_NULL, MPI.GROUP_EMPTY, MPI.WIN_NULL, MPI.OP_NULL, MPI.SUM, MPI.MIN, MPI.MAX, MPI.FILE_NULL, MPI.MESSAGE_NULL, MPI.MESSAGE_NO_PROC, MPI.COMM_NULL, MPI.COMM_SELF, MPI.COMM_WORLD, ] def testHandleAdress(self): typemap = {ctypes.sizeof(ctypes.c_int): ctypes.c_int, ctypes.sizeof(ctypes.c_void_p): ctypes.c_void_p} for obj in self.objects: handle_t = typemap[MPI._sizeof(obj)] oldobj = obj newobj = type(obj)() handle_old = handle_t.from_address(MPI._addressof(oldobj)) handle_new = handle_t.from_address(MPI._addressof(newobj)) handle_new.value = handle_old.value self.assertEqual(obj, newobj) def testHandleValue(self): typemap = {ctypes.sizeof(ctypes.c_uint32): ctypes.c_uint32, ctypes.sizeof(ctypes.c_uint64): ctypes.c_uint64} for obj in self.objects: uintptr_t = typemap[MPI._sizeof(obj)] handle = uintptr_t.from_address(MPI._addressof(obj)) self.assertEqual(handle.value, MPI._handleof(obj)) if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_datatype.py000066400000000000000000000537751475341043600171730ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import struct datatypes_c = [ MPI.CHAR, MPI.WCHAR, MPI.SIGNED_CHAR, MPI.SHORT, MPI.INT, MPI.LONG, MPI.UNSIGNED_CHAR, MPI.UNSIGNED_SHORT, MPI.UNSIGNED, MPI.UNSIGNED_LONG, MPI.LONG_LONG, MPI.UNSIGNED_LONG_LONG, MPI.FLOAT, MPI.DOUBLE, MPI.LONG_DOUBLE, ] datatypes_c99 = [ MPI.C_BOOL, MPI.INT8_T, MPI.INT16_T, MPI.INT32_T, MPI.INT64_T, MPI.UINT8_T, MPI.UINT16_T, MPI.UINT32_T, MPI.UINT64_T, MPI.C_COMPLEX, MPI.C_FLOAT_COMPLEX, MPI.C_DOUBLE_COMPLEX, MPI.C_LONG_DOUBLE_COMPLEX, ] datatypes_f = [ MPI.CHARACTER, MPI.LOGICAL, MPI.INTEGER, MPI.REAL, MPI.DOUBLE_PRECISION, MPI.COMPLEX, MPI.DOUBLE_COMPLEX, ] datatypes_f90 = [ MPI.LOGICAL1, MPI.LOGICAL2, MPI.LOGICAL4, MPI.LOGICAL8, MPI.INTEGER1, MPI.INTEGER2, MPI.INTEGER4, MPI.INTEGER8, MPI.INTEGER16, MPI.REAL2, MPI.REAL4, MPI.REAL8, MPI.REAL16, MPI.COMPLEX4, MPI.COMPLEX8, MPI.COMPLEX16, MPI.COMPLEX32, ] datatypes_mpi = [ MPI.PACKED, MPI.BYTE, MPI.AINT, MPI.OFFSET, ] datatypes = [] datatypes += datatypes_c datatypes += datatypes_c99 datatypes += datatypes_f datatypes += datatypes_f90 datatypes += datatypes_mpi for typelist in [datatypes, datatypes_f, datatypes_f90]: typelist[:] = [ t for t in datatypes if t != MPI.DATATYPE_NULL and t.Get_name() != 'MPI_DATATYPE_NULL' and t.Get_size() != 0 ] del typelist combiner_map = {} class TestDatatypeNull(unittest.TestCase): 
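    # Checks the default Datatype() constructor and the name reported for MPI.DATATYPE_NULL.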
def testConstructor(self): datatype = MPI.Datatype() self.assertEqual(datatype, MPI.DATATYPE_NULL) self.assertIsNot(datatype, MPI.DATATYPE_NULL) def construct(): MPI.Datatype((1,2,3)) self.assertRaises(TypeError, construct) def testGetName(self): name = MPI.DATATYPE_NULL.Get_name() self.assertEqual(name, "MPI_DATATYPE_NULL") class TestDatatype(unittest.TestCase): def testBoolEqNe(self): for dtype in datatypes: self.assertTrue(not not dtype) eq = (dtype == MPI.Datatype(dtype)) ne = (dtype != MPI.Datatype(dtype)) self.assertTrue(eq) self.assertFalse(ne) def testGetExtent(self): for dtype in datatypes: lb, ext = dtype.Get_extent() self.assertEqual(dtype.lb, lb) self.assertEqual(dtype.ub, lb+ext) self.assertEqual(dtype.extent, ext) def testGetSize(self): for dtype in datatypes: size = dtype.Get_size() self.assertEqual(dtype.size, size) def testGetTrueExtent(self): for dtype in datatypes: try: lb, ext = dtype.Get_true_extent() self.assertEqual(dtype.true_lb, lb) self.assertEqual(dtype.true_ub, lb+ext) self.assertEqual(dtype.true_extent, ext) except NotImplementedError: self.skipTest('mpi-type-get_true_extent') match_size_integer = [1, 2, 4, 8] match_size_real = [4, 8] match_size_complex = [8, 16] @unittest.skipMPI('MPI(<2.0)') @unittest.skipMPI('openmpi', (MPI.CHARACTER == MPI.DATATYPE_NULL or MPI.CHARACTER.Get_size() == 0)) def testMatchSize(self): typeclass = MPI.TYPECLASS_INTEGER for size in self.match_size_integer: datatype = MPI.Datatype.Match_size(typeclass, size) self.assertEqual(size, datatype.size) typeclass = MPI.TYPECLASS_REAL for size in self.match_size_real: datatype = MPI.Datatype.Match_size(typeclass, size) self.assertEqual(size, datatype.size) typeclass = MPI.TYPECLASS_COMPLEX for size in self.match_size_complex: datatype = MPI.Datatype.Match_size(typeclass, size) self.assertEqual(size, datatype.size) def testGetValueIndex(self): typenames = ('SHORT', 'INT', 'LONG', 'FLOAT', 'DOUBLE', 'LONG_DOUBLE') value_types = [getattr(MPI, f'{attr}') for attr in typenames] pair_types = [getattr(MPI, f'{attr}_INT') for attr in typenames] for value, pair in zip(value_types, pair_types): result = MPI.Datatype.Get_value_index(value, MPI.INT) self.assertEqual(result, pair) for value in value_types: result = MPI.Datatype.Get_value_index(value, MPI.FLOAT) self.assertEqual(result, MPI.DATATYPE_NULL) def testGetEnvelope(self): for dtype in datatypes: try: envelope = dtype.Get_envelope() except NotImplementedError: self.skipTest('mpi-type-get_envelope') if ('LAM/MPI' == MPI.get_vendor()[0] and "COMPLEX" in dtype.name): continue ni, na, nc, nd, combiner = envelope self.assertEqual(combiner, MPI.COMBINER_NAMED) self.assertEqual(ni, 0) self.assertEqual(na, 0) self.assertEqual(nc, 0) self.assertEqual(nd, 0) self.assertEqual(dtype.envelope, envelope) self.assertEqual(dtype.combiner, combiner) self.assertTrue(dtype.is_named) self.assertTrue(dtype.is_predefined) otype, combiner, params = dtype.decode() self.assertIs(dtype, otype) self.assertEqual(combiner, "NAMED") self.assertEqual(params, {}) def testGetSetName(self): name = MPI.DATATYPE_NULL.Get_name() self.assertEqual(name, "MPI_DATATYPE_NULL") for dtype in datatypes: try: name = dtype.Get_name() self.assertTrue(name) dtype.Set_name(name) self.assertEqual(name, dtype.Get_name()) dtype.name = dtype.name except NotImplementedError: self.skipTest('mpi-type-name') def testCommit(self): for dtype in datatypes: dtype.Commit() def testCodeCharStr(self): f90datatypes = [] try: try: for r in (1, 2, 4): f90datatypes.append(MPI.Datatype.Create_f90_integer(r)) for p, 
r in ((6, 30), (15, 300)): f90datatypes.append(MPI.Datatype.Create_f90_real(p, r)) f90datatypes.append(MPI.Datatype.Create_f90_complex(p, r)) except MPI.Exception: if not unittest.is_mpi('msmpi'): raise f90datatypes = [ dtype for dtype in f90datatypes if dtype and dtype.size > 0 ] except NotImplementedError: f90datatypes = [] pass largef90datatypes = [] if MPI.INTEGER16 != MPI.DATATYPE_NULL: largef90datatypes += [MPI.INTEGER16] if struct.calcsize('P') == 4 or MPI.DOUBLE.extent == MPI.LONG_DOUBLE.extent: largef90datatypes += [MPI.REAL16, MPI.COMPLEX32] for dtype in datatypes + f90datatypes: with self.subTest(datatype=dtype.name or "f90"): if dtype in largef90datatypes: continue code = dtype.tocode() self.assertIsNotNone(code) mpitype = MPI.Datatype.fromcode(code) self.assertEqual(dtype.typechar, mpitype.typechar) self.assertEqual(dtype.typestr, mpitype.typestr) try: mpitypedup1 = mpitype.Dup() self.assertEqual(mpitypedup1.tocode(), mpitype.tocode()) self.assertEqual(mpitypedup1.typestr, mpitype.typestr) self.assertEqual(mpitypedup1.typechar, mpitype.typechar) mpitypedup2 = mpitypedup1.Dup() self.assertEqual(mpitypedup2.tocode(), mpitype.tocode()) self.assertEqual(mpitypedup2.typestr, mpitype.typestr) self.assertEqual(mpitypedup2.typechar, mpitype.typechar) finally: mpitypedup1.Free() mpitypedup2.Free() with self.assertRaises(ValueError): MPI.Datatype.fromcode("abc@xyz") with self.assertRaises(ValueError): MPI.DATATYPE_NULL.tocode() with self.assertRaises(ValueError): MPI.INT_INT.tocode() self.assertEqual(MPI.INT_INT.typechar, 'V') self.assertEqual(MPI.INT_INT.typestr, f'V{MPI.INT.extent*2}') class BaseTestDatatypeCreateMixin: def free(self, newtype): if newtype == MPI.DATATYPE_NULL: return *_, combiner = newtype.Get_envelope() if combiner in ( MPI.COMBINER_NAMED, MPI.COMBINER_F90_INTEGER, MPI.COMBINER_F90_REAL, MPI.COMBINER_F90_COMPLEX, ): return newtype.Free() def check_contents(self, factory, newtype, oldtype): try: envelope = newtype.Get_envelope() contents = newtype.Get_contents() except NotImplementedError: self.skipTest('mpi-type-get_envelope') ni, na, nc, nd, combiner = envelope i, a, c, d = contents self.assertEqual(ni, len(i)) self.assertEqual(na, len(a)) self.assertEqual(nc, len(c)) self.assertEqual(nd, len(d)) self.assertNotEqual(combiner, MPI.COMBINER_NAMED) self.assertEqual(newtype.envelope, envelope) self.assertEqual(newtype.combiner, combiner) self.assertFalse(newtype.is_named) if combiner in (MPI.COMBINER_F90_INTEGER, MPI.COMBINER_F90_REAL, MPI.COMBINER_F90_COMPLEX,): self.assertTrue(newtype.is_predefined) else: self.assertFalse(newtype.is_predefined) for dt in d: self.free(dt) contents = newtype.contents self.assertEqual(contents[:-1], (i, a, c)) for dt in contents[-1]: self.free(dt) def check_recreate(self, factory, newtype): name = factory.__name__ name = name.replace('Get_value_index', 'Create_value_index') NAME = name.replace('Create_', '').upper() symbol = getattr(MPI, 'COMBINER_' + NAME) if symbol == MPI.UNDEFINED: return if combiner_map is None: return symbol = combiner_map.get(symbol, symbol) if symbol is None: return self.assertEqual(symbol, newtype.combiner) decoded1 = newtype.decode() oldtype, constructor, kwargs = decoded1 prefix = 'create' if constructor != 'VALUE_INDEX' else 'get' constructor = prefix.title() + '_' + constructor.lower() newtype2 = getattr(oldtype, constructor)(**kwargs) decoded2 = newtype2.decode() types1 = decoded1[2].pop('datatypes', []) types2 = decoded2[2].pop('datatypes', []) for dt1, dt2 in zip(types1, types2): 
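            # Datatypes decoded and then recreated must agree on combiner and type code, pair by pair.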
self.assertEqual(dt1.combiner, dt2.combiner) self.assertEqual(dt1.typechar, dt2.typechar) self.assertEqual(dt1.typestr, dt2.typestr) self.free(dt1) self.free(dt2) self.assertEqual(decoded1[1], decoded2[1]) self.assertEqual(decoded2[2], decoded2[2]) for dec in (decoded1, decoded2): self.free(dec[0]) self.free(newtype2) def testDup(self): for dtype in datatypes: factory = MPI.Datatype.Dup self.check(dtype, factory) def testContiguous(self): for dtype in datatypes: for count in range(5): factory = MPI.Datatype.Create_contiguous args = (count, ) self.check(dtype, factory, *args) def testVector(self): for dtype in datatypes: for count in range(5): for blocklength in range(5): for stride in range(5): factory = MPI.Datatype.Create_vector args = (count, blocklength, stride) self.check(dtype, factory, *args) def testHvector(self): for dtype in datatypes: for count in range(5): for blocklength in range(5): for stride in range(5): factory = MPI.Datatype.Create_hvector args = (count, blocklength, stride) self.check(dtype, factory, *args) def testIndexed(self): for dtype in datatypes: for block in range(5): blocklengths = list(range(block, block+5)) displacements = [0] for b in blocklengths[:-1]: stride = displacements[-1] + b * dtype.extent + 1 displacements.append(stride) factory = MPI.Datatype.Create_indexed args = (blocklengths, displacements) self.check(dtype, factory, *args) #args = (block, displacements) XXX #self.check(dtype, factory, *args) XXX def testIndexedBlock(self): for dtype in datatypes: for block in range(5): blocklengths = list(range(block, block+5)) displacements = [0] for b in blocklengths[:-1]: stride = displacements[-1] + b * dtype.extent + 1 displacements.append(stride) factory = MPI.Datatype.Create_indexed_block args = (block, displacements) self.check(dtype, factory, *args) def testHindexed(self): for dtype in datatypes: for block in range(5): blocklengths = list(range(block, block+5)) displacements = [0] for b in blocklengths[:-1]: stride = displacements[-1] + b * dtype.extent + 1 displacements.append(stride) factory = MPI.Datatype.Create_hindexed args = (blocklengths, displacements) self.check(dtype, factory, *args) #args = (block, displacements) XXX #self.check(dtype, factory, *args) XXX @unittest.skipMPI('openmpi(<=1.8.1)', MPI.VERSION == 3) def testHindexedBlock(self): for dtype in datatypes: for block in range(5): displacements = [0] for i in range(5): stride = displacements[-1] + block * dtype.extent + 1 displacements.append(stride) factory = MPI.Datatype.Create_hindexed_block args = (block, displacements) self.check(dtype, factory, *args) def testStruct(self): for dtype1 in datatypes: for dtype2 in datatypes: dtypes = (dtype1, dtype2) blocklengths = (2, 3) displacements = [0] for dtype in dtypes[:-1]: stride = displacements[-1] + dtype.extent displacements.append(stride) factory = MPI.Datatype.Create_struct args = (blocklengths, displacements, dtypes) self.check(None, factory, *args) for dtype in datatypes: factory = MPI.Datatype.Create_struct dtypes = [dtype.Dup()] dtypes.append(dtypes[-1].Create_contiguous(2)) dtypes.append(dtypes[-1].Dup()) dtypes.append(dtypes[-1].Create_struct([1],[0],[dtypes[-1]])) dtypes.append(dtypes[-1].Dup()) dtypes.append(dtypes[-1].Create_resized(0, dtypes[-1].extent)) dtypes.append(dtypes[-1].Dup()) for dt in dtypes: args = [[1, 1], [0, dt.extent*2], (dt, dt)] self.check(None, factory, *args) dt.Free() with self.assertRaises(ValueError): factory = MPI.Datatype.Create_struct factory([1], [0], [MPI.INT, MPI.FLOAT]) def testSubarray(self): 
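        # Sweep subarray sizes, offsets, and both C and Fortran storage orders.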
for dtype in datatypes: for ndim in range(1, 5): for size in range(1, 5): for subsize in range(1, size): for start in range(size-subsize): for order in [ MPI.ORDER_C, MPI.ORDER_FORTRAN, MPI.ORDER_F, ]: sizes = [size] * ndim subsizes = [subsize] * ndim starts = [start] * ndim factory = MPI.Datatype.Create_subarray args = sizes, subsizes, starts, order self.check(dtype, factory, *args) def testDarray(self): for dtype in datatypes: for ndim in range(1, 3+1): for size in (4, 8, 9, 27): for rank in (0, size-1): for dist in [ MPI.DISTRIBUTE_BLOCK, MPI.DISTRIBUTE_CYCLIC ]: for order in [MPI.ORDER_C, MPI.ORDER_F]: gsizes = [size]*ndim distribs = [dist]*ndim dargs = [MPI.DISTRIBUTE_DFLT_DARG]*ndim psizes = MPI.Compute_dims(size, [0]*ndim) factory = MPI.Datatype.Create_darray args = ( size, rank, gsizes, distribs, dargs, psizes, order, ) self.check(dtype, factory, *args) def testF90Integer(self): for r in (1, 2, 4): factory = MPI.Datatype.Create_f90_integer args = (r,) self.check(None, factory, *args) @unittest.skipMPI('openmpi(<3.0.0)') @unittest.skipMPI('msmpi') def testF90RealSingle(self): (p, r) = (6, 30) factory = MPI.Datatype.Create_f90_real args = (p, r) self.check(None, factory, *args) @unittest.skipMPI('openmpi(<3.0.0)') @unittest.skipMPI('msmpi') def testF90RealDouble(self): (p, r) = (15, 300) factory = MPI.Datatype.Create_f90_real args = (p, r) self.check(None, factory, *args) @unittest.skipMPI('openmpi(<3.0.0)') @unittest.skipMPI('msmpi') def testF90ComplexSingle(self): (p, r) = (6, 30) factory = MPI.Datatype.Create_f90_complex args = (p, r) self.check(None, factory, *args) @unittest.skipMPI('openmpi(<3.0.0)') @unittest.skipMPI('msmpi') def testF90ComplexDouble(self): (p, r) = (15, 300) factory = MPI.Datatype.Create_f90_complex args = (p, r) self.check(None, factory, *args) def testResized(self): for dtype in datatypes: for lb in range(-10, 10): for extent in range(1, 10): factory = MPI.Datatype.Create_resized args = lb, extent self.check(dtype, factory, *args) def testValueIndex(self): integral_types = datatypes_c[2:-3] + datatypes_c99[1:9] floating_types = datatypes_c[-3:] value_types = integral_types + floating_types index_types = integral_types for value in value_types: if value == MPI.DATATYPE_NULL: continue for index in index_types: if index == MPI.DATATYPE_NULL: continue factory = MPI.Datatype.Get_value_index pair = factory(value, index) if pair == MPI.DATATYPE_NULL: continue if pair.is_named: continue self.check(None, factory, value, index) class TestDatatypeCreate(BaseTestDatatypeCreateMixin, unittest.TestCase): def check(self, oldtype, factory, *args): try: if oldtype is not None: newtype = factory(oldtype, *args) else: newtype = factory(*args) if newtype == MPI.DATATYPE_NULL: return except NotImplementedError: self.skipTest('mpi-type-constructor') self.check_contents(factory, newtype, oldtype) self.check_recreate(factory, newtype) newtype.Commit() self.check_contents(factory, newtype, oldtype) self.check_recreate(factory, newtype) self.free(newtype) class TestDatatypePickle(BaseTestDatatypeCreateMixin, unittest.TestCase): def check(self, oldtype, factory, *args): from pickle import dumps, loads try: if oldtype is not None: newtype0 = factory(oldtype, *args) else: newtype0 = factory(*args) if newtype0 == MPI.DATATYPE_NULL: return except NotImplementedError: self.skipTest('mpi-type-constructor') newtype1 = loads(dumps(newtype0)) self.check_contents(factory, newtype1, oldtype) self.free(newtype1) self.free(newtype0) def testNamed(self): from pickle import dumps, loads for dtype 
in [MPI.DATATYPE_NULL] + datatypes: newdtype = loads(dumps(dtype)) self.assertIs(newdtype, dtype) newdtype = loads(dumps(MPI.Datatype(dtype))) self.assertIsNot(newdtype, dtype) self.assertEqual(newdtype, dtype) name, version = MPI.get_vendor() if name == 'LAM/MPI': combiner_map[MPI.COMBINER_INDEXED_BLOCK] = MPI.COMBINER_INDEXED elif name == 'MPICH1': combiner_map[MPI.COMBINER_VECTOR] = None combiner_map[MPI.COMBINER_HVECTOR] = None combiner_map[MPI.COMBINER_INDEXED] = None combiner_map[MPI.COMBINER_HINDEXED_BLOCK] = None for t in datatypes_f: if t in datatypes: datatypes.remove(t) if t in datatypes_f: datatypes_f.remove(t) elif MPI.Get_version() < (2,0): combiner_map = None if name == 'Open MPI': if (1,6,0) < version < (1,7,0): TestDatatype.match_size_complex[:] = [] if version < (1,5,2): for t in [getattr(MPI, f'COMPLEX{i}') for i in (4, 8, 16, 32)]: if t in datatypes: datatypes.remove(t) if t in datatypes_f90: datatypes_f90.remove(t) if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_doc.py000066400000000000000000000060071475341043600161070ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import sys ModuleType = type(MPI) ClassType = type(MPI.Comm) FunctionType = type(MPI.Init) StaticMethodType = type(MPI.buffer.allocate) ClassMethodType = type(MPI.Comm.Get_parent) MethodDescrType = type(MPI.Comm.Get_rank) GetSetDescrType = type(MPI.Comm.rank) def getdocstr(mc, docstrings, namespace=None): name = getattr(mc, '__name__', None) if name is None: return if name in ('__builtin__', 'builtins'): return if namespace: name = f'{namespace}.{name}' if type(mc) in ( ModuleType, ClassType, ): doc = getattr(mc, '__doc__', None) if doc == "": return docstrings[name] = doc for k, v in vars(mc).items(): if isinstance(v, (classmethod, staticmethod)): v = v.__get__(mc) getdocstr(v, docstrings, name) elif type(mc) in ( FunctionType, StaticMethodType, ClassMethodType, MethodDescrType, GetSetDescrType, ): doc = getattr(mc, '__doc__', None) if doc == "": return if doc is not None: sig, _, doc = doc.partition('\n') docstrings[name] = doc @unittest.skipIf(hasattr(sys, 'pypy_version_info'), 'pypy') class TestDoc(unittest.TestCase): def testDoc(self): ignore = {'py2f', 'f2py'} invalid = False missing = False docs = { } getdocstr(MPI, docs) for k in docs: doc = docs[k] name = k.split('.')[-1] if name in ignore: continue if not doc and name.startswith('_'): continue if doc is None: print (f"'{k}': missing docstring") missing = True continue if not doc.strip(): print (f"'{k}': empty docstring") missing = True continue if doc.startswith('\n') and not doc.endswith(' '): print (f"'{k}': mismatch start and end whitespace") invalid = True if not doc.startswith('\n') and doc.endswith(' '): print (f"'{k}': mismatch start and end whitespace") invalid = True if doc.replace(' ', '').endswith('\n\n'): print (f"'{k}': docstring ends with too many newlines") invalid = True doc = doc.strip() if doc[0] == doc[0].lower(): print (f"'{k}': docstring starts with lowercase") invalid = True if not doc.endswith('.'): print (f"'{k}': docstring does not end with '.'") invalid = True summary, _, description = doc.partition('\n') if not summary.endswith('.'): print (f"'{k}': summary line does not end with '.'") invalid = True self.assertFalse(missing) self.assertFalse(invalid) if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_dynproc.py000066400000000000000000000174141475341043600170240ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import os 
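# socket is optional; the Comm.Join test below is skipped when it is unavailable.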
try: import socket except ImportError: socket = None def github(): return os.environ.get('GITHUB_ACTIONS') == 'true' def ch4_ucx(): return 'ch4:ucx' in MPI.Get_library_version() def ch4_ofi(): return 'ch4:ofi' in MPI.Get_library_version() def appnum(): if MPI.APPNUM == MPI.KEYVAL_INVALID: return None return MPI.COMM_WORLD.Get_attr(MPI.APPNUM) def badport(): if MPI.get_vendor()[0] != 'MPICH': return False try: port = MPI.Open_port() MPI.Close_port(port) except: port = "" return port == "" @unittest.skipMPI('mpich(<4.3.0)', badport()) @unittest.skipMPI('openmpi(<2.0.0)') @unittest.skipMPI('openmpi(>=5.0.0,<5.0.4)') @unittest.skipMPI('msmpi(<8.1.0)') @unittest.skipMPI('mvapich(<3.0.0)') @unittest.skipIf(MPI.COMM_WORLD.Get_size() < 2, 'mpi-world-size<2') class TestDPM(unittest.TestCase): message = [ None, True, False, -7, 0, 7, -2**63+1, 2**63-1, -2.17, 0.0, 3.14, 1+2j, 2-3j, 'mpi4py', (1, 2, 3), [1, 2, 3], {1:2}, ] def testNamePublishing(self): rank = MPI.COMM_WORLD.Get_rank() service = f"mpi4py-{rank}" port = MPI.Open_port() MPI.Publish_name(service, port) found = MPI.Lookup_name(service) self.assertEqual(port, found) MPI.Unpublish_name(service, port) MPI.Close_port(port) @unittest.skipMPI('mpich(==3.4.1)', ch4_ofi()) @unittest.skipMPI('mvapich', ch4_ofi()) @unittest.skipMPI('impi', MPI.COMM_WORLD.Get_size() > 2) def testAcceptConnect(self): comm_self = MPI.COMM_SELF comm_world = MPI.COMM_WORLD wrank = comm_world.Get_rank() group_world = comm_world.Get_group() group = group_world.Excl([0]) group_world.Free() comm = comm_world.Create(group) group.Free() if wrank == 0: self.assertEqual(comm, MPI.COMM_NULL) else: self.assertNotEqual(comm, MPI.COMM_NULL) self.assertEqual(comm.size, comm_world.size-1) self.assertEqual(comm.rank, comm_world.rank-1) if wrank == 0: port = MPI.Open_port() comm_world.send(port, dest=1) intercomm = comm_self.Accept(port) self.assertEqual(intercomm.remote_size, comm_world.size-1) self.assertEqual(intercomm.size, 1) self.assertEqual(intercomm.rank, 0) MPI.Close_port(port) else: if wrank == 1: port = comm_world.recv(source=0) else: port = None intercomm = comm.Connect(port, root=0) self.assertEqual(intercomm.remote_size, 1) self.assertEqual(intercomm.size, comm_world.size-1) self.assertEqual(intercomm.rank, comm.rank) comm.Free() if wrank == 0: message = TestDPM.message root = MPI.ROOT else: message = None root = 0 message = intercomm.bcast(message, root) if wrank == 0: self.assertIsNone(message) else: self.assertEqual(message, TestDPM.message) intercomm.Free() def testConnectAccept(self): comm_self = MPI.COMM_SELF comm_world = MPI.COMM_WORLD wrank = comm_world.Get_rank() group_world = comm_world.Get_group() group = group_world.Excl([0]) group_world.Free() comm = comm_world.Create(group) group.Free() if wrank == 0: self.assertEqual(comm, MPI.COMM_NULL) else: self.assertNotEqual(comm, MPI.COMM_NULL) self.assertEqual(comm.size, comm_world.size-1) self.assertEqual(comm.rank, comm_world.rank-1) if wrank == 0: port = comm_world.recv(source=1) intercomm = comm_self.Connect(port) self.assertEqual(intercomm.remote_size, comm_world.size-1) self.assertEqual(intercomm.size, 1) self.assertEqual(intercomm.rank, 0) else: if wrank == 1: port = MPI.Open_port() comm_world.send(port, dest=0) else: port = None intercomm = comm.Accept(port, root=0) if wrank == 1: MPI.Close_port(port) self.assertEqual(intercomm.remote_size, 1) self.assertEqual(intercomm.size, comm_world.size-1) self.assertEqual(intercomm.rank, comm.rank) comm.Free() if wrank == 0: message = TestDPM.message root = MPI.ROOT 
else: message = None root = 0 message = intercomm.bcast(message, root) if wrank == 0: self.assertIsNone(message) else: self.assertEqual(message, TestDPM.message) intercomm.Free() @unittest.skipIf(socket is None, 'socket') @unittest.skipMPI('impi', MPI.COMM_WORLD.Get_size() > 2 and github() and os.name == 'nt') def testJoin(self): size = MPI.COMM_WORLD.Get_size() rank = MPI.COMM_WORLD.Get_rank() server = client = address = None host = socket.gethostname() addrinfo = socket.getaddrinfo(host, None, type=socket.SOCK_STREAM) addr_families = [info[0] for info in addrinfo] # if both INET and INET6 are available, don't assume the order # is the same on both server and client. Prefer INET if available. addr_family = None if socket.AF_INET in addr_families: addr_family = socket.AF_INET elif socket.AF_INET6 in addr_families: addr_family = socket.AF_INET6 addr_family = MPI.COMM_WORLD.bcast(addr_family, root=0) supported = (addr_family in addr_families) supported = MPI.COMM_WORLD.allreduce(supported, op=MPI.LAND) if not supported: self.skipTest("socket-inet") # create server/client sockets if rank == 0: # server server = socket.socket(addr_family, socket.SOCK_STREAM) server.bind((host, 0)) server.listen(0) if rank == 1: # client client = socket.socket(addr_family, socket.SOCK_STREAM) # communicate address if rank == 0: address = server.getsockname() MPI.COMM_WORLD.ssend(address, 1) if rank == 1: address = MPI.COMM_WORLD.recv(None, 0) MPI.COMM_WORLD.Barrier() # stablish client/server connection connected = False if rank == 0: # server client = server.accept()[0] server.close() if rank == 1: # client try: client.connect(address) connected = True except OSError: raise connected = MPI.COMM_WORLD.bcast(connected, root=1) # test Comm.Join() MPI.COMM_WORLD.Barrier() if client: fd = client.fileno() intercomm = MPI.Comm.Join(fd) client.close() if intercomm != MPI.COMM_NULL: self.assertEqual(intercomm.remote_size, 1) self.assertEqual(intercomm.size, 1) self.assertEqual(intercomm.rank, 0) if rank == 0: message = TestDPM.message root = MPI.ROOT else: message = None root = 0 message = intercomm.bcast(message, root) if rank == 0: self.assertIsNone(message) else: self.assertEqual(message, TestDPM.message) intercomm.Free() MPI.COMM_WORLD.Barrier() if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_environ.py000066400000000000000000000062371475341043600170270ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import os def appnum(): if MPI.APPNUM == MPI.KEYVAL_INVALID: return None return MPI.COMM_WORLD.Get_attr(MPI.APPNUM) class TestEnviron(unittest.TestCase): def testIsInitialized(self): flag = MPI.Is_initialized() self.assertIs(type(flag), bool) self.assertTrue(flag) def testIsFinalized(self): flag = MPI.Is_finalized() self.assertIs(type(flag), bool) self.assertFalse(flag) def testGetVersion(self): version = MPI.Get_version() self.assertEqual(len(version), 2) major, minor = version self.assertIs(type(major), int) self.assertIs(type(minor), int) self.assertGreaterEqual(major, 1) self.assertGreaterEqual(minor, 0) def testGetLibraryVersion(self): version = MPI.Get_library_version() self.assertIsInstance(version, str) self.assertGreater(len(version), 0) def testGetProcessorName(self): procname = MPI.Get_processor_name() self.assertIsInstance(procname, str) def testGetHWResourceInfo(self): with self.catchNotImplementedError(4, 1): info = MPI.Get_hw_resource_info() self.assertIsInstance(info, MPI.Info) def testWTime(self): time1 = MPI.Wtime() self.assertIs(type(time1), float) 
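        # A timestamp taken later must not be earlier than the first one.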
time2 = MPI.Wtime() self.assertIs(type(time2), float) self.assertGreaterEqual(time2, time1) @unittest.skipMPI('impi', os.name == 'nt') def testWTick(self): tick = MPI.Wtick() self.assertIs(type(tick), float) self.assertGreater(tick, 0.0) def testPControl(self): for level in (2, 1, 0): MPI.Pcontrol(level) MPI.Pcontrol(1) class TestWorldAttrs(unittest.TestCase): def testWTimeIsGlobal(self): wtg = MPI.COMM_WORLD.Get_attr(MPI.WTIME_IS_GLOBAL) if wtg is not None: self.assertIn(wtg, (True, False)) def testIOProcessor(self): size = MPI.COMM_WORLD.Get_size() vals = list(range(size)) vals += [MPI.UNDEFINED, MPI.ANY_SOURCE, MPI.PROC_NULL] ioproc = MPI.COMM_WORLD.Get_attr(MPI.IO) if ioproc is not None: self.assertIn(ioproc, vals) @unittest.skipIf(MPI.APPNUM == MPI.KEYVAL_INVALID, 'mpi-appnum') def testAppNum(self): appnum = MPI.COMM_WORLD.Get_attr(MPI.APPNUM) if appnum is not None: self.assertTrue(appnum == MPI.UNDEFINED or appnum >= 0) @unittest.skipMPI('mpich(<4.1.0)', appnum() is None) @unittest.skipMPI('mvapich', appnum() is None) @unittest.skipMPI('MPICH2', appnum() is None) @unittest.skipIf(MPI.UNIVERSE_SIZE == MPI.KEYVAL_INVALID, 'mpi-universe-size') def testUniverseSize(self): univsz = MPI.COMM_WORLD.Get_attr(MPI.UNIVERSE_SIZE) if univsz is not None: self.assertTrue(univsz == MPI.UNDEFINED or univsz >= 0) @unittest.skipIf(MPI.LASTUSEDCODE == MPI.KEYVAL_INVALID, 'mpi-lastusedcode') def testLastUsedCode(self): lastuc = MPI.COMM_WORLD.Get_attr(MPI.LASTUSEDCODE) self.assertGreaterEqual(lastuc, 0) if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_errhandler.py000066400000000000000000000144471475341043600174770ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest class TestErrhandler(unittest.TestCase): def testPredefined(self): self.assertFalse(MPI.ERRHANDLER_NULL) self.assertTrue(MPI.ERRORS_ARE_FATAL) self.assertTrue(MPI.ERRORS_RETURN) if MPI.VERSION >= 4: self.assertTrue(MPI.ERRORS_ABORT) elif MPI.ERRORS_ABORT != MPI.ERRHANDLER_NULL: self.assertTrue(MPI.ERRORS_ABORT) else: self.assertFalse(MPI.ERRORS_ABORT) def testPickle(self): from pickle import dumps, loads for errhandler in [ MPI.ERRHANDLER_NULL, MPI.ERRORS_ARE_FATAL, MPI.ERRORS_RETURN, MPI.ERRORS_ABORT, ]: if not errhandler: continue errh = loads(dumps(errhandler)) self.assertIs(errh, errhandler) errh = loads(dumps(MPI.Errhandler(errhandler))) self.assertIsNot(errh, errhandler) self.assertEqual(errh, errhandler) class BaseTestErrhandler: def testCreate(self): MAX_USER_EH = 32 # max user-defined error handlers mpiobj = self.mpiobj index = None called = check = False def get_errhandler_fn(idx): def errhandler_fn(arg, err): nonlocal mpiobj, index nonlocal called, check called = check = True check &= (arg == mpiobj) check &= (err == MPI.ERR_OTHER) check &= (idx == index) return errhandler_fn def check_fortran(eh): try: fint = eh.py2f() except NotImplementedError: return clon = type(eh).f2py(fint) self.assertEqual(eh, clon) errhandlers = [] for index in range(MAX_USER_EH): try: fn = get_errhandler_fn(index) eh = type(mpiobj).Create_errhandler(fn) errhandlers.append(eh) except NotImplementedError: clsname = type(mpiobj).__name__.lower() self.skipTest(f'mpi-{clsname}-create_errhandler') with self.assertRaises(RuntimeError): type(mpiobj).Create_errhandler(lambda arg, err: None) for eh in errhandlers: self.assertTrue(eh) check_fortran(eh) with self.assertRaises(ValueError): eh.__reduce__() eh_orig = mpiobj.Get_errhandler() try: for index, eh in enumerate(errhandlers): called = check = False 
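                # Install the handler, raise an error through Call_errhandler, then verify the callback ran.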
mpiobj.Set_errhandler(eh) try: mpiobj.Call_errhandler(MPI.SUCCESS) mpiobj.Call_errhandler(MPI.ERR_OTHER) except NotImplementedError: if MPI.VERSION >= 2: raise else: self.assertTrue(called) self.assertTrue(check) finally: mpiobj.Set_errhandler(eh_orig) finally: for eh in errhandlers: eh.Free() def testCall(self): mpiobj = self.mpiobj mpiobj.Set_errhandler(MPI.ERRORS_RETURN) try: mpiobj.Call_errhandler(MPI.SUCCESS) mpiobj.Call_errhandler(MPI.ERR_OTHER) except NotImplementedError: if MPI.VERSION >= 2: raise clsname = type(mpiobj).__name__.lower() self.skipTest(f'mpi-{clsname}-call_errhandler') def testGetFree(self): mpiobj = self.mpiobj errhdls = [] for i in range(100): e = mpiobj.Get_errhandler() errhdls.append(e) for e in errhdls: e.Free() for e in errhdls: self.assertEqual(e, MPI.ERRHANDLER_NULL) def _run_test_get_set(self, errhandler): mpiobj = self.mpiobj errhdl_1 = mpiobj.Get_errhandler() self.assertNotEqual(errhdl_1, MPI.ERRHANDLER_NULL) mpiobj.Set_errhandler(errhandler) errhdl_2 = mpiobj.Get_errhandler() self.assertEqual(errhdl_2, errhandler) errhdl_2.Free() self.assertEqual(errhdl_2, MPI.ERRHANDLER_NULL) mpiobj.Set_errhandler(errhdl_1) errhdl_1.Free() self.assertEqual(errhdl_1, MPI.ERRHANDLER_NULL) def testErrorsReturn(self): self._run_test_get_set(MPI.ERRORS_RETURN) def testErrorsFatal(self): self._run_test_get_set(MPI.ERRORS_ARE_FATAL) @unittest.skipUnless(MPI.ERRORS_ABORT, 'mpi-errors-abort') @unittest.skipMPI('mpich(<4.1.0)') @unittest.skipMPI('impi(<2021.14.0)') def testErrorsAbort(self): self._run_test_get_set(MPI.ERRORS_ABORT) class TestErrhandlerComm(BaseTestErrhandler, unittest.TestCase): def setUp(self): self.mpiobj = MPI.COMM_SELF.Dup() def tearDown(self): self.mpiobj.Free() class TestErrhandlerWin(BaseTestErrhandler, unittest.TestCase): def setUp(self): try: self.mpiobj = MPI.Win.Create(MPI.BOTTOM, 1, MPI.INFO_NULL, MPI.COMM_SELF) except NotImplementedError: self.skipTest('mpi-win') except MPI.Exception: self.skipTest('mpi-win') def tearDown(self): self.mpiobj.Free() def testCall(self): super().testCall() class TestErrhandlerFile(BaseTestErrhandler, unittest.TestCase): def setUp(self): import os, tempfile rank = MPI.COMM_WORLD.Get_rank() fd, filename = tempfile.mkstemp(prefix='mpi4py-', suffix=f"-{rank}") os.close(fd) amode = MPI.MODE_WRONLY | MPI.MODE_CREATE | MPI.MODE_DELETE_ON_CLOSE try: self.mpiobj = MPI.File.Open(MPI.COMM_SELF, filename, amode, MPI.INFO_NULL) except NotImplementedError: try: os.remove(filename) except OSError: pass self.skipTest('mpi-file') def tearDown(self): self.mpiobj.Close() @unittest.skipMPI('msmpi') def testCall(self): super().testCall() class TestErrhandlerSession(BaseTestErrhandler, unittest.TestCase): def setUp(self): try: self.mpiobj = MPI.Session.Init() except NotImplementedError: self.skipTest('mpi-session') def tearDown(self): self.mpiobj.Finalize() if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_errorcode.py000066400000000000000000000147061475341043600173330ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest class TestErrorCode(unittest.TestCase): errorclasses = [item[1] for item in vars(MPI).items() if item[0].startswith('ERR_')] errorclasses.insert(0, MPI.SUCCESS) while MPI.ERR_LASTCODE in errorclasses: errorclasses.remove(MPI.ERR_LASTCODE) def testGetErrorClass(self): self.assertEqual(self.errorclasses[0], 0) for ierr in self.errorclasses: errcls = MPI.Get_error_class(ierr) self.assertGreaterEqual(errcls, MPI.SUCCESS) self.assertLessEqual(errcls, MPI.ERR_LASTCODE) 
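            # Predefined error classes map to themselves under Get_error_class.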
self.assertEqual(errcls, ierr) def testGetErrorStrings(self): for ierr in self.errorclasses: errstr = MPI.Get_error_string(ierr) self.assertGreater(len(errstr), 0) def testException(self): success = MPI.Exception(MPI.SUCCESS) lasterr = MPI.Exception(MPI.ERR_LASTCODE) for ierr in self.errorclasses: errstr = MPI.Get_error_string(ierr) errcls = MPI.Get_error_class(ierr) errexc = MPI.Exception(ierr) self.assertEqual(errexc.Get_error_code(), ierr) self.assertEqual(errexc.Get_error_class(), errcls) self.assertEqual(errexc.Get_error_string(), errstr) self.assertEqual(errexc.error_code, ierr) self.assertEqual(errexc.error_class, errcls) self.assertEqual(errexc.error_string, errstr) self.assertEqual(repr(errexc), f"MPI.Exception({ierr})") self.assertEqual(str(errexc), errstr) self.assertEqual(int(errexc), ierr) self.assertEqual(hash(errexc), hash(errexc.error_code)) self.assertTrue(bool(errexc == ierr)) self.assertTrue(bool(errexc == errexc)) self.assertFalse(bool(errexc != ierr)) self.assertFalse(bool(errexc != errexc)) self.assertTrue(bool(success <= ierr <= lasterr)) self.assertTrue(bool(success <= errexc <= lasterr)) self.assertTrue(bool(errexc >= ierr)) self.assertTrue(bool(errexc >= success)) self.assertTrue(bool(lasterr >= ierr)) self.assertTrue(bool(lasterr >= errexc)) if errexc == success: self.assertFalse(errexc) else: self.assertTrue(errexc) self.assertTrue(bool(errexc > success)) self.assertTrue(bool(success < errexc)) exc = MPI.Exception(MPI.SUCCESS-1) self.assertEqual(exc, MPI.ERR_UNKNOWN) exc = MPI.Exception(MPI.ERR_LASTCODE+1) self.assertEqual(exc, MPI.ERR_LASTCODE+1) @unittest.skipMPI('openmpi(<1.10.0)') def testAddErrorClass(self): try: errclass = MPI.Add_error_class() except NotImplementedError: self.skipTest('mpi-add_error_class') self.assertGreaterEqual(errclass, MPI.ERR_LASTCODE) try: MPI.Remove_error_class(errclass) except NotImplementedError: pass @unittest.skipMPI('openmpi(<1.10.0)') def testAddErrorCode(self): try: errcode = MPI.Add_error_code(MPI.ERR_OTHER) except NotImplementedError: self.skipTest('mpi-add_error_code') try: MPI.Remove_error_code(errcode) except NotImplementedError: pass @unittest.skipMPI('openmpi(<1.10.0)') def testAddErrorClassCodeString(self): LASTUSED = MPI.COMM_WORLD.Get_attr(MPI.LASTUSEDCODE) try: errclass = MPI.Add_error_class() except NotImplementedError: self.skipTest('mpi-add_error_class') lastused = MPI.COMM_WORLD.Get_attr(MPI.LASTUSEDCODE) self.assertEqual(errclass, lastused) errstr = MPI.Get_error_string(errclass) self.assertEqual(errstr, "") MPI.Add_error_string(errclass, "error class") self.assertEqual(MPI.Get_error_string(errclass), "error class") errcode1 = MPI.Add_error_code(errclass) errstr = MPI.Get_error_string(errcode1) self.assertEqual(errstr, "") MPI.Add_error_string(errcode1, "error code 1") self.assertEqual(MPI.Get_error_class(errcode1), errclass) self.assertEqual(MPI.Get_error_string(errcode1), "error code 1") errcode2 = MPI.Add_error_code(errclass) errstr = MPI.Get_error_string(errcode2) self.assertEqual(errstr, "") MPI.Add_error_string(errcode2, "error code 2") self.assertEqual(MPI.Get_error_class(errcode2), errclass) self.assertEqual(MPI.Get_error_string(errcode2), "error code 2") # with self.catchNotImplementedError(4, 1): with self.assertRaises(MPI.Exception): MPI.Remove_error_class(errclass) with self.assertRaises(MPI.Exception): MPI.Remove_error_code(errcode2) MPI.Remove_error_string(errcode2) self.assertEqual(MPI.Get_error_string(errcode2), "") MPI.Remove_error_code(errcode2) with self.assertRaises(MPI.Exception): 
MPI.Remove_error_code(errcode2) # with self.assertRaises(MPI.Exception): MPI.Remove_error_class(errclass) with self.assertRaises(MPI.Exception): MPI.Remove_error_code(errcode1) MPI.Remove_error_string(errcode1) self.assertEqual(MPI.Get_error_string(errcode1), "") MPI.Remove_error_code(errcode1) with self.assertRaises(MPI.Exception): MPI.Remove_error_code(errcode1) # with self.assertRaises(MPI.Exception): MPI.Remove_error_class(errclass) MPI.Remove_error_string(errclass) self.assertEqual(MPI.Get_error_string(errclass), "") MPI.Remove_error_class(errclass) with self.assertRaises(MPI.Exception): MPI.Remove_error_class(errclass) # try: MPI.Remove_error_class(0) self.fail("expected Exception") except (MPI.Exception, NotImplementedError): pass try: MPI.Remove_error_code(0) self.fail("expected Exception") except (MPI.Exception, NotImplementedError): pass try: MPI.Remove_error_string(0) self.fail("expected Exception") except (MPI.Exception, NotImplementedError): pass if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_exceptions.py000066400000000000000000000415541475341043600175310ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest # ------------------------------------------------------------------------------ _errclasscache = {} def ErrClassName(ierr): if not _errclasscache: from mpi4py import MPI _errclasscache[MPI.SUCCESS] = 'SUCCESS' for entry in dir(MPI): if entry.startswith('ERR_'): errcls = getattr(MPI, entry) _errclasscache[errcls] = entry return _errclasscache.get(ierr, '') @unittest.skipMPI('MPICH2') class BaseTestCase(unittest.TestCase): def setUp(self): self.errhdl_world = MPI.COMM_WORLD.Get_errhandler() MPI.COMM_WORLD.Set_errhandler(MPI.ERRORS_RETURN) self.errhdl_self = MPI.COMM_SELF.Get_errhandler() MPI.COMM_SELF.Set_errhandler(MPI.ERRORS_RETURN) def tearDown(self): MPI.COMM_WORLD.Set_errhandler(self.errhdl_world) self.errhdl_world.Free() MPI.COMM_SELF.Set_errhandler(self.errhdl_self) self.errhdl_self.Free() def assertRaisesMPI(self, IErrClass, callableObj, *args, **kwargs): from mpi4py import MPI excClass = MPI.Exception try: callableObj(*args, **kwargs) except NotImplementedError: if (MPI.VERSION, MPI.SUBVERSION) < (2, 0): raise self.failureException("raised NotImplementedError") raise except excClass as excValue: error_class = excValue.Get_error_class() else: error_class = None if error_class is not None: if isinstance(IErrClass, (list, tuple, set)): match = (error_class in IErrClass) else: match = (error_class == IErrClass) if not match: if isinstance(IErrClass, (list, tuple, set)): IErrClassName = [ErrClassName(e) for e in IErrClass] IErrClassName = type(IErrClass)(IErrClassName) else: IErrClassName = ErrClassName(IErrClass) raise self.failureException( f"generated error class " f"is '{ErrClassName(error_class)}' ({error_class}), " f"but expected '{IErrClassName}' ({IErrClass})" ) else: raise self.failureException(f"{excClass.__name__} not raised") # ------------------------------------------------------------------------------ class TestExcDatatypeNull(BaseTestCase): def testDup(self): self.assertRaisesMPI( MPI.ERR_TYPE, MPI.DATATYPE_NULL.Dup, ) def testCommit(self): self.assertRaisesMPI( MPI.ERR_TYPE, MPI.DATATYPE_NULL.Commit, ) def testFree(self): self.assertRaisesMPI( MPI.ERR_TYPE, MPI.DATATYPE_NULL.Free, ) class TestExcDatatype(BaseTestCase): DATATYPES = ( MPI.BYTE, MPI.PACKED, MPI.CHAR, MPI.WCHAR, MPI.SIGNED_CHAR, MPI.UNSIGNED_CHAR, MPI.SHORT, MPI.UNSIGNED_SHORT, MPI.INT, MPI.UNSIGNED, MPI.UNSIGNED_INT, MPI.LONG, 
MPI.UNSIGNED_LONG, MPI.LONG_LONG, MPI.UNSIGNED_LONG_LONG, MPI.FLOAT, MPI.DOUBLE, MPI.LONG_DOUBLE, MPI.SHORT_INT, MPI.TWOINT, MPI.INT_INT, MPI.LONG_INT, MPI.FLOAT_INT, MPI.DOUBLE_INT, MPI.LONG_DOUBLE_INT, ) ERR_TYPE = MPI.ERR_TYPE @unittest.skipMPI('msmpi') def testFreePredefined(self): for dtype in self.DATATYPES: if dtype == MPI.DATATYPE_NULL: continue self.assertRaisesMPI( self.ERR_TYPE, dtype.Free, ) self.assertNotEqual(dtype, MPI.DATATYPE_NULL) def testKeyvalInvalid(self): for dtype in self.DATATYPES: if dtype == MPI.DATATYPE_NULL: continue try: self.assertRaisesMPI( [MPI.ERR_KEYVAL, MPI.ERR_OTHER], dtype.Get_attr, MPI.KEYVAL_INVALID, ) except NotImplementedError: self.skipTest('mpi-type-get_attr') name, version = MPI.get_vendor() if name == 'Open MPI': if version < (1,4,3): TestExcDatatype.DATATYPES = TestExcDatatype.DATATYPES[1:] TestExcDatatype.ERR_TYPE = MPI.ERR_INTERN # ------------------------------------------------------------------------------ @unittest.skipMPI('msmpi(<=4.2.0)') class TestExcStatus(BaseTestCase): def testGetCount(self): status = MPI.Status() self.assertRaisesMPI( MPI.ERR_TYPE, status.Get_count, MPI.DATATYPE_NULL, ) def testGetElements(self): status = MPI.Status() self.assertRaisesMPI( MPI.ERR_TYPE, status.Get_elements, MPI.DATATYPE_NULL, ) @unittest.skipMPI('MPICH1') def testSetElements(self): status = MPI.Status() self.assertRaisesMPI( MPI.ERR_TYPE, status.Set_elements, MPI.DATATYPE_NULL, 0, ) # ------------------------------------------------------------------------------ class TestExcRequestNull(BaseTestCase): def testFree(self): self.assertRaisesMPI( MPI.ERR_REQUEST, MPI.REQUEST_NULL.Free, ) def testCancel(self): self.assertRaisesMPI( MPI.ERR_REQUEST, MPI.REQUEST_NULL.Cancel, ) # ------------------------------------------------------------------------------ class TestExcOpNull(BaseTestCase): def testFree(self): self.assertRaisesMPI( [MPI.ERR_OP, MPI.ERR_ARG], MPI.OP_NULL.Free, ) class TestExcOp(BaseTestCase): OPS = ( MPI.MAX, MPI.MIN, MPI.SUM, MPI.PROD, MPI.LAND, MPI.BAND, MPI.LOR, MPI.BOR, MPI.LXOR, MPI.BXOR, MPI.MAXLOC, MPI.MINLOC, MPI.REPLACE, MPI.NO_OP, ) def testFreePredefined(self): for op in self.OPS: if op == MPI.OP_NULL: continue self.assertRaisesMPI( [MPI.ERR_OP, MPI.ERR_ARG], op.Free, ) self.assertNotEqual(op, MPI.OP_NULL) # ------------------------------------------------------------------------------ class TestExcInfoNull(BaseTestCase): def testFree(self): self.assertRaisesMPI( [MPI.ERR_INFO, MPI.ERR_ARG], MPI.INFO_NULL.Free, ) def testTruth(self): self.assertFalse(bool(MPI.INFO_NULL)) @unittest.skipMPI('msmpi(<8.1.0)') def testDup(self): self.assertRaisesMPI( [MPI.ERR_INFO, MPI.ERR_ARG], MPI.INFO_NULL.Dup, ) def testGet(self): self.assertRaisesMPI( [MPI.ERR_INFO, MPI.ERR_ARG], MPI.INFO_NULL.Get, 'key', ) def testSet(self): self.assertRaisesMPI( [MPI.ERR_INFO, MPI.ERR_ARG], MPI.INFO_NULL.Set, 'key', 'value', ) def testDelete(self): self.assertRaisesMPI( [MPI.ERR_INFO, MPI.ERR_ARG], MPI.INFO_NULL.Delete, 'key', ) def testGetNKeys(self): self.assertRaisesMPI( [MPI.ERR_INFO, MPI.ERR_ARG], MPI.INFO_NULL.Get_nkeys, ) def testGetNthKey(self): self.assertRaisesMPI( [MPI.ERR_INFO, MPI.ERR_ARG], MPI.INFO_NULL.Get_nthkey, 0, ) class TestExcInfo(BaseTestCase): def setUp(self): super().setUp() self.INFO = MPI.Info.Create() def tearDown(self): self.INFO.Free() self.INFO = None super().tearDown() def testDelete(self): self.assertRaisesMPI( MPI.ERR_INFO_NOKEY, self.INFO.Delete, 'key', ) def testGetNthKey(self): self.assertRaisesMPI( [MPI.ERR_INFO_KEY, 
MPI.ERR_ARG], self.INFO.Get_nthkey, 0, ) try: MPI.Info.Create().Free() except NotImplementedError: unittest.disable(TestExcInfo, 'mpi-info') unittest.disable(TestExcInfoNull, 'mpi-info') # ------------------------------------------------------------------------------ class TestExcGroupNull(BaseTestCase): def testCompare(self): self.assertRaisesMPI( MPI.ERR_GROUP, MPI.Group.Compare, MPI.GROUP_NULL, MPI.GROUP_NULL, ) self.assertRaisesMPI( MPI.ERR_GROUP, MPI.Group.Compare, MPI.GROUP_NULL, MPI.GROUP_EMPTY, ) self.assertRaisesMPI( MPI.ERR_GROUP, MPI.Group.Compare, MPI.GROUP_EMPTY, MPI.GROUP_NULL, ) def testAccessors(self): for method in ('Get_size', 'Get_rank'): self.assertRaisesMPI( MPI.ERR_GROUP, getattr(MPI.GROUP_NULL, method), ) class TestExcGroup(BaseTestCase): pass # ------------------------------------------------------------------------------ class TestExcSessionNull(BaseTestCase): def testGetNumPsets(self): self.assertRaisesMPI( MPI.ERR_SESSION, MPI.SESSION_NULL.Get_num_psets, ) def testGetNthPset(self): self.assertRaisesMPI( MPI.ERR_SESSION, MPI.SESSION_NULL.Get_nth_pset, 0, ) def testGetInfo(self): self.assertRaisesMPI( MPI.ERR_SESSION, MPI.SESSION_NULL.Get_info, ) def testGetPsetInfo(self): self.assertRaisesMPI( MPI.ERR_SESSION, MPI.SESSION_NULL.Get_pset_info, "mpi://SELF", ) def testCreateGroup(self): self.assertRaisesMPI( MPI.ERR_SESSION, MPI.SESSION_NULL.Create_group, "mpi://SELF", ) def testGetErrhandler(self): self.assertRaisesMPI( MPI.ERR_SESSION, MPI.SESSION_NULL.Get_errhandler, ) def testSetErrhandler(self): self.assertRaisesMPI( MPI.ERR_SESSION, MPI.SESSION_NULL.Set_errhandler, MPI.ERRORS_RETURN, ) class TestExcSession(BaseTestCase): def setUp(self): super().setUp() self.SESSION = MPI.Session.Init() self.SESSION.Set_errhandler(MPI.ERRORS_RETURN) def tearDown(self): self.SESSION.Finalize() self.SESSION = None super().tearDown() def testGetNthPsetNeg(self): self.assertRaisesMPI( MPI.ERR_ARG, self.SESSION.Get_nth_pset, -1, ) @unittest.skipMPI('mpich(<4.1.0)') def testGetNthPsetPos(self): self.assertRaisesMPI( MPI.ERR_ARG, self.SESSION.Get_nth_pset, self.SESSION.Get_num_psets(), ) def testGetPsetInfo(self): self.assertRaisesMPI( MPI.ERR_ARG, self.SESSION.Get_pset_info, "@qerty!#$", ) def testCreateGroup(self): self.assertRaisesMPI( [MPI.ERR_ARG, MPI.ERR_UNSUPPORTED_OPERATION], self.SESSION.Create_group, "@qerty!#$", ) try: MPI.Session.Init().Finalize() except NotImplementedError: unittest.disable(TestExcSessionNull, 'mpi-session') unittest.disable(TestExcSession, 'mpi-session') # ------------------------------------------------------------------------------ class TestExcCommNull(BaseTestCase): ERR_COMM = MPI.ERR_COMM def testFree(self): self.assertRaisesMPI( MPI.ERR_COMM, MPI.COMM_NULL.Free, ) def testCompare(self): self.assertRaisesMPI( self.ERR_COMM, MPI.Comm.Compare, MPI.COMM_NULL, MPI.COMM_NULL, ) self.assertRaisesMPI( self.ERR_COMM, MPI.Comm.Compare, MPI.COMM_SELF, MPI.COMM_NULL, ) self.assertRaisesMPI( self.ERR_COMM, MPI.Comm.Compare, MPI.COMM_WORLD, MPI.COMM_NULL, ) self.assertRaisesMPI( self.ERR_COMM, MPI.Comm.Compare, MPI.COMM_NULL, MPI.COMM_SELF, ) self.assertRaisesMPI( self.ERR_COMM, MPI.Comm.Compare, MPI.COMM_NULL, MPI.COMM_WORLD, ) def testAccessors(self): for method in ( 'Get_size', 'Get_rank', 'Is_inter', 'Is_intra', 'Get_group', 'Get_topology', ): self.assertRaisesMPI( MPI.ERR_COMM, getattr(MPI.COMM_NULL, method), ) def testDisconnect(self): try: self.assertRaisesMPI( MPI.ERR_COMM, MPI.COMM_NULL.Disconnect, ) except NotImplementedError: 
self.skipTest('mpi-comm-disconnect') @unittest.skipMPI('openmpi(<1.4.2)') def testGetAttr(self): self.assertRaisesMPI( MPI.ERR_COMM, MPI.COMM_NULL.Get_attr, MPI.TAG_UB, ) @unittest.skipMPI('openmpi(<1.4.1)') def testGetErrhandler(self): self.assertRaisesMPI( [MPI.ERR_COMM, MPI.ERR_ARG], MPI.COMM_NULL.Get_errhandler, ) def testSetErrhandler(self): self.assertRaisesMPI( MPI.ERR_COMM, MPI.COMM_NULL.Set_errhandler, MPI.ERRORS_RETURN, ) def testIntraNull(self): comm_null = MPI.Intracomm() self.assertRaisesMPI(MPI.ERR_COMM, comm_null.Dup) self.assertRaisesMPI(MPI.ERR_COMM, comm_null.Create, MPI.GROUP_EMPTY) self.assertRaisesMPI(MPI.ERR_COMM, comm_null.Split, color=0, key=0) def testInterNull(self): comm_null = MPI.Intercomm() self.assertRaisesMPI(MPI.ERR_COMM, comm_null.Get_remote_group) self.assertRaisesMPI(MPI.ERR_COMM, comm_null.Get_remote_size) self.assertRaisesMPI(MPI.ERR_COMM, comm_null.Dup) self.assertRaisesMPI(MPI.ERR_COMM, comm_null.Create, MPI.GROUP_EMPTY) self.assertRaisesMPI(MPI.ERR_COMM, comm_null.Split, color=0, key=0) self.assertRaisesMPI(MPI.ERR_COMM, comm_null.Merge, high=True) class TestExcComm(BaseTestCase): @unittest.skipMPI('MPICH1') def testFreeSelf(self): errhdl = MPI.COMM_SELF.Get_errhandler() try: MPI.COMM_SELF.Set_errhandler(MPI.ERRORS_RETURN) self.assertRaisesMPI( [MPI.ERR_COMM, MPI.ERR_ARG], MPI.COMM_SELF.Free, ) finally: MPI.COMM_SELF.Set_errhandler(errhdl) errhdl.Free() @unittest.skipMPI('MPICH1') def testFreeWorld(self): self.assertRaisesMPI( [MPI.ERR_COMM, MPI.ERR_ARG], MPI.COMM_WORLD.Free, ) def testKeyvalInvalid(self): self.assertRaisesMPI( [MPI.ERR_KEYVAL, MPI.ERR_OTHER], MPI.COMM_WORLD.Get_attr, MPI.KEYVAL_INVALID, ) # ------------------------------------------------------------------------------ class TestExcWinNull(BaseTestCase): def testFree(self): self.assertRaisesMPI( [MPI.ERR_WIN, MPI.ERR_ARG], MPI.WIN_NULL.Free, ) def testGetErrhandler(self): self.assertRaisesMPI( [MPI.ERR_WIN, MPI.ERR_ARG], MPI.WIN_NULL.Get_errhandler, ) def testSetErrhandler(self): self.assertRaisesMPI( [MPI.ERR_WIN, MPI.ERR_ARG], MPI.WIN_NULL.Set_errhandler, MPI.ERRORS_RETURN, ) def testCallErrhandler(self): self.assertRaisesMPI( [MPI.ERR_WIN, MPI.ERR_ARG], MPI.WIN_NULL.Call_errhandler, 0, ) class TestExcWin(BaseTestCase): def setUp(self): super().setUp() self.WIN = MPI.Win.Create(None, 1, MPI.INFO_NULL, MPI.COMM_SELF) self.WIN.Set_errhandler(MPI.ERRORS_RETURN) def tearDown(self): self.WIN.Free() self.WIN = None super().tearDown() def testKeyvalInvalid(self): self.assertRaisesMPI( [MPI.ERR_KEYVAL, MPI.ERR_OTHER], self.WIN.Get_attr, MPI.KEYVAL_INVALID ) try: MPI.Win.Create(None, 1, MPI.INFO_NULL, MPI.COMM_SELF).Free() except (NotImplementedError, MPI.Exception): unittest.disable(TestExcWin, 'mpi-win') unittest.disable(TestExcWinNull, 'mpi-win') # ------------------------------------------------------------------------------ class TestExcErrhandlerNull(BaseTestCase): def testFree(self): self.assertRaisesMPI( [MPI.ERR_ERRHANDLER, MPI.ERR_ARG], MPI.ERRHANDLER_NULL.Free, ) def testCommSelfSetErrhandler(self): self.assertRaisesMPI( [MPI.ERR_ERRHANDLER, MPI.ERR_ARG], MPI.COMM_SELF.Set_errhandler, MPI.ERRHANDLER_NULL ) def testCommWorldSetErrhandler(self): self.assertRaisesMPI( [MPI.ERR_ERRHANDLER, MPI.ERR_ARG], MPI.COMM_WORLD.Set_errhandler, MPI.ERRHANDLER_NULL, ) class TestExcErrhandler(BaseTestCase): pass # ------------------------------------------------------------------------------ if __name__ == '__main__': unittest.main() # 
------------------------------------------------------------------------------ mpi4py-4.0.3/test/test_file.py000066400000000000000000000200171475341043600162560ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import os, tempfile, pathlib def maketemp(prefix): fd, name = tempfile.mkstemp(prefix=prefix) os.close(fd) return name class BaseTestFile: COMM = MPI.COMM_NULL FILE = MPI.FILE_NULL prefix = 'mpi4py' def setUp(self): self.fname = maketemp(self.prefix) self.amode = MPI.MODE_RDWR | MPI.MODE_CREATE #self.amode |= MPI.MODE_DELETE_ON_CLOSE try: self.FILE = MPI.File.Open( self.COMM, self.fname, self.amode, MPI.INFO_NULL, ) #self.fname=None except Exception: os.remove(self.fname) raise def tearDown(self): if self.FILE == MPI.FILE_NULL: return amode = self.FILE.amode self.FILE.Close() if not (amode & MPI.MODE_DELETE_ON_CLOSE): MPI.File.Delete(self.fname, MPI.INFO_NULL) @unittest.skipMPI('openmpi(==2.0.0)') @unittest.skipMPI('MPICH2(<1.1.0)') def testPreallocate(self): size = self.FILE.Get_size() self.assertEqual(size, 0) self.FILE.Preallocate(1) size = self.FILE.Get_size() self.assertEqual(size, 1) self.FILE.Preallocate(100) size = self.FILE.Get_size() self.assertEqual(size, 100) self.FILE.Preallocate(10) size = self.FILE.Get_size() self.assertEqual(size, 100) self.FILE.Preallocate(200) size = self.FILE.Get_size() self.assertEqual(size, 200) def testGetSetSize(self): size = self.FILE.Get_size() self.assertEqual(size, 0) size = self.FILE.size self.assertEqual(size, 0) self.FILE.Set_size(100) size = self.FILE.Get_size() self.assertEqual(size, 100) size = self.FILE.size self.assertEqual(size, 100) def testGetGroup(self): fgroup = self.FILE.Get_group() cgroup = self.COMM.Get_group() gcomp = MPI.Group.Compare(fgroup, cgroup) self.assertEqual(gcomp, MPI.IDENT) fgroup.Free() cgroup.Free() def testGetAmode(self): amode = self.FILE.Get_amode() self.assertEqual(self.amode, amode) self.assertEqual(self.FILE.amode, self.amode) def testGetSetInfo(self): #info = MPI.INFO_NULL #self.FILE.Set_info(info) info = MPI.Info.Create() self.FILE.Set_info(info) info.Free() info = self.FILE.Get_info() self.FILE.Set_info(info) info.Free() def testGetSetView(self): fsize = 100 * MPI.DOUBLE.size self.FILE.Set_size(fsize) displacements = range(100) datatypes = [MPI.SHORT, MPI.INT, MPI.LONG, MPI.FLOAT, MPI.DOUBLE] datareps = ['native'] #['native', 'internal', 'external32'] for disp in displacements: for dtype in datatypes: for datarep in datareps: etype, ftype = dtype, dtype self.FILE.Set_view(disp, etype, ftype, datarep, MPI.INFO_NULL) of, et, ft, dr = self.FILE.Get_view() self.assertEqual(disp, of) self.assertEqual(etype.Get_extent(), et.Get_extent()) self.assertEqual(ftype.Get_extent(), ft.Get_extent()) self.assertEqual(datarep, dr) try: if not et.is_predefined: et.Free() except NotImplementedError: if et != etype: et.Free() try: if not ft.is_predefined: ft.Free() except NotImplementedError: if ft != ftype: ft.Free() def testGetSetAtomicity(self): atom = self.FILE.Get_atomicity() self.assertFalse(atom) for atomicity in [True, False] * 4: self.FILE.Set_atomicity(atomicity) atom = self.FILE.Get_atomicity() self.assertEqual(atom, atomicity) def testSync(self): self.FILE.Sync() def testSeekGetPosition(self): offset = 0 self.FILE.Seek(offset, MPI.SEEK_END) self.FILE.Seek(offset, MPI.SEEK_CUR) self.FILE.Seek(offset, MPI.SEEK_SET) pos = self.FILE.Get_position() self.assertEqual(pos, offset) def testSeekGetPositionShared(self): offset = 0 self.FILE.Seek_shared(offset, MPI.SEEK_END) 
self.FILE.Seek_shared(offset, MPI.SEEK_CUR) self.FILE.Seek_shared(offset, MPI.SEEK_SET) pos = self.FILE.Get_position_shared() self.assertEqual(pos, offset) @unittest.skipMPI('openmpi(==2.0.0)') def testGetByteOffset(self): for offset in range(10): disp = self.FILE.Get_byte_offset(offset) self.assertEqual(disp, offset) def testGetTypeExtent(self): extent = self.FILE.Get_type_extent(MPI.BYTE) self.assertEqual(extent, 1) def testGetErrhandler(self): eh = self.FILE.Get_errhandler() self.assertEqual(eh, MPI.ERRORS_RETURN) eh.Free() def testPyProps(self): file = self.FILE # self.assertEqual(file.size, 0) self.assertEqual(file.amode, self.amode) # group = file.group self.assertEqual(type(group), MPI.Group) self.assertEqual(file.group_size, group.Get_size()) self.assertEqual(file.group_rank, group.Get_rank()) group.Free() # info = file.info self.assertEqual(type(info), MPI.Info) file.info = info info.Free() # self.assertEqual(type(file.atomicity), bool) for atomicity in [True, False] * 4: file.atomicity = atomicity self.assertEqual(file.atomicity, atomicity) def testPickle(self): from pickle import dumps, loads with self.assertRaises(ValueError): loads(dumps(self.FILE)) class TestFileNull(unittest.TestCase): def setUp(self): self.eh_save = MPI.FILE_NULL.Get_errhandler() def tearDown(self): MPI.FILE_NULL.Set_errhandler(self.eh_save) self.eh_save.Free() def testGetSetErrhandler(self): eh = MPI.FILE_NULL.Get_errhandler() self.assertEqual(eh, MPI.ERRORS_RETURN) eh.Free() MPI.FILE_NULL.Set_errhandler(MPI.ERRORS_ARE_FATAL) eh = MPI.FILE_NULL.Get_errhandler() self.assertEqual(eh, MPI.ERRORS_ARE_FATAL) eh.Free() MPI.FILE_NULL.Set_errhandler(MPI.ERRORS_RETURN) eh = MPI.FILE_NULL.Get_errhandler() self.assertEqual(eh, MPI.ERRORS_RETURN) eh.Free() class TestFileSelf(BaseTestFile, unittest.TestCase): COMM = MPI.COMM_SELF prefix = BaseTestFile.prefix + (f'-{MPI.COMM_WORLD.Get_rank()}') class TestFilePath(BaseTestFile, unittest.TestCase): COMM = MPI.COMM_SELF prefix = BaseTestFile.prefix + (f'-{MPI.COMM_WORLD.Get_rank()}') @staticmethod def _test_open_close(path): comm = MPI.COMM_SELF amode = MPI.MODE_CREATE | MPI.MODE_RDWR amode |= MPI.MODE_DELETE_ON_CLOSE try: fh = MPI.File.Open(comm, path, amode) except: os.remove(path) raise else: MPI.File.Close(fh) def testPath(self): name = maketemp(self.prefix) path = pathlib.PurePath(name) self._test_open_close(path) def testStr(self): name = maketemp(self.prefix) path = os.fsdecode(os.fsencode(name)) self._test_open_close(path) def testBytes(self): name = maketemp(self.prefix) path = os.fsencode(name) self._test_open_close(path) def have_feature(): case = BaseTestFile() case.COMM = TestFileSelf.COMM case.prefix = TestFileSelf.prefix case.setUp() case.tearDown() try: have_feature() except NotImplementedError: unittest.disable(BaseTestFile, 'mpi-file') unittest.disable(TestFileNull, 'mpi-file') unittest.disable(TestFilePath, 'mpi-file') if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_fortran.py000066400000000000000000000123201475341043600170100ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest class BaseTestFortran: HANDLES = [] def check(self, handle1): try: fint = handle1.py2f() except NotImplementedError: self.skipTest(type(handle1).__name__) handle2 = type(handle1).f2py(fint) self.assertEqual(handle1, handle2) def create(self): handle = None klass = type(self.HANDLES[0]) if issubclass(klass, MPI.Status): handle = MPI.Status() if issubclass(klass, MPI.Datatype): handle = MPI.BYTE.Dup() if issubclass(klass, MPI.Op): 
handle = MPI.Op.Create(lambda *_: None) if issubclass(klass, MPI.Request): handle = MPI.COMM_SELF.Recv_init(bytearray(0), 0, 0) if issubclass(klass, MPI.Message): handle = MPI.MESSAGE_NULL if issubclass(klass, MPI.Errhandler): handle = MPI.ERRHANDLER_NULL if issubclass(klass, MPI.Info): handle = MPI.Info.Create() if issubclass(klass, MPI.Group): handle = MPI.COMM_SELF.Get_group() if issubclass(klass, MPI.Session): handle = MPI.Session.Init() if issubclass(klass, MPI.Comm): handle = MPI.COMM_SELF.Dup() if issubclass(klass, MPI.Win): handle = MPI.Win.Create(MPI.BOTTOM, comm=MPI.COMM_SELF) if issubclass(klass, MPI.File): import os name = os.devnull mode = MPI.MODE_RDONLY if os.name != 'posix': import tempfile rank = MPI.COMM_WORLD.Get_rank() fd, name = tempfile.mkstemp(prefix=f'mpi4py-{rank}-') os.close(fd) mode |= MPI.MODE_CREATE mode |= MPI.MODE_DELETE_ON_CLOSE handle = MPI.File.Open(MPI.COMM_SELF, name, mode) return handle def destroy(self, handle): if handle: for method in ('Free', 'Close', 'Finalize'): if hasattr(handle, method): getattr(handle, method)() def testFortran(self): for handle in self.HANDLES: self.check(handle) if not handle: continue if not hasattr(handle, 'Dup'): continue handle = handle.Dup() try: self.check(handle) finally: handle.Free() try: handle = self.create() except (NotImplementedError, MPI.Exception): klass = type(self.HANDLES[0]) self.skipTest(klass.__name__) try: self.check(handle) finally: self.destroy(handle) class TestFortranStatus(BaseTestFortran, unittest.TestCase): def setUp(self): s1 = MPI.Status() s2 = MPI.Status() s2.source = 1 s2.tag = 2 s2.error = MPI.ERR_OTHER s3 = MPI.Status() s3.source = 0 s3.tag = 0 s3.error = MPI.SUCCESS self.HANDLES = [s1, s2, s3] @unittest.skipMPI('MPICH1') def testFortran(self): super().testFortran() def testFintArray(self): s = MPI.F_SOURCE t = MPI.F_TAG e = MPI.F_ERROR for status in self.HANDLES: try: f_status = status.py2f() except NotImplementedError: continue self.assertEqual(f_status[s], status.Get_source()) self.assertEqual(f_status[t], status.Get_tag()) self.assertEqual(f_status[e], status.Get_error()) self.assertEqual(len(f_status), MPI.F_STATUS_SIZE) class TestFortranDatatype(BaseTestFortran, unittest.TestCase): HANDLES = [ MPI.DATATYPE_NULL, MPI.CHAR, MPI.SHORT, MPI.INT, MPI.LONG, MPI.FLOAT, MPI.DOUBLE, ] class TestFortranOp(BaseTestFortran, unittest.TestCase): HANDLES = [ MPI.OP_NULL, MPI.MAX, MPI.MIN, MPI.SUM, MPI.PROD, MPI.LAND, MPI.BAND, MPI.LOR, MPI.BOR, MPI.LXOR, MPI.BXOR, MPI.MAXLOC, MPI.MINLOC, ] class TestFortranRequest(BaseTestFortran, unittest.TestCase): HANDLES = [ MPI.REQUEST_NULL, ] class TestFortranMessage(BaseTestFortran, unittest.TestCase): HANDLES = [ MPI.MESSAGE_NULL, MPI.MESSAGE_NO_PROC, ] class TestFortranErrhandler(BaseTestFortran, unittest.TestCase): HANDLES = [ MPI.ERRHANDLER_NULL, MPI.ERRORS_RETURN, MPI.ERRORS_ARE_FATAL, ] class TestFortranInfo(BaseTestFortran, unittest.TestCase): HANDLES = [ MPI.INFO_NULL, ] class TestFortranGroup(BaseTestFortran, unittest.TestCase): HANDLES = [ MPI.GROUP_NULL, MPI.GROUP_EMPTY, ] class TestFortranSession(BaseTestFortran, unittest.TestCase): HANDLES = [ MPI.SESSION_NULL, ] class TestFortranComm(BaseTestFortran, unittest.TestCase): HANDLES = [ MPI.COMM_NULL, MPI.COMM_SELF, MPI.COMM_WORLD, ] class TestFortranWin(BaseTestFortran, unittest.TestCase): HANDLES = [ MPI.WIN_NULL, ] class TestFortranFile(BaseTestFortran, unittest.TestCase): HANDLES = [ MPI.FILE_NULL, ] if __name__ == '__main__': unittest.main() 
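# ------------------------------------------------------------------------------
# A minimal sketch of the py2f()/f2py() round trip exercised by the
# BaseTestFortran cases above; illustrative only, assuming a build of mpi4py
# whose underlying MPI implementation supports Fortran handle conversion.
from mpi4py import MPI
fint = MPI.COMM_WORLD.py2f()    # Python handle -> Fortran integer handle
comm = MPI.Comm.f2py(fint)      # Fortran integer handle -> Python handle
assert comm == MPI.COMM_WORLD   # the round trip recovers the same communicator
# ------------------------------------------------------------------------------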
mpi4py-4.0.3/test/test_grequest.py000066400000000000000000000142461475341043600172050ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest class GReqCtx: source = 3 tag = 7 completed = False cancel_called = False free_called = False def query(self, status): status.Set_source(self.source) status.Set_tag(self.tag) def free(self): self.free_called = True def cancel(self, completed): self.cancel_called = True if completed is not self.completed: raise MPI.Exception(MPI.ERR_PENDING) @unittest.skipMPI('MPI(<2.0)') @unittest.skipMPI('openmpi(==4.1.0)') class TestGrequest(unittest.TestCase): def testConstructor(self): ctx = GReqCtx() greq = MPI.Grequest.Start(ctx.query, ctx.free, ctx.cancel) dupe = MPI.Grequest(greq) self.assertIs(type(dupe), MPI.Grequest) self.assertEqual(dupe, greq) dupe = MPI.Grequest.fromhandle(greq.handle) self.assertIs(type(dupe), MPI.Grequest) self.assertEqual(dupe, greq) dupe = MPI.Grequest.f2py(greq.py2f()) self.assertIs(type(dupe), MPI.Grequest) self.assertEqual(dupe, greq) dupe = MPI.Request(greq) self.assertIs(type(dupe), MPI.Request) self.assertEqual(dupe, greq) with self.assertRaises(TypeError): dupe = MPI.Prequest(greq) greq.Cancel() greq.Complete() greq.Wait() @unittest.skipMPI('openmpi') # TODO: open-mpi/ompi#11681 def testExceptionHandling(self): ctx = GReqCtx() def raise_mpi(*args): raise MPI.Exception(MPI.ERR_BUFFER) def raise_rte(*args): raise ValueError(42) def check_exc(exception, is_mpi, stderr): output = stderr.getvalue() header = 'Traceback (most recent call last):\n' if is_mpi: chkcode = MPI.ERR_BUFFER excname = MPI.Exception.__name__ else: chkcode = MPI.ERR_OTHER excname = ValueError.__name__ ierr = exception.Get_error_class() self.assertEqual(ierr, chkcode) self.assertTrue(output.startswith(header)) self.assertIn(excname, output) for raise_fn, is_mpi in ( (raise_mpi, True), (raise_rte, False), ): greq = MPI.Grequest.Start(raise_fn, ctx.free, ctx.cancel) greq.Complete() with self.assertRaises(MPI.Exception) as exc_cm: with unittest.capture_stderr() as stderr: greq.Wait() if greq: greq.Free() check_exc(exc_cm.exception, is_mpi, stderr) # greq = MPI.Grequest.Start(ctx.query, raise_fn, ctx.cancel) greq.Complete() with self.assertRaises(MPI.Exception) as exc_cm: with unittest.capture_stderr() as stderr: greq.Wait() if greq: greq.Free() check_exc(exc_cm.exception, is_mpi, stderr) # greq = MPI.Grequest.Start(ctx.query, ctx.free, raise_fn) with self.assertRaises(MPI.Exception) as exc_cm: with unittest.capture_stderr() as stderr: greq.Cancel() greq.Complete() greq.Wait() if greq: greq.Free() check_exc(exc_cm.exception, is_mpi, stderr) def testAll(self): ctx = GReqCtx() greq = MPI.Grequest.Start(ctx.query, ctx.free, ctx.cancel) self.assertFalse(greq.Test()) self.assertFalse(ctx.free_called) greq.Cancel() self.assertTrue(ctx.cancel_called) ctx.cancel_called = False greq.Complete() ctx.completed = True greq.Cancel() self.assertTrue(ctx.cancel_called) status = MPI.Status() self.assertTrue(greq.Test(status)) self.assertEqual(status.Get_source(), ctx.source) self.assertEqual(status.Get_tag(), ctx.tag) self.assertEqual(status.Get_error(), MPI.SUCCESS) greq.Wait() self.assertTrue(ctx.free_called) def testAll1(self): ctx = GReqCtx() greq = MPI.Grequest.Start(ctx.query, None, None) self.assertFalse(greq.Test()) greq.Cancel() greq.Complete() status = MPI.Status() self.assertTrue(greq.Test(status)) self.assertEqual(status.Get_source(), ctx.source) self.assertEqual(status.Get_tag(), ctx.tag) self.assertEqual(status.Get_error(), MPI.SUCCESS) 
self.assertFalse(status.Is_cancelled()) greq.Wait() def testAll2(self): greq = MPI.Grequest.Start(None, None, None) self.assertFalse(greq.Test()) greq.Cancel() greq.Complete() status = MPI.Status() self.assertTrue(greq.Test(status)) self.assertEqual(status.Get_source(), MPI.ANY_SOURCE) self.assertEqual(status.Get_tag(), MPI.ANY_TAG) self.assertEqual(status.Get_error(), MPI.SUCCESS) self.assertFalse(status.Is_cancelled()) greq.Wait() def testPyCompleteTest(self): greq = MPI.Grequest.Start() self.assertFalse(greq.Test()) greq.cancel() greq.complete(42) status = MPI.Status() flag, result = greq.test(status) self.assertTrue(flag) self.assertEqual(result, 42) self.assertEqual(status.Get_source(), MPI.ANY_SOURCE) self.assertEqual(status.Get_tag(), MPI.ANY_TAG) self.assertEqual(status.Get_error(), MPI.SUCCESS) self.assertFalse(status.Is_cancelled()) obj = greq.wait() self.assertIsNone(obj) def testPyCompleteWait(self): greq = MPI.Grequest.Start() self.assertFalse(greq.Test()) greq.cancel() greq.complete(42) status = MPI.Status() result = greq.wait(status) self.assertEqual(result, 42) self.assertEqual(status.Get_source(), MPI.ANY_SOURCE) self.assertEqual(status.Get_tag(), MPI.ANY_TAG) self.assertEqual(status.Get_error(), MPI.SUCCESS) self.assertFalse(status.Is_cancelled()) flag, obj = greq.test() self.assertTrue(flag) self.assertIsNone(obj) if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_group.py000066400000000000000000000163641475341043600165050ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest class BaseTestGroup: def testProperties(self): group = self.GROUP self.assertEqual(group.Get_size(), group.size) self.assertEqual(group.Get_rank(), group.rank) def testCompare(self): results = (MPI.IDENT, MPI.SIMILAR, MPI.UNEQUAL) group = MPI.COMM_WORLD.Get_group() gcmp = MPI.Group.Compare(self.GROUP, group) group.Free() self.assertIn(gcmp, results) gcmp = MPI.Group.Compare(self.GROUP, self.GROUP) self.assertEqual(gcmp, MPI.IDENT) gcmp = self.GROUP.Compare(self.GROUP) self.assertEqual(gcmp, MPI.IDENT) def testDup(self): group = self.GROUP.Dup() self.assertEqual(MPI.Group.Compare(group, self.GROUP), MPI.IDENT) group.Free() def testUnion(self): group = MPI.Group.Union(MPI.GROUP_EMPTY, self.GROUP) self.assertEqual(MPI.Group.Compare(group, self.GROUP), MPI.IDENT) group.Free() group = MPI.Group.Union(self.GROUP, MPI.GROUP_EMPTY) self.assertEqual(MPI.Group.Compare(group, self.GROUP), MPI.IDENT) group.Free() group = MPI.Group.Union(self.GROUP, self.GROUP) self.assertEqual(MPI.Group.Compare(group, self.GROUP), MPI.IDENT) group.Free() def testDifference(self): group = MPI.Group.Difference(MPI.GROUP_EMPTY, self.GROUP) self.assertEqual(MPI.Group.Compare(group, MPI.GROUP_EMPTY), MPI.IDENT) group.Free() group = MPI.Group.Difference(self.GROUP, MPI.GROUP_EMPTY) self.assertEqual(MPI.Group.Compare(group, self.GROUP), MPI.IDENT) group.Free() group = MPI.Group.Difference(self.GROUP, self.GROUP) self.assertEqual(MPI.Group.Compare(group, MPI.GROUP_EMPTY), MPI.IDENT) group.Free() def testIntersection(self): group = MPI.Group.Intersection(MPI.GROUP_EMPTY, self.GROUP) self.assertEqual(MPI.Group.Compare(group, MPI.GROUP_EMPTY), MPI.IDENT) group.Free() group = MPI.Group.Intersection(self.GROUP, MPI.GROUP_EMPTY) self.assertEqual(MPI.Group.Compare(group, MPI.GROUP_EMPTY), MPI.IDENT) group.Free() group = MPI.Group.Intersection(self.GROUP, self.GROUP) self.assertEqual(MPI.Group.Compare(group, self.GROUP), MPI.IDENT) group.Free() def testIncl(self): group = self.GROUP.Incl([]) 
self.assertEqual(MPI.Group.Compare(group, MPI.GROUP_EMPTY), MPI.IDENT) group.Free() def testExcl(self): group = self.GROUP.Excl([]) self.assertEqual(MPI.Group.Compare(group, self.GROUP), MPI.IDENT) group.Free() def testRangeIncl(self): if self.GROUP == MPI.GROUP_EMPTY: return group = self.GROUP.Range_incl([]) self.assertEqual(MPI.Group.Compare(group, MPI.GROUP_EMPTY), MPI.IDENT) group.Free() ranges = [ (0, self.GROUP.Get_size()-1, 1), ] group = self.GROUP.Range_incl(ranges) self.assertEqual(MPI.Group.Compare(group, self.GROUP), MPI.IDENT) group.Free() def testRangeExcl(self): if self.GROUP == MPI.GROUP_EMPTY: return group = self.GROUP.Range_excl([]) self.assertEqual(MPI.Group.Compare(group, self.GROUP), MPI.IDENT) group.Free() ranges = [ (0, self.GROUP.Get_size()-1, 1), ] group = self.GROUP.Range_excl(ranges) self.assertEqual(MPI.Group.Compare(group, MPI.GROUP_EMPTY), MPI.IDENT) group.Free() def testTranslRanks(self): group1 = self.GROUP group2 = self.GROUP ranks1 = list(range(group1.Get_size())) * 3 ranks2 = MPI.Group.Translate_ranks(group1, ranks1) ranks2 = MPI.Group.Translate_ranks(group1, ranks1, group2) self.assertEqual(list(ranks1), list(ranks2)) group = self.GROUP ranks1 = list(range(group.Get_size())) ranks2 = group.Translate_ranks(group=group) self.assertEqual(list(ranks1), list(ranks2)) ranks2 = group.Translate_ranks() self.assertEqual(len(ranks2), group.Get_size()) self.assertNotIn(MPI.UNDEFINED, set(ranks2)) @unittest.skipMPI('MPICH1') @unittest.skipMPI('LAM/MPI') def testTranslRanksProcNull(self): if self.GROUP == MPI.GROUP_EMPTY: return group1 = self.GROUP group2 = self.GROUP ranks1 = [MPI.PROC_NULL] * 10 ranks2 = MPI.Group.Translate_ranks(group1, ranks1, group2) self.assertEqual(list(ranks1), list(ranks2)) def testTranslRanksGroupEmpty(self): if self.GROUP == MPI.GROUP_EMPTY: return group1 = self.GROUP group2 = MPI.GROUP_EMPTY ranks1 = list(range(group1.Get_size())) * 2 ranks2 = MPI.Group.Translate_ranks(group1, ranks1, group2) for rank in ranks2: self.assertEqual(rank, MPI.UNDEFINED) def testPickle(self): from pickle import dumps self.assertRaises(ValueError, dumps, self.GROUP) class TestGroupNull(unittest.TestCase): def testConstructor(self): group = MPI.Group() self.assertIsNot(group, MPI.GROUP_NULL) self.assertEqual(group, MPI.GROUP_NULL) def testNull(self): GROUP_NULL = MPI.GROUP_NULL group_null = MPI.Group() self.assertFalse(GROUP_NULL) self.assertFalse(group_null) self.assertEqual(group_null, GROUP_NULL) def testPickle(self): from pickle import dumps, loads group_null = loads(dumps(MPI.GROUP_NULL)) self.assertIs(group_null, MPI.GROUP_NULL) group_null = loads(dumps(MPI.Group(MPI.GROUP_NULL))) self.assertIsNot(group_null, MPI.GROUP_NULL) self.assertEqual(group_null, MPI.GROUP_NULL) class TestGroupEmpty(BaseTestGroup, unittest.TestCase): def setUp(self): self.GROUP = MPI.GROUP_EMPTY def testEmpty(self): self.assertTrue(self.GROUP) def testSize(self): size = self.GROUP.Get_size() self.assertEqual(size, 0) def testRank(self): rank = self.GROUP.Get_rank() self.assertEqual(rank, MPI.UNDEFINED) @unittest.skipMPI('MPICH1') def testTranslRanks(self): super().testTranslRanks() def testPickle(self): from pickle import dumps, loads group_empty = loads(dumps(MPI.GROUP_EMPTY)) self.assertIs(group_empty, MPI.GROUP_EMPTY) group_empty = loads(dumps(MPI.Group(MPI.GROUP_EMPTY))) self.assertIsNot(group_empty, MPI.GROUP_EMPTY) self.assertEqual(group_empty, MPI.GROUP_EMPTY) class TestGroupSelf(BaseTestGroup, unittest.TestCase): def setUp(self): self.GROUP = MPI.COMM_SELF.Get_group() def 
tearDown(self): self.GROUP.Free() def testSize(self): size = self.GROUP.Get_size() self.assertEqual(size, 1) def testRank(self): rank = self.GROUP.Get_rank() self.assertEqual(rank, 0) class TestGroupWorld(BaseTestGroup, unittest.TestCase): def setUp(self): self.GROUP = MPI.COMM_WORLD.Get_group() def tearDown(self): self.GROUP.Free() def testSize(self): size = self.GROUP.Get_size() self.assertGreaterEqual(size, 1) def testRank(self): size = self.GROUP.Get_size() rank = self.GROUP.Get_rank() self.assertTrue(rank >= 0 and rank < size) if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_info.py000066400000000000000000000225161475341043600163000ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import sys class TestInfoNull(unittest.TestCase): def testTruth(self): self.assertFalse(bool(MPI.INFO_NULL)) def testPickle(self): from pickle import dumps, loads null = loads(dumps(MPI.INFO_NULL)) self.assertIs(null, MPI.INFO_NULL) null = loads(dumps(MPI.Info())) self.assertIsNot(null, MPI.INFO_NULL) self.assertEqual(null, MPI.INFO_NULL) def testPyMethods(self): inull = MPI.INFO_NULL def getitem(): return inull['k'] def setitem(): inull['k'] = 'v' def delitem(): del inull['k'] def update(): inull.update([]) def pop(): inull.pop('k') def popitem(): inull.popitem() self.assertEqual(len(inull), 0) self.assertNotIn('key', inull) self.assertRaises(KeyError, getitem) self.assertRaises(KeyError, setitem) self.assertRaises(KeyError, delitem) self.assertRaises(KeyError, update) self.assertRaises(KeyError, pop) self.assertRaises(KeyError, popitem) self.assertIsNone(inull.get('key', None)) self.assertIsNone(inull.pop('key', None)) self.assertEqual(inull.keys(), []) self.assertEqual(inull.values(), []) self.assertEqual(inull.items(), []) self.assertEqual(inull.copy(), inull) inull.clear() class TestInfoEnv(unittest.TestCase): KEYS = ( "command", "argv", "maxprocs", "soft", "host", "arch", "wdir", "file", "thread_level", ) def testTruth(self): self.assertTrue(bool(MPI.INFO_ENV)) def testPickle(self): from pickle import dumps, loads ienv = loads(dumps(MPI.INFO_ENV)) self.assertIs(ienv, MPI.INFO_ENV) ienv = loads(dumps(MPI.Info(MPI.INFO_ENV))) self.assertIsNot(ienv, MPI.INFO_ENV) self.assertEqual(ienv, MPI.INFO_ENV) def testPyMethods(self): env = MPI.INFO_ENV for key in self.KEYS: v = env.Get(key) del v def testDup(self): env = MPI.INFO_ENV dup = env.Dup() try: for key in self.KEYS: self.assertEqual(env.Get(key), dup.Get(key)) finally: dup.Free() def testCreateEnv(self): try: env = MPI.Info.Create_env() except NotImplementedError: if MPI.Get_version() >= (4, 0): raise raise unittest.SkipTest("mpi-info-create-env") for key in self.KEYS: v = env.Get(key) del v try: dup = env.Dup() try: for key in self.KEYS: self.assertEqual(env.Get(key), dup.Get(key)) finally: dup.Free() finally: env.Free() for args in ( None, [], (), sys.executable, [sys.executable], (sys.executable,), ): MPI.Info.Create_env(args).Free() MPI.Info.Create_env(args=args).Free() class TestInfo(unittest.TestCase): def setUp(self): self.INFO = MPI.Info.Create() def tearDown(self): self.INFO.Free() self.assertEqual(self.INFO, MPI.INFO_NULL) self.INFO = None def testTruth(self): self.assertTrue(bool(self.INFO)) def testCreate(self): data = {'key1': 'value1', 'key2': 'value2'} for items in (None, {}, [], data, list(data.items())): info = MPI.Info.Create(items) if items is not None: self.assertEqual(info.Get_nkeys(), len(items)) for k, v in dict(items).items(): self.assertEqual(info.Get(k), v) info.Free() def 
testCreateBad(self): with self.assertRaises(TypeError): MPI.Info.Create(items=123) def testDup(self): info = self.INFO.Dup() self.assertNotEqual(self.INFO, info) self.assertEqual(info.Get_nkeys(), 0) info.Free() self.assertFalse(info) def testGet(self): value = self.INFO.Get('key') self.assertIsNone(value) def testGetNKeys(self): self.assertEqual(self.INFO.Get_nkeys(), 0) def testGetSetDelete(self): INFO = self.INFO self.assertEqual(INFO.Get_nkeys(), 0) INFO.Set('key', 'value') nkeys = INFO.Get_nkeys() self.assertEqual(nkeys, 1) key = INFO.Get_nthkey(0) self.assertEqual(key, 'key') value = INFO.Get('key') self.assertEqual(value, 'value') INFO.Delete('key') nkeys = INFO.Get_nkeys() self.assertEqual(nkeys, 0) value = INFO.Get('key') self.assertIsNone(value) def testPickle(self): from pickle import dumps, loads mold = self.INFO info = loads(dumps(mold)) self.assertIsNot(info, mold) self.assertNotEqual(info, mold) self.assertEqual(info.items(), mold.items()) info.Free() mold = self.INFO mold.update([("foo", "bar"), ("answer", "42")]) info = loads(dumps(mold)) self.assertIsNot(info, mold) self.assertNotEqual(info, mold) self.assertEqual(info.items(), mold.items()) info.Free() def testPyMethods(self): INFO = self.INFO self.assertEqual(len(INFO), 0) self.assertNotIn('key', INFO) self.assertEqual(INFO.keys(), []) self.assertEqual(INFO.values(), []) self.assertEqual(INFO.items(), []) INFO['key'] = 'value' self.assertEqual(len(INFO), 1) self.assertIn('key', INFO) self.assertEqual(INFO['key'], 'value') for key in INFO: self.assertEqual(key, 'key') self.assertEqual(INFO.keys(), ['key']) self.assertEqual(INFO.values(), ['value']) self.assertEqual(INFO.items(), [('key', 'value')]) self.assertEqual(key, 'key') del INFO['key'] self.assertEqual(len(INFO), 0) INFO['key'] = 'value' self.assertEqual(INFO.pop('key'), 'value') self.assertEqual(len(INFO), 0) self.assertEqual(INFO.pop('key', 'value'), 'value') self.assertRaises(KeyError, INFO.pop, 'key') INFO['key1'] = 'value1' INFO['key2'] = 'value2' self.assertEqual(INFO.pop('key1'), 'value1') self.assertEqual(len(INFO), 1) self.assertEqual(INFO.pop('key2'), 'value2') self.assertEqual(len(INFO), 0) INFO['key'] = 'value' self.assertEqual(INFO.popitem(), ('key', 'value')) self.assertEqual(len(INFO), 0) self.assertRaises(KeyError, INFO.popitem) INFO['key1'] = 'value1' INFO['key2'] = 'value2' self.assertEqual(INFO.popitem(), ('key2', 'value2')) self.assertEqual(len(INFO), 1) self.assertEqual(INFO.popitem(), ('key1', 'value1')) self.assertEqual(len(INFO), 0) self.assertEqual(len(INFO), 0) self.assertNotIn('key', INFO) self.assertEqual(INFO.keys(), []) self.assertEqual(INFO.values(), []) self.assertEqual(INFO.items(), []) def getitem(): INFO['key'] self.assertRaises(KeyError, getitem) def delitem(): del INFO['key'] self.assertRaises(KeyError, delitem) INFO.clear() INFO.update([('key1','value1')]) self.assertEqual(len(INFO), 1) self.assertEqual(INFO['key1'], 'value1') self.assertEqual(INFO.get('key1'), 'value1') self.assertIsNone(INFO.get('key2')) self.assertEqual(INFO.get('key2', 'value2'), 'value2') INFO.update(key2='value2') self.assertEqual(len(INFO), 2) self.assertEqual(INFO['key1'], 'value1') self.assertEqual(INFO['key2'], 'value2') self.assertEqual(INFO.get('key1'), 'value1') self.assertEqual(INFO.get('key2'), 'value2') self.assertIsNone(INFO.get('key3')) self.assertEqual(INFO.get('key3', 'value3'), 'value3') INFO.update([('key1', 'newval1')], key2='newval2') self.assertEqual(len(INFO), 2) self.assertEqual(INFO['key1'], 'newval1') self.assertEqual(INFO['key2'], 
'newval2') self.assertEqual(INFO.get('key1'), 'newval1') self.assertEqual(INFO.get('key2'), 'newval2') self.assertIsNone(INFO.get('key3')) self.assertEqual(INFO.get('key3', 'newval3'), 'newval3') INFO.update(dict(key1='val1', key2='val2', key3='val3')) self.assertEqual(len(INFO), 3) self.assertEqual(INFO['key1'], 'val1') self.assertEqual(INFO['key2'], 'val2') self.assertEqual(INFO['key3'], 'val3') dupe = INFO.copy() self.assertEqual(INFO.items(), dupe.items()) dupe.Free() INFO.clear() self.assertEqual(len(INFO), 0) self.assertIsNone(INFO.get('key1')) self.assertIsNone(INFO.get('key2')) self.assertIsNone(INFO.get('key3')) self.assertEqual(INFO.get('key1', 'value1'), 'value1') self.assertEqual(INFO.get('key2', 'value2'), 'value2') self.assertEqual(INFO.get('key3', 'value3'), 'value3') try: MPI.Info.Create().Free() except NotImplementedError: unittest.disable(TestInfo, 'mpi-info') unittest.disable(TestInfoNull, 'mpi-info') if (MPI.VERSION < 3 and MPI.INFO_ENV == MPI.INFO_NULL): unittest.disable(TestInfoEnv, 'mpi-info-env') if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_io.py000066400000000000000000000742401475341043600157550ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import arrayimpl import os, tempfile import platform def arrayimpl_loop_io(): impi = unittest.mpi_predicate('impi(>=2021.12.0)') openmpi = unittest.mpi_predicate('openmpi(<4.2.0)') is_i386 = platform.machine() in ('i386', 'i686') is_win = os.name == 'nt' for array, typecode in arrayimpl.loop(): if unittest.is_mpi_gpu('mvapich', array): continue if openmpi and is_i386 and typecode in ('g', 'G'): continue if impi and is_win and typecode in ('l', 'L', 'g'): continue yield array, typecode scalar = arrayimpl.scalar class BaseTestIO: COMM = MPI.COMM_NULL FILE = MPI.FILE_NULL prefix = 'mpi4py-' def setUp(self): comm = self.COMM world_size = MPI.COMM_WORLD.Get_size() world_rank = MPI.COMM_WORLD.Get_rank() prefix = self.prefix if comm.Get_size() < world_size: prefix += f'{world_rank}-' fname = None if comm.Get_rank() == 0: fd, fname = tempfile.mkstemp(prefix=prefix) os.close(fd) fname = comm.bcast(fname, 0) amode = MPI.MODE_RDWR | MPI.MODE_CREATE amode |= MPI.MODE_DELETE_ON_CLOSE amode |= MPI.MODE_UNIQUE_OPEN info = MPI.INFO_NULL try: self.FILE = MPI.File.Open(comm, fname, amode, info) except Exception: if comm.Get_rank() == 0: os.remove(fname) raise def tearDown(self): if self.FILE: self.FILE.Close() self.COMM.Barrier() class BaseTestIOBasic(BaseTestIO): # non-collective def testReadWriteAt(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE for array, typecode in arrayimpl_loop_io(): with arrayimpl.test(self): etype = array.TypeMap[typecode] fh.Set_size(0) fh.Set_view(0, etype) count = 13 wbuf = array(42, typecode, count) fh.Write_at(count*rank, wbuf.as_raw()) fh.Sync() comm.Barrier() fh.Sync() rbuf = array(-1, typecode, count+1) fh.Read_at(count*rank, rbuf.as_mpi_c(count)) for value in rbuf[:-1]: self.assertEqual(value, scalar(42)) self.assertEqual(rbuf[-1], scalar(-1)) comm.Barrier() def testIReadIWriteAt(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE for array, typecode in arrayimpl_loop_io(): with arrayimpl.test(self): etype = array.TypeMap[typecode] fh.Set_size(0) fh.Set_view(0, etype) count = 13 wbuf = array(42, typecode, count) fh.Iwrite_at(count*rank, wbuf.as_raw()).Wait() fh.Sync() comm.Barrier() fh.Sync() rbuf = array(-1, typecode, count+1) fh.Iread_at(count*rank, rbuf.as_mpi_c(count)).Wait() for 
value in rbuf[:-1]: self.assertEqual(value, scalar(42)) self.assertEqual(rbuf[-1], scalar(-1)) comm.Barrier() def testReadWrite(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE for array, typecode in arrayimpl_loop_io(): with arrayimpl.test(self): etype = array.TypeMap[typecode] fh.Set_size(0) fh.Set_view(0, etype) count = 13 wbuf = array(42, typecode, count) for r in range(size): if r == rank: fh.Seek(0, MPI.SEEK_SET) fh.Write(wbuf.as_raw()) fh.Sync() comm.Barrier() fh.Sync() for n in range(0, len(wbuf)): rbuf = array(-1, typecode, n+1) fh.Seek(0, MPI.SEEK_SET) fh.Read(rbuf.as_mpi_c(n)) for value in rbuf[:-1]: self.assertEqual(value, scalar(42)) self.assertEqual(rbuf[-1], scalar(-1)) comm.Barrier() def testIReadIWrite(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE for array, typecode in arrayimpl_loop_io(): with arrayimpl.test(self): etype = array.TypeMap[typecode] fh.Set_size(0) fh.Set_view(0, etype) count = 13 wbuf = array(42, typecode, count) for r in range(size): if r == rank: fh.Seek(0, MPI.SEEK_SET) fh.Iwrite(wbuf.as_raw()).Wait() fh.Sync() comm.Barrier() fh.Sync() for n in range(0, len(wbuf)): rbuf = array(-1, typecode, n+1) fh.Seek(0, MPI.SEEK_SET) fh.Iread(rbuf.as_mpi_c(n)).Wait() for value in rbuf[:-1]: self.assertEqual(value, scalar(42)) self.assertEqual(rbuf[-1], scalar(-1)) comm.Barrier() def testReadWriteShared(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE for array, typecode in arrayimpl_loop_io(): with arrayimpl.test(self): etype = array.TypeMap[typecode] fh.Set_size(0) fh.Set_view(0, etype) count = 13 wbuf = array(rank%42, typecode, count) fh.Seek_shared(0, MPI.SEEK_SET) fh.Write_shared(wbuf.as_raw()) fh.Sync() comm.Barrier() fh.Sync() rbuf = array(-1, typecode, count+1) fh.Seek_shared(0, MPI.SEEK_SET) fh.Read_shared(rbuf.as_mpi_c(count)) for value in rbuf[:-1]: self.assertTrue(0<=value<42) self.assertEqual(value, rbuf[0]) self.assertEqual(rbuf[-1], scalar(-1)) comm.Barrier() def testIReadIWriteShared(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE for array, typecode in arrayimpl_loop_io(): with arrayimpl.test(self): etype = array.TypeMap[typecode] fh.Set_size(0) fh.Set_view(0, etype) count = 13 wbuf = array(rank%42, typecode, count) fh.Seek_shared(0, MPI.SEEK_SET) fh.Iwrite_shared(wbuf.as_raw()).Wait() fh.Sync() comm.Barrier() fh.Sync() rbuf = array(-1, typecode, count+1) fh.Seek_shared(0, MPI.SEEK_SET) fh.Iread_shared(rbuf.as_mpi_c(count)).Wait() for value in rbuf[:-1]: self.assertTrue(0<=value<42) self.assertEqual(value, rbuf[0]) self.assertEqual(rbuf[-1], scalar(-1)) comm.Barrier() # collective def testReadWriteAtAll(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE for array, typecode in arrayimpl_loop_io(): with arrayimpl.test(self): etype = array.TypeMap[typecode] fh.Set_size(0) fh.Set_view(0, etype) count = 13 wbuf = array(42, typecode, count) fh.Write_at_all(count*rank, wbuf.as_raw()) fh.Sync() comm.Barrier() fh.Sync() rbuf = array(-1, typecode, count+1) fh.Read_at_all(count*rank, rbuf.as_mpi_c(count)) for value in rbuf[:-1]: self.assertEqual(value, scalar(42)) self.assertEqual(rbuf[-1], scalar(-1)) comm.Barrier() def testIReadIWriteAtAll(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE for array, typecode in arrayimpl_loop_io(): with arrayimpl.test(self): try: # MPI 3.1 etype = array.TypeMap[typecode] fh.Set_size(0) fh.Set_view(0, etype) count 
= 13 wbuf = array(42, typecode, count) fh.Iwrite_at_all(count*rank, wbuf.as_raw()).Wait() fh.Sync() comm.Barrier() fh.Sync() rbuf = array(-1, typecode, count+1) fh.Iread_at_all(count*rank, rbuf.as_mpi_c(count)).Wait() for value in rbuf[:-1]: self.assertEqual(value, scalar(42)) self.assertEqual(rbuf[-1], scalar(-1)) comm.Barrier() except NotImplementedError: if MPI.Get_version() >= (3, 1): raise self.skipTest('mpi-iwrite_at_all') def testReadWriteAtAllBeginEnd(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE for array, typecode in arrayimpl_loop_io(): with arrayimpl.test(self): etype = array.TypeMap[typecode] fh.Set_size(0) fh.Set_view(0, etype) count = 13 wbuf = array(42, typecode, count) fh.Write_at_all_begin(count*rank, wbuf.as_raw()) fh.Write_at_all_end(wbuf.as_raw()) fh.Sync() comm.Barrier() fh.Sync() rbuf = array(-1, typecode, count+1) fh.Read_at_all_begin(count*rank, rbuf.as_mpi_c(count)) fh.Read_at_all_end(rbuf.as_raw()) for value in rbuf[:-1]: self.assertEqual(value, scalar(42)) self.assertEqual(rbuf[-1], scalar(-1)) comm.Barrier() def testReadWriteAll(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE for array, typecode in arrayimpl_loop_io(): with arrayimpl.test(self): etype = array.TypeMap[typecode] fh.Set_size(0) fh.Set_view(0, etype) count = 13 wbuf = array(42, typecode, count) fh.Seek(count*rank, MPI.SEEK_SET) fh.Write_all(wbuf.as_raw()) fh.Sync() comm.Barrier() fh.Sync() rbuf = array(-1, typecode, count+1) fh.Seek(count*rank, MPI.SEEK_SET) fh.Read_all(rbuf.as_mpi_c(count)) for value in rbuf[:-1]: self.assertEqual(value, scalar(42)) self.assertEqual(rbuf[-1], scalar(-1)) comm.Barrier() def testIReadIWriteAll(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE for array, typecode in arrayimpl_loop_io(): with arrayimpl.test(self): try: # MPI 3.1 etype = array.TypeMap[typecode] fh.Set_size(0) fh.Set_view(0, etype) count = 13 wbuf = array(42, typecode, count) fh.Seek(count*rank, MPI.SEEK_SET) fh.Iwrite_all(wbuf.as_raw()).Wait() fh.Sync() comm.Barrier() fh.Sync() rbuf = array(-1, typecode, count+1) fh.Seek(count*rank, MPI.SEEK_SET) fh.Iread_all(rbuf.as_mpi_c(count)).Wait() for value in rbuf[:-1]: self.assertEqual(value, scalar(42)) self.assertEqual(rbuf[-1], scalar(-1)) comm.Barrier() except NotImplementedError: if MPI.Get_version() >= (3, 1): raise self.skipTest('mpi-iwrite_all') def testReadWriteAllBeginEnd(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE for array, typecode in arrayimpl_loop_io(): with arrayimpl.test(self): etype = array.TypeMap[typecode] fh.Set_size(0) fh.Set_view(0, etype) count = 13 wbuf = array(42, typecode, count) fh.Seek(count*rank, MPI.SEEK_SET) fh.Write_all_begin(wbuf.as_raw()) fh.Write_all_end(wbuf.as_raw()) fh.Sync() comm.Barrier() fh.Sync() rbuf = array(-1, typecode, count+1) fh.Seek(count*rank, MPI.SEEK_SET) fh.Read_all_begin(rbuf.as_mpi_c(count)) fh.Read_all_end(rbuf.as_raw()) for value in rbuf[:-1]: self.assertEqual(value, scalar(42)) self.assertEqual(rbuf[-1], scalar(-1)) comm.Barrier() def testReadWriteOrdered(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE for array, typecode in arrayimpl_loop_io(): with arrayimpl.test(self): etype = array.TypeMap[typecode] fh.Set_size(0) fh.Set_view(0, etype) count = 13 wbuf = array(rank%42, typecode, count) fh.Seek_shared(0, MPI.SEEK_SET) fh.Write_ordered(wbuf.as_raw()) fh.Sync() comm.Barrier() fh.Sync() rbuf = array(-1, typecode, 
count+1) fh.Seek_shared(0, MPI.SEEK_SET) fh.Read_ordered(rbuf.as_mpi_c(count)) for value in rbuf[:-1]: self.assertEqual(value, scalar(rank%42)) self.assertEqual(rbuf[-1], scalar(-1)) comm.Barrier() def testReadWriteOrderedBeginEnd(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE for array, typecode in arrayimpl_loop_io(): with arrayimpl.test(self): etype = array.TypeMap[typecode] fh.Set_size(0) fh.Set_view(0, etype) count = 13 wbuf = array(rank%42, typecode, count) fh.Seek_shared(0, MPI.SEEK_SET) fh.Write_ordered_begin(wbuf.as_raw()) fh.Write_ordered_end(wbuf.as_raw()) fh.Sync() comm.Barrier() fh.Sync() rbuf = array(-1, typecode, count+1) fh.Seek_shared(0, MPI.SEEK_SET) fh.Read_ordered_begin(rbuf.as_mpi_c(count)) fh.Read_ordered_end(rbuf.as_raw()) for value in rbuf[:-1]: self.assertEqual(value, scalar(rank%42)) self.assertEqual(rbuf[-1], scalar(-1)) comm.Barrier() class BaseTestIOView(BaseTestIO): def _test_contiguous(self, combiner): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE for array, typecode in arrayimpl_loop_io(): with arrayimpl.test(self): comm.Barrier() fh.Set_size(0) btype = array.TypeMap[typecode] if combiner == MPI.COMBINER_NAMED: etype = btype ftype = btype if combiner == MPI.COMBINER_DUP: etype = btype.Dup().Commit() ftype = etype.Dup().Commit() if combiner == MPI.COMBINER_CONTIGUOUS: etype = btype ftype = etype.Create_contiguous(7).Commit() fh.Set_view(0, etype, ftype) for i in range(3): wval = 10*(rank+1)+i wbuf = array(wval, typecode, 7) fh.Write_ordered(wbuf.as_raw()) fh.Sync() comm.Barrier() fh.Set_view(0, etype, ftype) for i in range(3): rval = 10*(rank+1)+i rbuf = array(0, typecode, 7) fh.Read_ordered(rbuf.as_raw()) for value in rbuf: self.assertEqual(value, scalar(rval)) if ftype != btype: ftype.Free() fh.Set_view(0, etype, etype) for i in range(3): for r in range(size): rval = 10*(r+1)+i rbuf = array(0, typecode, 7) fh.Read_all(rbuf.as_raw()) for value in rbuf: self.assertEqual(value, scalar(rval)) if etype != btype: etype.Free() def _test_strided(self, combiner): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE for array, typecode in arrayimpl_loop_io(): with arrayimpl.test(self): comm.Barrier() fh.Set_size(0) etype = array.TypeMap[typecode] esize = etype.size index1 = [0, 2, 4, 6] index2 = [1, 3, 5] if combiner in ( MPI.COMBINER_VECTOR, MPI.COMBINER_HVECTOR, ): if combiner == MPI.COMBINER_VECTOR: ftype1 = etype.Create_vector(4, 1, 2).Commit() if combiner == MPI.COMBINER_HVECTOR: ftype1 = etype.Create_hvector(4, 1, esize*2).Commit() fbase2 = etype.Create_indexed([1]*3, index2) ftype2 = fbase2.Create_resized(0, ftype1.extent).Commit() fbase2.Free() if combiner in ( MPI.COMBINER_INDEXED, MPI.COMBINER_INDEXED_BLOCK, MPI.COMBINER_HINDEXED, MPI.COMBINER_HINDEXED_BLOCK, ): INDEXED = MPI.COMBINER_INDEXED INDEXED_BLOCK = MPI.COMBINER_INDEXED_BLOCK HINDEXED = MPI.COMBINER_HINDEXED HINDEXED_BLOCK = MPI.COMBINER_HINDEXED_BLOCK if combiner == INDEXED: Create = MPI.Datatype.Create_indexed if combiner == HINDEXED: Create = MPI.Datatype.Create_hindexed if combiner == INDEXED_BLOCK: Create = MPI.Datatype.Create_indexed_block if combiner == HINDEXED_BLOCK: Create = MPI.Datatype.Create_hindexed_block if combiner in (INDEXED, HINDEXED): blens1 = [1] * 4 blens2 = [1] * 3 if combiner in (INDEXED_BLOCK, HINDEXED_BLOCK): blens1 = 1 blens2 = 1 if combiner in (INDEXED, INDEXED_BLOCK): disps1 = index1 disps2 = index2 if combiner in ( HINDEXED, HINDEXED_BLOCK): disps1 = [esize*i for i in index1] 
disps2 = [esize*i for i in index2] ftype1 = Create(etype, blens1, disps1).Commit() fbase2 = Create(etype, blens2, disps2) ftype2 = fbase2.Create_resized(0, ftype1.extent).Commit() fbase2.Free() if combiner == MPI.COMBINER_STRUCT: ftype1 = MPI.Datatype.Create_struct( [1] * 4, [esize*i for i in index1], [etype] * 4, ).Commit() fbase2 = MPI.Datatype.Create_struct( [1] * 3, [esize*i for i in index2], [etype] * 3, ) ftype2 = fbase2.Create_resized( 0, ftype1.extent, ).Commit() fbase2.Free() # fh.Set_view(0, etype, ftype1) for i in range(3): wval = 10*(rank+1)+i warg = [wval+j for j in range(0,7,2)] wbuf = array(warg, typecode) fh.Write_ordered(wbuf.as_raw()) fh.Set_view(0, etype, ftype2) for i in range(3): wval = 10*(rank+1)+i warg = [wval+j for j in range(1,7,2)] wbuf = array(warg, typecode) fh.Write_ordered(wbuf.as_raw()) fh.Sync() comm.Barrier() fh.Set_view(0, etype, ftype1) for i in range(3): rval = 10*(rank+1)+i rbuf = array(0, typecode, 4) fh.Read_ordered(rbuf.as_raw()) for value, j in zip(rbuf, range(0,7,2)): self.assertEqual(value, scalar(rval+j)) fh.Set_view(0, etype, ftype2) for i in range(3): rval = 10*(rank+1)+i rbuf = array(0, typecode, 3) fh.Read_ordered(rbuf.as_raw()) for value, j in zip(rbuf, range(1,7,2)): self.assertEqual(value, scalar(rval+j)) ftype1.Free() ftype2.Free() fh.Set_view(0, etype, etype) for i in range(3): for r in range(size): rval = 10*(r+1)+i rbuf = array(0, typecode, 7) fh.Read_all(rbuf.as_raw()) for j, value in enumerate(rbuf): self.assertEqual(value, scalar(rval+j)) def testNamed(self): self._test_contiguous(MPI.COMBINER_NAMED) def testDup(self): self._test_contiguous(MPI.COMBINER_DUP) def testContiguous(self): self._test_contiguous(MPI.COMBINER_CONTIGUOUS) def testVector(self): self._test_strided(MPI.COMBINER_VECTOR) def testHVector(self): self._test_strided(MPI.COMBINER_HVECTOR) def testIndexed(self): self._test_strided(MPI.COMBINER_INDEXED) def testHIndexed(self): self._test_strided(MPI.COMBINER_HINDEXED) def testIndexedBlock(self): self._test_strided(MPI.COMBINER_INDEXED_BLOCK) def testHIndexedBlock(self): self._test_strided(MPI.COMBINER_HINDEXED_BLOCK) def testStruct(self): self._test_strided(MPI.COMBINER_STRUCT) def testSubarray(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE for array, typecode in arrayimpl_loop_io(): with arrayimpl.test(self): comm.Barrier() fh.Set_size(0) etype = array.TypeMap[typecode] ftype = etype.Create_subarray( [size*7, 5], [7, 5], [rank*7, 0], ).Commit() fh.Set_view(0, etype, ftype) for i in range(3): wval = 10*(rank+1)+i wbuf = array(wval, typecode, 7*5) fh.Write_all(wbuf.as_raw()) fh.Sync() comm.Barrier() fh.Set_view(0, etype, ftype) for i in range(3): rval = 10*(rank+1)+i rbuf = array(0, typecode, 7*5) fh.Read_all(rbuf.as_raw()) for value in rbuf: self.assertEqual(value, scalar(rval)) ftype.Free() fh.Set_view(0, etype, etype) for i in range(3): for r in range(size): rval = 10*(r+1)+i rbuf = array(0, typecode, 7*5) fh.Read_all(rbuf.as_raw()) for value in rbuf: self.assertEqual(value, scalar(rval)) def testDarrayBlock(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE block = MPI.DISTRIBUTE_BLOCK none = MPI.DISTRIBUTE_NONE dflt = MPI.DISTRIBUTE_DFLT_DARG for array, typecode in arrayimpl_loop_io(): with arrayimpl.test(self): comm.Barrier() fh.Set_size(0) etype = array.TypeMap[typecode] ftype = etype.Create_darray( size, rank, [size*7, 5], [block, none], [dflt, dflt], [size, 1], ).Commit() fh.Set_view(0, etype, ftype) for i in range(3): wval = 10*(rank+1)+i 
wbuf = array(wval, typecode, 7*5) fh.Write_all(wbuf.as_raw()) fh.Sync() comm.Barrier() fh.Set_view(0, etype, ftype) for i in range(3): rval = 10*(rank+1)+i rbuf = array(0, typecode, 7*5) fh.Read_all(rbuf.as_raw()) for value in rbuf: self.assertEqual(value, scalar(rval)) ftype.Free() fh.Set_view(0, etype, etype) for i in range(3): for r in range(size): for j in range(7): rval = 10*(r+1)+i rbuf = array(0, typecode, 5) fh.Read_all(rbuf.as_raw()) for value in rbuf: self.assertEqual(value, scalar(rval)) def testDarrayCyclic(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE cyclic = MPI.DISTRIBUTE_CYCLIC none = MPI.DISTRIBUTE_NONE dflt = MPI.DISTRIBUTE_DFLT_DARG for array, typecode in arrayimpl_loop_io(): with arrayimpl.test(self): comm.Barrier() fh.Set_size(0) etype = array.TypeMap[typecode] ftype = etype.Create_darray( size, rank, [size*7, 5], [cyclic, none], [1, dflt], [size, 1], ).Commit() fh.Set_view(0, etype, ftype) for i in range(3): wval = 10*(rank+1)+i wbuf = array(wval, typecode, 7*5) fh.Write_all(wbuf.as_raw()) fh.Sync() comm.Barrier() fh.Set_view(0, etype, ftype) for i in range(3): rval = 10*(rank+1)+i rbuf = array(0, typecode, 7*5) fh.Read_all(rbuf.as_raw()) for value in rbuf: self.assertEqual(value, scalar(rval)) ftype.Free() fh.Set_view(0, etype, etype) for i in range(3): for j in range(7): for r in range(size): rval = 10*(r+1)+i rbuf = array(0, typecode, 5) fh.Read_all(rbuf.as_raw()) for value in rbuf: self.assertEqual(value, scalar(rval)) @unittest.skipMPI('MPICH1') @unittest.skipMPI('LAM/MPI') class TestIOBasicSelf(BaseTestIOBasic, unittest.TestCase): COMM = MPI.COMM_SELF @unittest.skipMPI('openmpi(<2.2.0)') @unittest.skipMPI('msmpi') @unittest.skipMPI('MPICH2') @unittest.skipMPI('MPICH1') @unittest.skipMPI('LAM/MPI') class TestIOBasicWorld(BaseTestIOBasic, unittest.TestCase): COMM = MPI.COMM_WORLD @unittest.skipMPI('mpich(>=4.0.0,<4.1.0)') @unittest.skipMPI('openmpi(<2.2.0)') @unittest.skipMPI('MPICH1') @unittest.skipMPI('LAM/MPI') class TestIOViewSelf(BaseTestIOView, unittest.TestCase): COMM = MPI.COMM_SELF @unittest.skipMPI('mpich(>=4.0.0,<4.1.0)') @unittest.skipMPI('openmpi(<2.2.0)') @unittest.skipMPI('msmpi') @unittest.skipMPI('MPICH2') @unittest.skipMPI('MPICH1') @unittest.skipMPI('LAM/MPI') class TestIOViewWorld(BaseTestIOView, unittest.TestCase): COMM = MPI.COMM_WORLD @unittest.skipMPI('msmpi') @unittest.skipMPI('openmpi') @unittest.skipMPI('impi', os.name == 'nt') class TestDatarep(unittest.TestCase): def testRegister(self): def extent_fn(dtype): return dtype.extent try: MPI.Register_datarep( "mpi4py-datarep-dummy", read_fn=None, write_fn=None, extent_fn=extent_fn, ) except NotImplementedError: self.skipTest('mpi-register-datrep') with self.assertRaises(MPI.Exception) as cm: MPI.Register_datarep( "mpi4py-datarep-dummy", read_fn=None, write_fn=None, extent_fn=extent_fn, ) ierr = cm.exception.Get_error_class() self.assertEqual(ierr, MPI.ERR_DUP_DATAREP) def have_feature(): case = BaseTestIO() case.COMM = TestIOBasicSelf.COMM case.prefix = TestIOBasicSelf.prefix case.setUp() case.tearDown() try: have_feature() except NotImplementedError: unittest.disable(BaseTestIO, 'mpi-io') if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_mpiapi.py000066400000000000000000000116061475341043600166220ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import os, re, sys import subprocess as sp import shutil mod_file = MPI.__file__ pxd_file = os.path.join(os.path.dirname(mod_file), 'libmpi.pxd') 
mpi_small_count_allowed = [ 'MPI_Op_create', ] mpi_fortran = [ 'MPI_Type_create_f90_integer', 'MPI_Type_create_f90_real', 'MPI_Type_create_f90_complex', ] mpi_deprecated = [ 'MPI_Attr_delete', 'MPI_Attr_get', 'MPI_Attr_put', 'MPI_Info_get', 'MPI_Info_get_valuelen', 'MPI_Keyval_create', 'MPI_Keyval_free', ] mpi_removed = [ 'MPI_Address', 'MPI_Errhandler_create', 'MPI_Errhandler_get', 'MPI_Errhandler_set', 'MPI_Type_extent', 'MPI_Type_hindexed', 'MPI_Type_hvector', 'MPI_Type_lb', 'MPI_Type_struct', 'MPI_Type_ub', ] mpi_missing = [] if MPI.Get_version() < (4, 1): mpi_missing += [ f'MPI_Remove_error_{func}' for func in ('class', 'code', 'string') ] mpi_missing += [ f'MPI_Status_{func}_{attr}' for func in ('get', 'set') for attr in ('source', 'tag', 'error') ] mpi_missing += [ f'MPI_Request_get_status_{func}' for func in ('any', 'all', 'some') ] mpi_missing += [ f'MPI_Buffer_{func}' for func in ('flush', 'iflush') ] + [ f'MPI_{cls}_{func}_buffer{kind}' for cls in ('Comm', 'Session') for func in ('attach', 'detach', 'flush', 'iflush') for kind in (('',) if 'flush' in func else ('', '_c')) ] mpi_missing += [ 'MPI_Type_get_value_index', 'MPI_Get_hw_resource_info', ] if MPI.Get_version() < (6, 0): mpi_missing += [ 'MPI_Comm_ack_failed', 'MPI_Comm_agree', 'MPI_Comm_get_failed', 'MPI_Comm_iagree', 'MPI_Comm_is_revoked', 'MPI_Comm_ishrink', 'MPI_Comm_revoke', 'MPI_Comm_shrink', ] name, version = MPI.get_vendor() if name == 'MPICH' and version < (4, 0, 3): mpi_missing += [ 'MPI_Status_set_elements_c', ] @unittest.skipMPI('MPI(<4.0)') class TestMPIAPI(unittest.TestCase): MPINAME = r"MPI_[A-Z][a-z0-9_]+" def get_api_symbols(self): regex = re.compile(rf"^\s*int\s+({self.MPINAME})\s*\(") api_symbols = set() with open(pxd_file) as fh: for line in fh: match = regex.search(line) if match: sym = match.groups()[0] api_symbols.add(sym) return api_symbols def get_mod_symbols(self): nm = shutil.which('nm') nm_flags = ['-Pu'] if sys.platform == 'linux': nm_flags.append('-D') cmd = [nm, *nm_flags, mod_file] out = sp.check_output(cmd, close_fds=False) nm_output = out.decode() regex = re.compile(rf"^_?({self.MPINAME}) U.*$") mod_symbols = set() for line in nm_output.split("\n"): match = regex.search(line) if match: sym = match.groups()[0] mod_symbols.add(sym) return mod_symbols @unittest.skipIf(shutil.which('nm') is None, 'nm') @unittest.skipUnless(hasattr(os, 'posix_spawn'), 'os.posix_spawn') def testLargeCountSymbols(self): api_symbols = self.get_api_symbols() large_count = {sym for sym in api_symbols if sym.endswith("_c")} mpi_symbols = self.get_mod_symbols() for sym in mpi_small_count_allowed: self.assertIn(sym, mpi_symbols) mpi_symbols.difference_update(mpi_small_count_allowed) small_count = {sym[:-2] for sym in large_count} bad_symbols = set.intersection(small_count, mpi_symbols) self.assertFalse(bad_symbols) @unittest.skipIf(shutil.which('nm') is None, 'nm') @unittest.skipUnless(hasattr(os, 'posix_spawn'), 'os.posix_spawn') def testSymbolCoverage(self): api_symbols = self.get_api_symbols() mod_symbols = self.get_mod_symbols() self.assertTrue(api_symbols) self.assertTrue(mod_symbols) uncovered = set.difference(api_symbols, mod_symbols) uncovered.difference_update(mpi_deprecated) uncovered.difference_update(mpi_removed) uncovered.difference_update(mpi_missing) for sym in mod_symbols: if sym.endswith("_c"): uncovered.discard(sym[:-2]) uncovered.discard(sym[:-2] + '_x') if sym.endswith("_x"): uncovered.discard(sym[:-2]) uncovered.discard(sym[:-2] + '_c') if MPI.REAL == MPI.DATATYPE_NULL or MPI.REAL.Get_size() == 0: 
uncovered.difference_update(mpi_fortran) if name == 'MPICH' and MPI.DISPLACEMENT_CURRENT == 0: mpiio = re.compile('MPI_(File_.*|Register_datarep)') uncovered = set(filter( lambda s: not mpiio.match(s), uncovered)) self.assertFalse(uncovered) if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_mpimem.py000066400000000000000000000013711475341043600166250ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest class TestMemory(unittest.TestCase): def testMemory1(self): for size in range(0, 10000, 100): try: mem1 = MPI.Alloc_mem(size) self.assertEqual(len(mem1), size) MPI.Free_mem(mem1) except NotImplementedError: self.skipTest('mpi-alloc_mem') def testMemory2(self): for size in range(0, 10000, 100): try: mem2 = MPI.Alloc_mem(size, MPI.INFO_NULL) self.assertEqual(len(mem2), size) MPI.Free_mem(mem2) except NotImplementedError: self.skipTest('mpi-alloc_mem') if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_msgspec.py000066400000000000000000001457701475341043600170160ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import sys from arrayimpl import ( array, numpy, cupy, numba, ) from arrayimpl import typestr typemap = MPI.Datatype.fromcode # --- class BaseBuf: def __init__(self, typecode, initializer): self._buf = array.array(typecode, initializer) def __eq__(self, other): return self._buf == other._buf def __ne__(self, other): return self._buf != other._buf def __len__(self): return len(self._buf) def __getitem__(self, item): return self._buf[item] def __setitem__(self, item, value): self._buf[item] = value._buf # --- try: import dlpackimpl as dlpack except ImportError: dlpack = None class DLPackCPUBuf(BaseBuf): versioned = True def __init__(self, typecode, initializer): super().__init__(typecode, initializer) self.managed = dlpack.make_dl_managed_tensor(self._buf, self.versioned) def __del__(self): self.managed = None if sys and not hasattr(sys, 'pypy_version_info'): if sys.getrefcount(self._buf) > 2: raise RuntimeError('dlpack: possible reference leak') def __dlpack_device__(self): device = self.managed.dl_tensor.device return (device.device_type, device.device_id) def __dlpack__( self, stream=None, max_version=None, dl_device=None, copy=None, ): kDLCPU = dlpack.DLDeviceType.kDLCPU managed = self.managed device = managed.dl_tensor.device if device.device_type == kDLCPU: assert stream is None else: assert stream == -1 capsule = dlpack.make_py_capsule(managed, self.versioned) return capsule class DLPackCPUBufV0(DLPackCPUBuf): versioned = False def __dlpack__(self, stream=None): return super().__dlpack__(stream=stream) if cupy is not None: class DLPackGPUBuf(BaseBuf): has_dlpack = None dev_type = None def __init__(self, typecode, initializer): self._buf = cupy.array(initializer, dtype=typecode) self.has_dlpack = hasattr(self._buf, '__dlpack_device__') # TODO(leofang): test CUDA managed memory? 
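# CuPy compiled for AMD HIP reports its DLPack device as kDLROCM, while a
# CUDA build reports kDLCUDA; record the matching device type here so the
# __dlpack_device__ fallback below returns what a DLPack consumer expects.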
if cupy.cuda.runtime.is_hip: self.dev_type = dlpack.DLDeviceType.kDLROCM else: self.dev_type = dlpack.DLDeviceType.kDLCUDA def __del__(self): if sys and not hasattr(sys, 'pypy_version_info'): if sys.getrefcount(self._buf) > 2: raise RuntimeError('dlpack: possible reference leak') def __dlpack_device__(self): if self.has_dlpack: return self._buf.__dlpack_device__() else: return (self.dev_type, self._buf.device.id) if False: # TODO: wait until CuPy supports DLPack v1.0 def __dlpack__(self, stream=None, **kwargs): assert self.has_dlpack cupy.cuda.get_current_stream().synchronize() return self._buf.__dlpack__(stream=-1, **kwargs) else: def __dlpack__(self, stream=None): cupy.cuda.get_current_stream().synchronize() if self.has_dlpack: return self._buf.__dlpack__(stream=-1) else: return self._buf.toDlpack() class DLPackGPUBufV0(DLPackGPUBuf): def __dlpack__(self, stream=None): cupy.cuda.get_current_stream().synchronize() if self.has_dlpack: return self._buf.__dlpack__(stream=-1) else: return self._buf.toDlpack() else: class DLPackGPUBufInitMixin: def __init__(self, *args): super().__init__(*args) kDLCUDA = dlpack.DLDeviceType.kDLCUDA device = self.managed.dl_tensor.device device.device_type = kDLCUDA class DLPackGPUBuf(DLPackGPUBufInitMixin, DLPackCPUBuf): pass class DLPackGPUBufV0(DLPackGPUBufInitMixin, DLPackCPUBufV0): pass # --- class CAIBuf(BaseBuf): def __init__(self, typecode, initializer, readonly=False): super().__init__(typecode, initializer) address = self._buf.buffer_info()[0] typecode = self._buf.typecode itemsize = self._buf.itemsize self.__cuda_array_interface__ = dict( version = 0, data = (address, readonly), typestr = typestr(typecode, itemsize), shape = (len(self._buf), 1, 1), strides = (itemsize,) * 3, descr = [('', typestr(typecode, itemsize))], ) cupy_issue_2259 = False if cupy is not None: cupy_issue_2259 = not isinstance( cupy.zeros((2,2)).T.__cuda_array_interface__['strides'], tuple ) # --- def Sendrecv(smsg, rmsg): MPI.COMM_SELF.Sendrecv(sendbuf=smsg, dest=0, sendtag=0, recvbuf=rmsg, source=0, recvtag=0, status=MPI.Status()) class TestMessageSimple(unittest.TestCase): def testMessageBad(self): buf = MPI.Alloc_mem(5) empty = [None, 0, "B"] def f(): Sendrecv([buf, 0, 0, "i", None], empty) self.assertRaises(ValueError, f) def f(): Sendrecv([buf, 0, "\0"], empty) self.assertRaises(KeyError, f) def f(): Sendrecv([buf, -1, "i"], empty) self.assertRaises(ValueError, f) def f(): Sendrecv([buf, 0, -1, "i"], empty) self.assertRaises(ValueError, f) def f(): Sendrecv([buf, 0, +2, "i"], empty) self.assertRaises(ValueError, f) def f(): Sendrecv([None, 1, 0, "i"], empty) self.assertRaises(ValueError, f) def f(): Sendrecv([buf, None, 0, "i"], empty) self.assertRaises(ValueError, f) def f(): Sendrecv([buf, 0, 1, MPI.DATATYPE_NULL], empty) self.assertRaises(ValueError, f) def f(): Sendrecv([buf, None, 0, MPI.DATATYPE_NULL], empty) self.assertRaises(ValueError, f) try: t = MPI.INT.Create_resized(0, -4).Commit() def f(): Sendrecv([buf, None, t], empty) self.assertRaises(ValueError, f) def f(): Sendrecv([buf, 0, 1, t], empty) self.assertRaises(ValueError, f) t.Free() except NotImplementedError: pass MPI.Free_mem(buf) buf = [1,2,3,4] def f(): Sendrecv([buf, 4, 0, "i"], empty) self.assertRaises(TypeError, f) buf = {1:2,3:4} def f(): Sendrecv([buf, 4, 0, "i"], empty) self.assertRaises(TypeError, f) def f(): Sendrecv(b"abc", b"abc") self.assertRaises((BufferError, TypeError, ValueError), f) def f(): Sendrecv(object, empty) self.assertRaises(TypeError, f) def testMessageNone(self): empty = [None, 
0, "B"] Sendrecv(empty, empty) empty = [None, "B"] Sendrecv(empty, empty) def testMessageBottom(self): empty = [MPI.BOTTOM, 0, "B"] Sendrecv(empty, empty) empty = [MPI.BOTTOM, "B"] Sendrecv(empty, empty) def testMessageBytes(self): sbuf = b"abc" rbuf = bytearray(3) Sendrecv([sbuf, "c"], [rbuf, MPI.CHAR]) self.assertEqual(sbuf, rbuf) def testMessageBytearray(self): sbuf = bytearray(b"abc") rbuf = bytearray(3) Sendrecv([sbuf, "c"], [rbuf, MPI.CHAR]) self.assertEqual(sbuf, rbuf) def testMessageMemoryView(self): sbuf = memoryview(b"abc") rbuf = bytearray(3) Sendrecv([sbuf, "c"], [rbuf, MPI.CHAR]) self.assertEqual(sbuf, rbuf) self.assertRaises((BufferError, TypeError, ValueError), Sendrecv, [rbuf, "c"], [sbuf, "c"]) @unittest.skipMPI('msmpi(<8.0.0)') class TestMessageBlock(unittest.TestCase): @unittest.skipIf(MPI.COMM_WORLD.Get_size() < 2, 'mpi-world-size<2') def testMessageBad(self): comm = MPI.COMM_WORLD buf = MPI.Alloc_mem(4) empty = [None, 0, "B"] def f(): comm.Alltoall([buf, None, "i"], empty) self.assertRaises(ValueError, f) MPI.Free_mem(buf) class BaseTestMessageSimpleArray: TYPECODES = "bhil"+"BHIL"+"fd" def array(self, typecode, initializer): raise NotImplementedError def check1(self, z, s, r, typecode): r[:] = z Sendrecv(s, r) for a, b in zip(s, r): self.assertEqual(a, b) def check2(self, z, s, r, typecode): datatype = typemap(typecode) for type in (None, typecode, datatype): r[:] = z Sendrecv([s, type], [r, type]) for a, b in zip(s, r): self.assertEqual(a, b) def check3(self, z, s, r, typecode): size = len(r) for count in range(size): r[:] = z Sendrecv([s, count], [r, count]) for i in range(count): self.assertEqual(r[i], s[i]) for i in range(count, size): self.assertEqual(r[i], z[0]) for count in range(size): r[:] = z Sendrecv([s, (count, None)], [r, (count, None)]) for i in range(count): self.assertEqual(r[i], s[i]) for i in range(count, size): self.assertEqual(r[i], z[0]) for disp in range(size): r[:] = z Sendrecv([s, (None, disp)], [r, (None, disp)]) for i in range(disp): self.assertEqual(r[i], z[0]) for i in range(disp, size): self.assertEqual(r[i], s[i]) for disp in range(size): for count in range(size-disp): r[:] = z Sendrecv([s, (count, disp)], [r, (count, disp)]) for i in range(0, disp): self.assertEqual(r[i], z[0]) for i in range(disp, disp+count): self.assertEqual(r[i], s[i]) for i in range(disp+count, size): self.assertEqual(r[i], z[0]) def check4(self, z, s, r, typecode): datatype = typemap(typecode) for type in (None, typecode, datatype): for count in (None, len(s)): r[:] = z Sendrecv([s, count, type], [r, count, type]) for a, b in zip(s, r): self.assertEqual(a, b) def check5(self, z, s, r, typecode): datatype = typemap(typecode) for type in (None, typecode, datatype): for p in range(0, len(s)): r[:] = z Sendrecv([s, (p, None), type], [r, (p, None), type]) for a, b in zip(s[:p], r[:p]): self.assertEqual(a, b) for q in range(p, len(s)): count, displ = q-p, p r[:] = z Sendrecv([s, (count, displ), type], [r, (count, displ), type]) for a, b in zip(r[:p], z[:p]): self.assertEqual(a, b) for a, b in zip(r[p:q], s[p:q]): self.assertEqual(a, b) for a, b in zip(r[q:], z[q:]): self.assertEqual(a, b) def check6(self, z, s, r, typecode): datatype = typemap(typecode) for type in (None, typecode, datatype): for p in range(0, len(s)): r[:] = z Sendrecv([s, p, None, type], [r, p, None, type]) for a, b in zip(s[:p], r[:p]): self.assertEqual(a, b) for q in range(p, len(s)): count, displ = q-p, p r[:] = z Sendrecv([s, count, displ, type], [r, count, displ, type]) for a, b in zip(r[:p], 
z[:p]): self.assertEqual(a, b) for a, b in zip(r[p:q], s[p:q]): self.assertEqual(a, b) for a, b in zip(r[q:], z[q:]): self.assertEqual(a, b) def check(self, test): for t in tuple(self.TYPECODES): for n in range(1, 10): z = self.array(t, [0]*n) s = self.array(t, list(range(n))) r = self.array(t, [0]*n) test(z, s, r, t) def testArray1(self): self.check(self.check1) def testArray2(self): self.check(self.check2) def testArray3(self): self.check(self.check3) def testArray4(self): self.check(self.check4) def testArray5(self): self.check(self.check5) def testArray6(self): self.check(self.check6) def testBuffer(self): kDLCPU, kDLCUDA = 1, 2 obj = self.array('i', [0,1,2,3]) buf = MPI.buffer.frombuffer(obj) device_type = kDLCPU if hasattr(obj, '__dlpack_device__'): device_type, _ = obj.__dlpack_device__() elif hasattr(obj, '__cuda_array_interface__'): device_type = kDLCUDA if device_type == kDLCPU: buf.cast('i') buf.tobytes('i') buf[0] = buf[0] if device_type == kDLCUDA: with self.assertRaises(BufferError): buf.cast('i') with self.assertRaises(BufferError): buf.tobytes('i') with self.assertRaises(BufferError): buf[0] = buf[0] @unittest.skipIf(array is None, 'array') class TestMessageSimpleArray(unittest.TestCase, BaseTestMessageSimpleArray): def array(self, typecode, initializer): return array.array(typecode, initializer) @unittest.skipIf(numpy is None, 'numpy') class TestMessageSimpleNumPy(unittest.TestCase, BaseTestMessageSimpleArray): def array(self, typecode, initializer): return numpy.array(initializer, dtype=typecode) def testByteOrder(self): sbuf = numpy.zeros([3], 'i') rbuf = numpy.zeros([3], 'i') sbuf = sbuf.view(sbuf.dtype.newbyteorder('=')) rbuf = rbuf.view(rbuf.dtype.newbyteorder('=')) Sendrecv(sbuf, rbuf) byteorder = '<' if sys.byteorder == 'little' else '>' sbuf = sbuf.view(sbuf.dtype.newbyteorder(byteorder)) rbuf = rbuf.view(rbuf.dtype.newbyteorder(byteorder)) Sendrecv(sbuf, rbuf) byteorder = '>' if sys.byteorder == 'little' else '<' sbuf = sbuf.view(sbuf.dtype.newbyteorder(byteorder)) rbuf = rbuf.view(rbuf.dtype.newbyteorder(byteorder)) self.assertRaises(BufferError, Sendrecv, sbuf, rbuf) Sendrecv([sbuf, MPI.INT], [rbuf, MPI.INT]) def testOrderC(self): sbuf = numpy.ones([3,2]) rbuf = numpy.zeros([3,2]) Sendrecv(sbuf, rbuf) self.assertTrue((sbuf == rbuf).all()) def testOrderFortran(self): sbuf = numpy.ones([3,2]).T rbuf = numpy.zeros([3,2]).T Sendrecv(sbuf, rbuf) self.assertTrue((sbuf == rbuf).all()) def testReadonly(self): sbuf = numpy.ones([3]) rbuf = numpy.zeros([3]) sbuf.flags.writeable = False Sendrecv(sbuf, rbuf) self.assertTrue((sbuf == rbuf).all()) def testNotWriteable(self): sbuf = numpy.ones([3]) rbuf = numpy.zeros([3]) rbuf.flags.writeable = False self.assertRaises( (BufferError, ValueError, TypeError), Sendrecv, sbuf, rbuf ) def testNotContiguous(self): sbuf = numpy.ones([3,2])[:,0] rbuf = numpy.zeros([3]) self.assertRaises( (BufferError, ValueError, TypeError), Sendrecv, sbuf, rbuf, ) @unittest.skipIf(array is None, 'array') @unittest.skipIf(dlpack is None, 'dlpack') class TestMessageSimpleDLPackCPUBuf(unittest.TestCase, BaseTestMessageSimpleArray): def array(self, typecode, initializer): return DLPackCPUBuf(typecode, initializer) class TestMessageSimpleDLPackCPUBufV0(TestMessageSimpleDLPackCPUBuf): def array(self, typecode, initializer): return DLPackCPUBufV0(typecode, initializer) @unittest.skipIf(cupy is None and (array is None or dlpack is None), 'cupy') class TestMessageSimpleDLPackGPUBuf(unittest.TestCase, BaseTestMessageSimpleArray): def array(self, typecode, 
initializer): return DLPackGPUBuf(typecode, initializer) class TestMessageSimpleDLPackGPUBufV0(TestMessageSimpleDLPackGPUBuf): def array(self, typecode, initializer): return DLPackGPUBufV0(typecode, initializer) @unittest.skipIf(array is None, 'array') class TestMessageSimpleCAIBuf(unittest.TestCase, BaseTestMessageSimpleArray): def array(self, typecode, initializer): return CAIBuf(typecode, initializer) @unittest.skipIf(cupy is None, 'cupy') class TestMessageSimpleCuPy(unittest.TestCase, BaseTestMessageSimpleArray): def array(self, typecode, initializer): return cupy.array(initializer, dtype=typecode) def testOrderC(self): sbuf = cupy.ones([3,2]) rbuf = cupy.zeros([3,2]) Sendrecv(sbuf, rbuf) self.assertTrue((sbuf == rbuf).all()) @unittest.skipIf(cupy_issue_2259, 'cupy-issue-2259') def testOrderFortran(self): sbuf = cupy.ones([3,2]).T rbuf = cupy.zeros([3,2]).T Sendrecv(sbuf, rbuf) self.assertTrue((sbuf == rbuf).all()) @unittest.skipIf(cupy_issue_2259, 'cupy-issue-2259') def testNotContiguous(self): sbuf = cupy.ones([3,2])[:,0] rbuf = cupy.zeros([3]) self.assertRaises((BufferError, ValueError), Sendrecv, sbuf, rbuf) @unittest.skipIf(numba is None, 'numba') class TestMessageSimpleNumba(unittest.TestCase, BaseTestMessageSimpleArray): def array(self, typecode, initializer): n = len(initializer) arr = numba.cuda.device_array((n,), dtype=typecode) arr[:] = initializer return arr def testOrderC(self): sbuf = numba.cuda.device_array((6,)) sbuf[:] = 1 sbuf = sbuf.reshape(3,2) rbuf = numba.cuda.device_array((6,)) rbuf[:] = 0 rbuf = sbuf.reshape(3,2) Sendrecv(sbuf, rbuf) # numba arrays do not have the .all() method for i in range(3): for j in range(2): self.assertEqual(sbuf[i,j], rbuf[i,j]) def testOrderFortran(self): sbuf = numba.cuda.device_array((6,)) sbuf[:] = 1 sbuf = sbuf.reshape(3,2,order='F') rbuf = numba.cuda.device_array((6,)) rbuf[:] = 0 rbuf = sbuf.reshape(3,2,order='F') Sendrecv(sbuf, rbuf) # numba arrays do not have the .all() method for i in range(3): for j in range(2): self.assertEqual(sbuf[i,j], rbuf[i,j]) def testNotContiguous(self): sbuf = numba.cuda.device_array((6,)) sbuf[:] = 1 sbuf = sbuf.reshape(3,2)[:,0] rbuf = numba.cuda.device_array((3,)) rbuf[:] = 0 self.assertRaises((BufferError, ValueError), Sendrecv, sbuf, rbuf) # --- @unittest.skipIf(array is None, 'array') @unittest.skipIf(dlpack is None, 'dlpack') class TestMessageDLPackCPUBuf(unittest.TestCase): def testVersion(self): buf = DLPackCPUBuf('i', [0,1,2,3]) buf.managed.version.major = 0 self.assertRaises(BufferError, MPI.Get_address, buf) def testReadonly(self): smsg = DLPackCPUBuf('i', [0,1,2,3]) rmsg = DLPackCPUBuf('i', [0,0,0,0]) smsg.managed.flags |= dlpack.DLPACK_FLAG_BITMASK_READ_ONLY rmsg.managed.flags |= dlpack.DLPACK_FLAG_BITMASK_READ_ONLY MPI.Get_address(smsg) self.assertRaises(BufferError, Sendrecv, smsg, rmsg) def testDevice(self): buf = DLPackCPUBuf('i', [0,1,2,3]) buf.__dlpack_device__ = None self.assertRaises(TypeError, MPI.Get_address, buf) buf.__dlpack_device__ = lambda: None self.assertRaises(TypeError, MPI.Get_address, buf) buf.__dlpack_device__ = lambda: (None, 0) self.assertRaises(TypeError, MPI.Get_address, buf) buf.__dlpack_device__ = lambda: (1, None) self.assertRaises(TypeError, MPI.Get_address, buf) buf.__dlpack_device__ = lambda: (1,) self.assertRaises(ValueError, MPI.Get_address, buf) buf.__dlpack_device__ = lambda: (1, 0, 1) self.assertRaises(ValueError, MPI.Get_address, buf) del buf.__dlpack_device__ MPI.Get_address(buf) def testCapsule(self): buf = DLPackCPUBuf('i', [0,1,2,3]) # capsule = 
buf.__dlpack__() MPI.Get_address(buf) MPI.Get_address(buf) del capsule # capsule = buf.__dlpack__() retvals = [capsule] * 2 buf.__dlpack__ = lambda *args, **kwargs: retvals.pop() MPI.Get_address(buf) self.assertRaises(BufferError, MPI.Get_address, buf) del buf.__dlpack__ del capsule # buf.__dlpack__ = lambda *args, **kwargs: None self.assertRaises(BufferError, MPI.Get_address, buf) del buf.__dlpack__ def testNdim(self): buf = DLPackCPUBuf('i', [0,1,2,3]) dltensor = buf.managed.dl_tensor # for ndim in (2, 1, 0): dltensor.ndim = ndim MPI.Get_address(buf) # dltensor.ndim = -1 self.assertRaises(BufferError, MPI.Get_address, buf) # del dltensor def testShape(self): buf = DLPackCPUBuf('i', [0,1,2,3]) dltensor = buf.managed.dl_tensor # dltensor.ndim = 1 dltensor.shape[0] = -1 self.assertRaises(BufferError, MPI.Get_address, buf) # dltensor.ndim = 0 dltensor.shape = None dltensor.strides = None MPI.Get_address(buf) # dltensor.ndim = 1 dltensor.shape = None self.assertRaises(BufferError, MPI.Get_address, buf) # del dltensor def testStrides(self): buf = DLPackCPUBuf('i', range(8)) dltensor = buf.managed.dl_tensor # for order in ('C', 'F'): dltensor.ndim, dltensor.shape, dltensor.strides = \ dlpack.make_dl_shape([2, 2, 2], order=order) MPI.Get_address(buf) dltensor.strides[0] = -1 self.assertRaises(BufferError, MPI.Get_address, buf) # del dltensor def testDtypeCode(self): sbuf = DLPackCPUBuf('H', range(4)) rbuf = DLPackCPUBuf('H', [0]*4) dtype = sbuf.managed.dl_tensor.dtype dtype.code = dlpack.DLDataTypeCode.kDLOpaqueHandle dtype = None Sendrecv(sbuf, rbuf) for i in range(4): self.assertEqual(rbuf[i], i) def testDtypeLanes(self): sbuf = DLPackCPUBuf('I', range(4)) rbuf = DLPackCPUBuf('I', [0]*4) dtype = sbuf.managed.dl_tensor.dtype dtype.bits //= 2 dtype.lanes *= 2 dtype = None Sendrecv(sbuf, rbuf) for i in range(4): self.assertEqual(rbuf[i], i) def testContiguous(self): buf = DLPackCPUBuf('i', range(8)) dltensor = buf.managed.dl_tensor # dltensor.ndim, dltensor.shape, dltensor.strides = \ dlpack.make_dl_shape([2, 2, 2], order='C') s = dltensor.strides strides = [s[i] for i in range(dltensor.ndim)] s[0], s[1], s[2] = (strides[i] for i in [0, 1, 2]) MPI.Get_address(buf) s[0], s[1], s[2] = (strides[i] for i in [2, 1, 0]) MPI.Get_address(buf) s[0], s[1], s[2] = (strides[i] for i in [0, 2, 1]) self.assertRaises(BufferError, MPI.Get_address, buf) s[0], s[1], s[2] = (strides[i] for i in [1, 0, 2]) self.assertRaises(BufferError, MPI.Get_address, buf) del s # dltensor.ndim, dltensor.shape, dltensor.strides = \ dlpack.make_dl_shape([1, 3, 1], order='C') s = dltensor.strides MPI.Get_address(buf) for i in range(4): for j in range(4): s[0], s[2] = i, j MPI.Get_address(buf) s[1] = 0 self.assertRaises(BufferError, MPI.Get_address, buf) del s # del dltensor def testByteOffset(self): buf = DLPackCPUBuf('B', [0,1,2,3]) dltensor = buf.managed.dl_tensor # dltensor.ndim = 1 for i in range(len(buf)): dltensor.byte_offset = i mem = MPI.buffer(buf) self.assertEqual(mem[0], buf[i]) # del dltensor # --- @unittest.skipIf(array is None, 'array') class TestMessageCAIBuf(unittest.TestCase): def testReadonly(self): smsg = CAIBuf('i', [1,2,3], readonly=True) rmsg = CAIBuf('i', [0,0,0], readonly=True) MPI.Get_address(smsg) self.assertRaises(BufferError, Sendrecv, smsg, rmsg) def testNonContiguous(self): smsg = CAIBuf('i', [1,2,3]) rmsg = CAIBuf('i', [0,0,0]) Sendrecv(smsg, rmsg) strides = rmsg.__cuda_array_interface__['strides'] good_strides = strides[:-2] + (0, 7) rmsg.__cuda_array_interface__['strides'] = good_strides 
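# good_strides only changes the trailing dimensions, whose extent in CAIBuf's
# (n, 1, 1) shape is 1, so the buffer stays logically contiguous and the
# transfer must succeed; bad_strides below breaks contiguity in the first
# dimension (extent > 1) and must raise BufferError.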
Sendrecv(smsg, rmsg) bad_strides = (7,) + strides[1:] rmsg.__cuda_array_interface__['strides'] = bad_strides self.assertRaises(BufferError, Sendrecv, smsg, rmsg) def testAttrNone(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) rmsg.__cuda_array_interface__ = None self.assertRaises(TypeError, Sendrecv, smsg, rmsg) def testAttrEmpty(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) rmsg.__cuda_array_interface__ = dict() self.assertRaises(KeyError, Sendrecv, smsg, rmsg) def testAttrType(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) items = list(rmsg.__cuda_array_interface__.items()) rmsg.__cuda_array_interface__ = items self.assertRaises(TypeError, Sendrecv, smsg, rmsg) def testDataMissing(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) del rmsg.__cuda_array_interface__['data'] self.assertRaises(KeyError, Sendrecv, smsg, rmsg) def testDataNone(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) rmsg.__cuda_array_interface__['data'] = None self.assertRaises(TypeError, Sendrecv, smsg, rmsg) def testDataType(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) rmsg.__cuda_array_interface__['data'] = 0 self.assertRaises(TypeError, Sendrecv, smsg, rmsg) def testDataValue(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) dev_ptr = rmsg.__cuda_array_interface__['data'][0] rmsg.__cuda_array_interface__['data'] = (dev_ptr, ) self.assertRaises(ValueError, Sendrecv, smsg, rmsg) rmsg.__cuda_array_interface__['data'] = ( ) self.assertRaises(ValueError, Sendrecv, smsg, rmsg) rmsg.__cuda_array_interface__['data'] = (dev_ptr, False, None) self.assertRaises(ValueError, Sendrecv, smsg, rmsg) def testMask(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) rmsg.__cuda_array_interface__['mask'] = None Sendrecv(smsg, rmsg) rmsg.__cuda_array_interface__['mask'] = True self.assertRaises(BufferError, Sendrecv, smsg, rmsg) def testTypestrMissing(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) del rmsg.__cuda_array_interface__['typestr'] self.assertRaises(KeyError, Sendrecv, smsg, rmsg) def testTypestrNone(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) rmsg.__cuda_array_interface__['typestr'] = None self.assertRaises(TypeError, Sendrecv, smsg, rmsg) def testTypestrType(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) rmsg.__cuda_array_interface__['typestr'] = 42 self.assertRaises(TypeError, Sendrecv, smsg, rmsg) def testTypestrEndian(self): smsg = CAIBuf('i', [1,2,3]) rmsg = CAIBuf('i', [0,0,0]) typestr = smsg.__cuda_array_interface__['typestr'] byteorder = '>' if sys.byteorder == 'little' else '<' typestr = byteorder + typestr[1:] smsg.__cuda_array_interface__['typestr'] = typestr smsg.__cuda_array_interface__['descr'][0] = ('', typestr) self.assertRaises(BufferError, Sendrecv, smsg, rmsg) typestr = '#' + typestr[1:] smsg.__cuda_array_interface__['typestr'] = typestr smsg.__cuda_array_interface__['descr'][0] = ('', typestr) self.assertRaises(BufferError, Sendrecv, smsg, rmsg) typestr = '|' + typestr[1:] smsg.__cuda_array_interface__['typestr'] = typestr smsg.__cuda_array_interface__['descr'][0] = ('', typestr) Sendrecv(smsg, rmsg) def testTypestrItemsize(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) typestr = rmsg.__cuda_array_interface__['typestr'] rmsg.__cuda_array_interface__['typestr'] = typestr[:2]+'X' self.assertRaises(ValueError, Sendrecv, smsg, rmsg) def testShapeMissing(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) del 
rmsg.__cuda_array_interface__['shape'] self.assertRaises(KeyError, Sendrecv, smsg, rmsg) def testShapeNone(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) rmsg.__cuda_array_interface__['shape'] = None self.assertRaises(TypeError, Sendrecv, smsg, rmsg) def testShapeType(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) rmsg.__cuda_array_interface__['shape'] = 3 self.assertRaises(TypeError, Sendrecv, smsg, rmsg) def testShapeValue(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) rmsg.__cuda_array_interface__['shape'] = (3, -1) rmsg.__cuda_array_interface__['strides'] = None self.assertRaises(BufferError, Sendrecv, smsg, rmsg) def testStridesMissing(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) del rmsg.__cuda_array_interface__['strides'] Sendrecv(smsg, rmsg) self.assertEqual(smsg, rmsg) def testStridesNone(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) rmsg.__cuda_array_interface__['strides'] = None Sendrecv(smsg, rmsg) self.assertEqual(smsg, rmsg) def testStridesType(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) rmsg.__cuda_array_interface__['strides'] = 42 self.assertRaises(TypeError, Sendrecv, smsg, rmsg) def testDescrMissing(self): smsg = CAIBuf('d', [1,2,3]) rmsg = CAIBuf('d', [0,0,0]) del rmsg.__cuda_array_interface__['descr'] Sendrecv(smsg, rmsg) self.assertEqual(smsg, rmsg) def testDescrNone(self): smsg = CAIBuf('d', [1,2,3]) rmsg = CAIBuf('d', [0,0,0]) rmsg.__cuda_array_interface__['descr'] = None Sendrecv(smsg, rmsg) self.assertEqual(smsg, rmsg) def testDescrType(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) rmsg.__cuda_array_interface__['descr'] = 42 self.assertRaises(TypeError, Sendrecv, smsg, rmsg) def testDescrWarning(self): m, n = 5, 3 smsg = CAIBuf('d', list(range(m*n))) rmsg = CAIBuf('d', [0]*(m*n)) typestr = rmsg.__cuda_array_interface__['typestr'] itemsize = int(typestr[2:]) new_typestr = "|V"+str(itemsize*n) new_descr = [('', typestr)]*n rmsg.__cuda_array_interface__['shape'] = (m,) rmsg.__cuda_array_interface__['strides'] = (itemsize*n,) rmsg.__cuda_array_interface__['typestr'] = new_typestr rmsg.__cuda_array_interface__['descr'] = new_descr import warnings with warnings.catch_warnings(): warnings.simplefilter("error") self.assertRaises(RuntimeWarning, Sendrecv, smsg, rmsg) self.assertWarns(RuntimeWarning, Sendrecv, smsg, rmsg) self.assertEqual(smsg, rmsg) # --- def Alltoallv(smsg, rmsg): comm = MPI.COMM_SELF comm.Alltoallv(smsg, rmsg) @unittest.skipMPI('msmpi(<8.0.0)') class TestMessageVector(unittest.TestCase): def testMessageBad(self): buf = MPI.Alloc_mem(5) empty = [None, 0, [0], "B"] def f(): Alltoallv([buf, 0, [0], "i", None], empty) self.assertRaises(ValueError, f) def f(): Alltoallv([buf, 0, [0], "\0"], empty) self.assertRaises(KeyError, f) def f(): Alltoallv([buf, None, [0], MPI.DATATYPE_NULL], empty) self.assertRaises(ValueError, f) def f(): Alltoallv([buf, None, [0], "i"], empty) self.assertRaises(ValueError, f) try: t = MPI.INT.Create_resized(0, -4).Commit() def f(): Alltoallv([buf, None, [0], t], empty) self.assertRaises(ValueError, f) t.Free() except NotImplementedError: pass MPI.Free_mem(buf) buf = [1,2,3,4] def f(): Alltoallv([buf, 0, 0, "i"], empty) self.assertRaises(TypeError, f) buf = {1:2,3:4} def f(): Alltoallv([buf, 0, 0, "i"], empty) self.assertRaises(TypeError, f) def f(): Alltoallv(object, empty) self.assertRaises(TypeError, f) def testMessageNone(self): empty = [None, 0, "B"] Alltoallv(empty, empty) empty = [None, "B"] Alltoallv(empty, 
empty) def testMessageBottom(self): empty = [MPI.BOTTOM, 0, [0], "B"] Alltoallv(empty, empty) empty = [MPI.BOTTOM, 0, "B"] Alltoallv(empty, empty) empty = [MPI.BOTTOM, "B"] Alltoallv(empty, empty) def testMessageBytes(self): sbuf = b"abc" rbuf = bytearray(3) Alltoallv([sbuf, "c"], [rbuf, MPI.CHAR]) self.assertEqual(sbuf, rbuf) def testMessageBytearray(self): sbuf = bytearray(b"abc") rbuf = bytearray(3) Alltoallv([sbuf, "c"], [rbuf, MPI.CHAR]) self.assertEqual(sbuf, rbuf) @unittest.skipMPI('msmpi(<8.0.0)') class BaseTestMessageVectorArray: TYPECODES = "bhil"+"BHIL"+"fd" def array(self, typecode, initializer): raise NotImplementedError def check1(self, z, s, r, typecode): r[:] = z Alltoallv(s, r) for a, b in zip(s, r): self.assertEqual(a, b) def check2(self, z, s, r, typecode): datatype = typemap(typecode) for type in (None, typecode, datatype): r[:] = z Alltoallv([s, type], [r, type]) for a, b in zip(s, r): self.assertEqual(a, b) def check3(self, z, s, r, typecode): size = len(r) for count in range(size): r[:] = z Alltoallv([s, count], [r, count]) for i in range(count): self.assertEqual(r[i], s[i]) for i in range(count, size): self.assertEqual(r[i], z[0]) for count in range(size): r[:] = z Alltoallv([s, (count, None)], [r, (count, None)]) for i in range(count): self.assertEqual(r[i], s[i]) for i in range(count, size): self.assertEqual(r[i], z[0]) for disp in range(size): for count in range(size-disp): r[:] = z Alltoallv([s, ([count], [disp])], [r, ([count], [disp])]) for i in range(0, disp): self.assertEqual(r[i], z[0]) for i in range(disp, disp+count): self.assertEqual(r[i], s[i]) for i in range(disp+count, size): self.assertEqual(r[i], z[0]) def check4(self, z, s, r, typecode): datatype = typemap(typecode) for type in (None, typecode, datatype): for count in (None, len(s)): r[:] = z Alltoallv([s, count, type], [r, count, type]) for a, b in zip(s, r): self.assertEqual(a, b) def check5(self, z, s, r, typecode): datatype = typemap(typecode) for type in (None, typecode, datatype): for p in range(len(s)): r[:] = z Alltoallv([s, (p, None), type], [r, (p, None), type]) for a, b in zip(s[:p], r[:p]): self.assertEqual(a, b) for q in range(p, len(s)): count, displ = q-p, p r[:] = z Alltoallv([s, (count, [displ]), type], [r, (count, [displ]), type]) for a, b in zip(r[:p], z[:p]): self.assertEqual(a, b) for a, b in zip(r[p:q], s[p:q]): self.assertEqual(a, b) for a, b in zip(r[q:], z[q:]): self.assertEqual(a, b) def check6(self, z, s, r, typecode): datatype = typemap(typecode) for type in (None, typecode, datatype): for p in range(0, len(s)): r[:] = z Alltoallv([s, p, None, type], [r, p, None, type]) for a, b in zip(s[:p], r[:p]): self.assertEqual(a, b) for q in range(p, len(s)): count, displ = q-p, p r[:] = z Alltoallv([s, count, [displ], type], [r, count, [displ], type]) for a, b in zip(r[:p], z[:p]): self.assertEqual(a, b) for a, b in zip(r[p:q], s[p:q]): self.assertEqual(a, b) for a, b in zip(r[q:], z[q:]): self.assertEqual(a, b) def check(self, test): for t in tuple(self.TYPECODES): for n in range(1, 10): z = self.array(t, [0]*n) s = self.array(t, list(range(n))) r = self.array(t, [0]*n) test(z, s, r, t) def testArray1(self): self.check(self.check1) def testArray2(self): self.check(self.check2) def testArray3(self): self.check(self.check3) def testArray4(self): self.check(self.check4) def testArray5(self): self.check(self.check5) def testArray6(self): self.check(self.check6) @unittest.skipIf(array is None, 'array') class TestMessageVectorArray(unittest.TestCase, BaseTestMessageVectorArray): def 
array(self, typecode, initializer): return array.array(typecode, initializer) @unittest.skipIf(numpy is None, 'numpy') class TestMessageVectorNumPy(unittest.TestCase, BaseTestMessageVectorArray): def array(self, typecode, initializer): return numpy.array(initializer, dtype=typecode) def testCountNumPyArray(self): sbuf = bytearray(b"abc") rbuf = bytearray(4) count = numpy.array([3]) displ = numpy.array([1]) Alltoallv([sbuf, count], [rbuf, (3, displ)]) self.assertEqual(sbuf, rbuf[displ[0]:]) with self.assertRaises(TypeError): count = numpy.array([3.1]) displ = numpy.array([1.1]) Alltoallv([sbuf, count], [rbuf, (3, displ)]) def testCountNumPyScalar(self): sbuf = bytearray(b"abc") rbuf = bytearray(4) count = numpy.array([3])[0] displ = numpy.array([1])[0] Alltoallv([sbuf, count], [rbuf, (3, [displ])]) self.assertEqual(sbuf, rbuf[displ:]) with self.assertRaises(TypeError): count = numpy.array([3.1])[0] displ = numpy.array([1.1])[0] Alltoallv([sbuf, (3, [displ])], [rbuf, count]) def testCountNumPyZeroDim(self): sbuf = bytearray(b"xabc") rbuf = bytearray(3) count = numpy.array(3) displ = numpy.array(1) Alltoallv([sbuf, (3, [displ])], [rbuf, count]) self.assertEqual(sbuf[displ:], rbuf) with self.assertRaises(TypeError): count = numpy.array(3.0) displ = numpy.array(1.0) Alltoallv([sbuf, (3, [displ])], [rbuf, count]) @unittest.skipIf(array is None, 'array') class TestMessageVectorCAIBuf(unittest.TestCase, BaseTestMessageVectorArray): def array(self, typecode, initializer): return CAIBuf(typecode, initializer) @unittest.skipIf(cupy is None, 'cupy') class TestMessageVectorCuPy(unittest.TestCase, BaseTestMessageVectorArray): def array(self, typecode, initializer): return cupy.array(initializer, dtype=typecode) @unittest.skipIf(numba is None, 'numba') class TestMessageVectorNumba(unittest.TestCase, BaseTestMessageVectorArray): def array(self, typecode, initializer): n = len(initializer) arr = numba.cuda.device_array((n,), dtype=typecode) arr[:] = initializer return arr # --- def Alltoallw(smsg, rmsg): try: MPI.COMM_SELF.Alltoallw(smsg, rmsg) except NotImplementedError: if isinstance(smsg, (list, tuple)): smsg = smsg[0] if isinstance(rmsg, (list, tuple)): rmsg = rmsg[0] try: rmsg[:] = smsg except: pass class TestMessageVectorW(unittest.TestCase): def testMessageBad(self): sbuf = MPI.Alloc_mem(4) rbuf = MPI.Alloc_mem(4) def f(): Alltoallw([sbuf],[rbuf]) self.assertRaises(ValueError, f) def f(): Alltoallw([sbuf, [0], [0], [MPI.BYTE], None], [rbuf, [0], [0], [MPI.BYTE]]) self.assertRaises(ValueError, f) def f(): Alltoallw([sbuf, [0], [0], [MPI.BYTE]], [rbuf, [0], [0], [MPI.BYTE], None]) self.assertRaises(ValueError, f) def f(): Alltoallw([MPI.BOTTOM, None, [0], [MPI.BYTE]], [rbuf, [0], [0], [MPI.BYTE]]) self.assertRaises(ValueError, f) def f(): Alltoallw([MPI.BOTTOM, [0], None, [MPI.BYTE]], [rbuf, [0], [0], [MPI.BYTE]]) self.assertRaises(ValueError, f) MPI.Free_mem(sbuf) MPI.Free_mem(rbuf) def testMessageBottom(self): sbuf = b"abcxyz" rbuf = bytearray(6) saddr = MPI.Get_address(sbuf) raddr = MPI.Get_address(rbuf) stype = MPI.Datatype.Create_struct([6], [saddr], [MPI.CHAR]).Commit() rtype = MPI.Datatype.Create_struct([6], [raddr], [MPI.CHAR]).Commit() smsg = [MPI.BOTTOM, [1], [0] , [stype]] rmsg = [MPI.BOTTOM, ([1], [0]), [rtype]] try: Alltoallw(smsg, rmsg) self.assertEqual(sbuf, rbuf) finally: stype.Free() rtype.Free() def testMessageBytes(self): sbuf = b"abc" rbuf = bytearray(3) smsg = [sbuf, [3], [0], [MPI.CHAR]] rmsg = [rbuf, ([3], [0]), [MPI.CHAR]] Alltoallw(smsg, rmsg) self.assertEqual(sbuf, rbuf) def 
testMessageBytearray(self): sbuf = bytearray(b"abc") rbuf = bytearray(3) smsg = [sbuf, [3], [0], [MPI.CHAR]] rmsg = [rbuf, ([3], [0]), [MPI.CHAR]] Alltoallw(smsg, rmsg) self.assertEqual(sbuf, rbuf) sbuf = bytearray(b"abc") rbuf = bytearray(3) smsg = [sbuf, None, None, [MPI.CHAR]] rmsg = [rbuf, [MPI.CHAR]] Alltoallw(smsg, rmsg) self.assertEqual(sbuf[0], rbuf[0]) self.assertEqual(bytearray(2), rbuf[1:]) @unittest.skipIf(array is None, 'array') def testMessageArray(self): sbuf = array.array('i', [1,2,3]) rbuf = array.array('i', [0,0,0]) smsg = [sbuf, [3], [0], [MPI.INT]] rmsg = [rbuf, ([3], [0]), [MPI.INT]] Alltoallw(smsg, rmsg) self.assertEqual(sbuf, rbuf) @unittest.skipIf(numpy is None, 'numpy') def testMessageNumPy(self): sbuf = numpy.array([1,2,3], dtype='i') rbuf = numpy.array([0,0,0], dtype='i') smsg = [sbuf, [3], [0], [MPI.INT]] rmsg = [rbuf, ([3], [0]), [MPI.INT]] Alltoallw(smsg, rmsg) self.assertTrue((sbuf == rbuf).all()) @unittest.skipIf(array is None, 'array') def testMessageCAIBuf(self): sbuf = CAIBuf('i', [1,2,3], readonly=True) rbuf = CAIBuf('i', [0,0,0], readonly=False) smsg = [sbuf, [3], [0], [MPI.INT]] rmsg = [rbuf, ([3], [0]), [MPI.INT]] Alltoallw(smsg, rmsg) self.assertEqual(sbuf, rbuf) @unittest.skipIf(cupy is None, 'cupy') def testMessageCuPy(self): sbuf = cupy.array([1,2,3], 'i') rbuf = cupy.array([0,0,0], 'i') smsg = [sbuf, [3], [0], [MPI.INT]] rmsg = [rbuf, ([3], [0]), [MPI.INT]] Alltoallw(smsg, rmsg) self.assertTrue((sbuf == rbuf).all()) @unittest.skipIf(numba is None, 'numba') def testMessageNumba(self): sbuf = numba.cuda.device_array((3,), 'i') sbuf[:] = [1,2,3] rbuf = numba.cuda.device_array((3,), 'i') rbuf[:] = [0,0,0] smsg = [sbuf, [3], [0], [MPI.INT]] rmsg = [rbuf, ([3], [0]), [MPI.INT]] Alltoallw(smsg, rmsg) # numba arrays do not have the .all() method for i in range(3): self.assertEqual(sbuf[i], rbuf[i]) # --- def Reduce(smsg, rmsg): MPI.COMM_SELF.Reduce(smsg, rmsg, MPI.SUM, 0) def ReduceScatter(smsg, rmsg, rcounts): MPI.COMM_SELF.Reduce_scatter(smsg, rmsg, rcounts, MPI.SUM) class TestMessageReduce(unittest.TestCase): def testMessageBad(self): sbuf = MPI.Alloc_mem(8) rbuf = MPI.Alloc_mem(8) with self.assertRaises(ValueError): Reduce([sbuf, 1, MPI.INT], [rbuf, 1, MPI.FLOAT]) with self.assertRaises(ValueError): Reduce([sbuf, 1, MPI.INT], [rbuf, 2, MPI.INT]) MPI.Free_mem(sbuf) MPI.Free_mem(rbuf) class TestMessageReduceScatter(unittest.TestCase): def testMessageBad(self): sbuf = MPI.Alloc_mem(16) rbuf = MPI.Alloc_mem(16) with self.assertRaises(ValueError): ReduceScatter( [sbuf, 1, MPI.INT], [rbuf, 1, MPI.FLOAT], [1], ) with self.assertRaises(ValueError): ReduceScatter( [sbuf, 2, MPI.INT], [rbuf, 1, MPI.INT], [1], ) with self.assertRaises(ValueError): ReduceScatter( [sbuf, 2, MPI.INT], [rbuf, 1, MPI.INT], [2], ) with self.assertRaises(ValueError): ReduceScatter( MPI.IN_PLACE, [rbuf, 1, MPI.INT], [2], ) MPI.Free_mem(sbuf) MPI.Free_mem(rbuf) # --- def PutGet(smsg, rmsg, target=None): try: win = MPI.Win.Allocate(256, 1, MPI.INFO_NULL, MPI.COMM_SELF) except NotImplementedError: win = MPI.WIN_NULL try: try: win.Fence() except NotImplementedError: pass try: win.Put(smsg, 0, target) except NotImplementedError: pass try: win.Fence() except NotImplementedError: pass try: win.Get(rmsg, 0, target) except NotImplementedError: if isinstance(smsg, (list, tuple)): smsg = smsg[0] if isinstance(rmsg, (list, tuple)): rmsg = rmsg[0] try: rmsg[:] = smsg except: pass try: win.Fence() except NotImplementedError: pass finally: if win != MPI.WIN_NULL: win.Free() class 
TestMessageRMA(unittest.TestCase): def testMessageBad(self): sbuf = [None, 0, 0, "B", None] rbuf = [None, 0, 0, "B"] target = (0, 0, MPI.BYTE) def f(): PutGet(sbuf, rbuf, target) self.assertRaises(ValueError, f) sbuf = [None, 0, 0, "B"] rbuf = [None, 0, 0, "B", None] target = (0, 0, MPI.BYTE) def f(): PutGet(sbuf, rbuf, target) self.assertRaises(ValueError, f) sbuf = [None, 0, "B"] rbuf = [None, 0, "B"] target = (0, 0, MPI.BYTE, None) def f(): PutGet(sbuf, rbuf, target) self.assertRaises(ValueError, f) sbuf = [None, 0, "B"] rbuf = [None, 0, "B"] target = {1:2,3:4} def f(): PutGet(sbuf, rbuf, target) self.assertRaises(ValueError, f) def testMessageNone(self): for empty in ([None, 0, 0, MPI.BYTE], [None, 0, MPI.BYTE], [None, MPI.BYTE]): for target in (None, 0, [0, 0, MPI.BYTE]): PutGet(empty, empty, target) def testMessageBottom(self): for empty in ([MPI.BOTTOM, 0, 0, MPI.BYTE], [MPI.BOTTOM, 0, MPI.BYTE], [MPI.BOTTOM, MPI.BYTE]): for target in (None, 0, [0, 0, MPI.BYTE]): PutGet(empty, empty, target) def testMessageBytes(self): for target in (None, 0, [0, 3, MPI.BYTE]): sbuf = b"abc" rbuf = bytearray(3) PutGet(sbuf, rbuf, target) self.assertEqual(sbuf, rbuf) def testMessageBytearray(self): for target in (None, 0, [0, 3, MPI.BYTE]): sbuf = bytearray(b"abc") rbuf = bytearray(3) PutGet(sbuf, rbuf, target) self.assertEqual(sbuf, rbuf) @unittest.skipMPI('msmpi') @unittest.skipIf(array is None, 'array') def testMessageArray(self): sbuf = array.array('i', [1,2,3]) rbuf = array.array('i', [0,0,0]) PutGet(sbuf, rbuf) self.assertEqual(sbuf, rbuf) @unittest.skipMPI('msmpi') @unittest.skipIf(numpy is None, 'numpy') def testMessageNumPy(self): sbuf = numpy.array([1,2,3], dtype='i') rbuf = numpy.array([0,0,0], dtype='i') PutGet(sbuf, rbuf) self.assertTrue((sbuf == rbuf).all()) @unittest.skipMPI('msmpi') @unittest.skipIf(array is None, 'array') def testMessageCAIBuf(self): sbuf = CAIBuf('i', [1,2,3], readonly=True) rbuf = CAIBuf('i', [0,0,0], readonly=False) PutGet(sbuf, rbuf) self.assertEqual(sbuf, rbuf) @unittest.skipMPI('msmpi') @unittest.skipIf(cupy is None, 'cupy') def testMessageCuPy(self): sbuf = cupy.array([1,2,3], 'i') rbuf = cupy.array([0,0,0], 'i') PutGet(sbuf, rbuf) self.assertTrue((sbuf == rbuf).all()) @unittest.skipMPI('msmpi') @unittest.skipIf(numba is None, 'numba') def testMessageNumba(self): sbuf = numba.cuda.device_array((3,), 'i') sbuf[:] = [1,2,3] rbuf = numba.cuda.device_array((3,), 'i') rbuf[:] = [0,0,0] PutGet(sbuf, rbuf) # numba arrays do not have the .all() method for i in range(3): self.assertEqual(sbuf[i], rbuf[i]) # --- if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_msgzero.py000066400000000000000000000033461475341043600170330ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest class BaseTestMessageZero: null_b = [None, MPI.INT] null_v = [None, (0, None), MPI.INT] def testPointToPoint(self): comm = self.COMM comm.Sendrecv(sendbuf=self.null_b, dest=comm.rank, recvbuf=self.null_b, source=comm.rank) r2 = comm.Irecv(self.null_b, comm.rank) r1 = comm.Isend(self.null_b, comm.rank) MPI.Request.Waitall([r1, r2]) def testCollectivesBlock(self): comm = self.COMM comm.Bcast(self.null_b) comm.Gather(self.null_b, self.null_b) comm.Scatter(self.null_b, self.null_b) comm.Allgather(self.null_b, self.null_b) comm.Alltoall(self.null_b, self.null_b) def testCollectivesVector(self): comm = self.COMM comm.Gatherv(self.null_b, self.null_v) comm.Scatterv(self.null_v, self.null_b) comm.Allgatherv(self.null_b, self.null_v) comm.Alltoallv(self.null_v, 
self.null_v) @unittest.skipMPI('openmpi') def testReductions(self): comm = self.COMM comm.Reduce(self.null_b, self.null_b) comm.Allreduce(self.null_b, self.null_b) comm.Reduce_scatter_block(self.null_b, self.null_b) rcnt = [0]*comm.Get_size() comm.Reduce_scatter(self.null_b, self.null_b, rcnt) try: comm.Scan(self.null_b, self.null_b) except NotImplementedError: pass try: comm.Exscan(self.null_b, self.null_b) except NotImplementedError: pass class TestMessageZeroSelf(BaseTestMessageZero, unittest.TestCase): COMM = MPI.COMM_SELF class TestMessageZeroWorld(BaseTestMessageZero, unittest.TestCase): COMM = MPI.COMM_WORLD if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_objmodel.py000066400000000000000000000306101475341043600171320ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import ctypes import operator import weakref import sys import os class TestObjModel(unittest.TestCase): objects = [ MPI.Status(), MPI.DATATYPE_NULL, MPI.REQUEST_NULL, MPI.INFO_NULL, MPI.ERRHANDLER_NULL, MPI.SESSION_NULL, MPI.GROUP_NULL, MPI.WIN_NULL, MPI.OP_NULL, MPI.FILE_NULL, MPI.MESSAGE_NULL, MPI.COMM_NULL, ] def testEq(self): for i, obj1 in enumerate(self.objects): objects = self.objects[:] obj2 = objects[i] self.assertTrue (bool(obj1 == obj2)) self.assertFalse(bool(obj1 != obj2)) del objects[i] for obj2 in objects: self.assertTrue (bool(obj1 != obj2)) self.assertTrue (bool(obj2 != obj1)) self.assertFalse(bool(obj1 == obj2)) self.assertFalse(bool(obj2 == obj1)) self.assertFalse(bool(None == obj1 )) self.assertFalse(bool(obj1 == None )) self.assertFalse(bool(obj1 == True )) self.assertFalse(bool(obj1 == False)) self.assertFalse(bool(obj1 == 12345)) self.assertFalse(bool(obj1 == "abc")) self.assertFalse(bool(obj1 == [123])) self.assertFalse(bool(obj1 == (1,2))) self.assertFalse(bool(obj1 == {0:0})) self.assertFalse(bool(obj1 == set())) def testNe(self): for i, obj1 in enumerate(self.objects): objects = self.objects[:] obj2 = objects[i] self.assertFalse(bool(obj1 != obj2)) del objects[i] for obj2 in objects: self.assertTrue(bool(obj1 != obj2)) self.assertTrue(bool(None != obj1 )) self.assertTrue(bool(obj1 != None )) self.assertTrue(bool(obj1 != True )) self.assertTrue(bool(obj1 != False)) self.assertTrue(bool(obj1 != 12345)) self.assertTrue(bool(obj1 != "abc")) self.assertTrue(bool(obj1 != [123])) self.assertTrue(bool(obj1 != (1,2))) self.assertTrue(bool(obj1 != {0:0})) self.assertTrue(bool(obj1 != set())) def testCmp(self): for obj in self.objects: for binop in ('lt', 'le', 'gt', 'ge'): binop = getattr(operator, binop) with self.assertRaises(TypeError): binop(obj, obj) def testBool(self): for obj in self.objects[1:]: self.assertFalse(not not obj) self.assertTrue(not obj) self.assertFalse(obj) def testReduce(self): import pickle import copy def functions(obj): for protocol in range(0, pickle.HIGHEST_PROTOCOL + 1): yield lambda ob: pickle.loads(pickle.dumps(ob, protocol)) yield copy.copy yield copy.deepcopy for obj in self.objects: for copier in functions(obj): dup = copier(obj) self.assertIs(type(dup), type(obj)) if isinstance(obj, MPI.Status): self.assertIsNot(dup, obj) else: self.assertIs(dup, obj) cls = type(obj) dup = copier(cls(obj)) self.assertIs(type(dup), cls) self.assertIsNot(dup, obj) cls = type(f'My{type(obj).__name__}', (type(obj),), {}) main = __import__('__main__') cls.__module__ = main.__name__ setattr(main, cls.__name__, cls) dup = copier(cls(obj)) delattr(main, cls.__name__) self.assertIs(type(dup), cls) self.assertIsNot(dup, obj) def testHash(self): 
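# Handle objects (and Status) define equality but are expected to be
# unhashable, so calling hash() on any of them must raise TypeError.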
for obj in self.objects: ob_hash = lambda: hash(obj) self.assertRaises(TypeError, ob_hash) def testInit(self): for i, obj in enumerate(self.objects): klass = type(obj) new = klass() self.assertEqual(new, obj) new = klass(obj) self.assertEqual(new, obj) objects = self.objects[:] del objects[i] for other in objects: ob_init = lambda: klass(other) self.assertRaises(TypeError, ob_init) ob_init = lambda: klass(1234) self.assertRaises(TypeError, ob_init) ob_init = lambda: klass("abc") self.assertRaises(TypeError, ob_init) def testWeakRef(self): for obj in self.objects: wr = weakref.ref(obj) self.assertIs(wr(), obj) self.assertIn(wr, weakref.getweakrefs(obj)) wr = weakref.proxy(obj) self.assertIn(wr, weakref.getweakrefs(obj)) def testHandle(self): objects = self.objects[:] objects += [ MPI.INT, MPI.FLOAT, MPI.Request(MPI.REQUEST_NULL), MPI.Prequest(MPI.REQUEST_NULL), MPI.Grequest(MPI.REQUEST_NULL), MPI.INFO_ENV, MPI.GROUP_EMPTY, MPI.ERRORS_RETURN, MPI.ERRORS_ABORT, MPI.ERRORS_ARE_FATAL, MPI.COMM_SELF, MPI.COMM_WORLD, ] for obj in objects: if isinstance(obj, MPI.Status): continue self.assertGreaterEqual(obj.handle, 0) newobj = type(obj).fromhandle(obj.handle) self.assertEqual(newobj, obj) self.assertEqual(type(newobj), type(obj)) self.assertEqual(newobj.handle, obj.handle) with self.assertRaises(AttributeError): newobj.handle = None with self.assertRaises(AttributeError): newobj.handle = obj.handle with self.assertRaises(AttributeError): del newobj.handle def testSafeFreeNull(self): objects = self.objects[:] for obj in objects: if isinstance(obj, MPI.Status): continue obj.free() self.assertFalse(obj) obj.free() self.assertFalse(obj) def testSafeFreeConstant(self): objects = [ MPI.INT, MPI.LONG, MPI.FLOAT, MPI.DOUBLE, MPI.INFO_ENV, MPI.SUM, MPI.PROD, MPI.GROUP_EMPTY, MPI.ERRORS_ABORT, MPI.ERRORS_ARE_FATAL, MPI.ERRORS_RETURN, MPI.MESSAGE_NO_PROC, MPI.COMM_SELF, MPI.COMM_WORLD, ] for obj in filter(None, objects): self.assertTrue(obj) for _ in range(3): obj.free() self.assertTrue(obj) if not isinstance(obj, MPI.Errhandler): clon = type(obj)(obj) self.assertTrue(clon) for _ in range(3): clon.free() self.assertFalse(clon) if hasattr(obj, 'Dup'): self.assertTrue(obj) dup = obj.Dup() self.assertTrue(dup) for _ in range(3): dup.free() self.assertFalse(dup) self.assertTrue(obj) for _ in range(3): obj.free() self.assertTrue(obj) def testSafeFreeCreated(self): objects = [ MPI.COMM_SELF.Isend((None, 0, MPI.BYTE), MPI.PROC_NULL), MPI.Op.Create(lambda *_: None), MPI.COMM_SELF.Get_group(), MPI.COMM_SELF.Get_errhandler(), ] try: objects += [MPI.Info.Create()] except (NotImplementedError, MPI.Exception): pass if os.name == 'posix': try: objects += [MPI.File.Open(MPI.COMM_SELF, "/dev/null")] except NotImplementedError: pass try: objects += [MPI.Win.Create(MPI.BOTTOM)] except (NotImplementedError, MPI.Exception): pass try: objects += [MPI.Session.Init()] except NotImplementedError: pass for obj in objects: self.assertTrue(obj) for _ in range(3): obj.free() self.assertFalse(obj) def testConstants(self): import pickle names = ( 'BOTTOM', 'IN_PLACE', 'BUFFER_AUTOMATIC', ) for name in names: constant = getattr(MPI, name) self.assertEqual(repr(constant), name) self.assertEqual(memoryview(constant).nbytes, 0) self.assertEqual(MPI.Get_address(constant), constant) if sys.implementation.name != 'pypy': self.assertIsNone(memoryview(constant).obj) with self.assertRaises(ValueError): type(constant)(constant + 1) self.assertEqual(repr(constant), name) self.assertEqual(constant.__reduce__(), name) for protocol in 
range(pickle.HIGHEST_PROTOCOL): value = pickle.loads(pickle.dumps(constant, protocol)) self.assertIs(type(value), type(constant)) self.assertEqual(value, constant) def testSizeOf(self): for obj in self.objects: n1 = MPI._sizeof(obj) n2 = MPI._sizeof(type(obj)) self.assertEqual(n1, n2) with self.assertRaises(TypeError): MPI._sizeof(None) def testAddressOf(self): for obj in self.objects: addr = MPI._addressof(obj) self.assertNotEqual(addr, 0) with self.assertRaises(TypeError): MPI._addressof(None) def testAHandleOf(self): for obj in self.objects: hdl = MPI._handleof(obj) self.assertGreaterEqual(hdl, 0) with self.assertRaises(TypeError): MPI._handleof(None) @unittest.skipUnless(sys.implementation.name == 'cpython', "cpython") @unittest.skipUnless(hasattr(MPI, '__pyx_capi__'), "cython") def testCAPI(self): status = MPI.Status() status.source = 0 status.tag = 1 status.error = MPI.ERR_OTHER extra_objects = [ status, MPI.INT, MPI.SUM, MPI.INFO_ENV, MPI.MESSAGE_NO_PROC, MPI.ERRORS_RETURN, MPI.GROUP_EMPTY, MPI.COMM_SELF, ] pyapi = ctypes.pythonapi PyCapsule_GetPointer = pyapi.PyCapsule_GetPointer PyCapsule_GetPointer.restype = ctypes.c_void_p PyCapsule_GetPointer.argtypes = [ctypes.py_object, ctypes.c_char_p] pyx_capi = MPI.__pyx_capi__ for obj in self.objects + extra_objects: cls = type(obj) if issubclass(cls, MPI.Comm): cls = MPI.Comm typename = cls.__name__ modifier = '' if isinstance(obj, MPI.Status): mpi_type = ctypes.c_void_p modifier = ' *' elif MPI._sizeof(cls) == ctypes.sizeof(ctypes.c_uint32): mpi_type = ctypes.c_uint32 elif MPI._sizeof(cls) == ctypes.sizeof(ctypes.c_uint64): mpi_type = ctypes.c_uint64 new_functype = ctypes.PYFUNCTYPE(ctypes.py_object, mpi_type) get_functype = ctypes.PYFUNCTYPE(ctypes.c_void_p, ctypes.py_object) new_capsule = pyx_capi[f'PyMPI{typename}_New'] get_capsule = pyx_capi[f'PyMPI{typename}_Get'] new_signature = f'PyObject *(MPI_{typename}{modifier})'.encode() get_signature = f'MPI_{typename} *(PyObject *)'.encode() PyCapsule_GetPointer.restype = new_functype pympi_new = PyCapsule_GetPointer(new_capsule, new_signature) PyCapsule_GetPointer.restype = get_functype pympi_get = PyCapsule_GetPointer(get_capsule, get_signature) PyCapsule_GetPointer.restype = ctypes.c_void_p objptr = pympi_get(obj) if isinstance(obj, MPI.Status): newarg = objptr else: newarg = mpi_type.from_address(objptr).value self.assertEqual(objptr, MPI._addressof(obj)) self.assertEqual(newarg, MPI._handleof(obj)) newobj = pympi_new(newarg) self.assertIs(type(newobj), type(obj)) self.assertEqual(newobj, obj) if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_op.py000066400000000000000000000220211475341043600157520ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest try: import array except ImportError: array = None def asarray(typecode, data): tobytes = lambda s: memoryview(s).tobytes() frombytes = array.array.frombytes a = array.array(typecode, []) frombytes(a, tobytes(data)) return a def mysum_obj(a, b): for i in range(len(a)): b[i] = a[i] + b[i] return b def mysum_buf(a, b, dt): assert dt == MPI.INT assert len(a) == len(b) b[:] = mysum_obj(asarray('i', a), asarray('i', b)) def mysum(ba, bb, dt): if dt is None: return mysum_obj(ba, bb) else: return mysum_buf(ba, bb, dt) def mybor(a, b, dt): assert dt == MPI.BYTE assert len(a) == len(b) for i in range(len(a)): b[i] = a[i] | b[i] class TestOp(unittest.TestCase): def testConstructor(self): op = MPI.Op() self.assertFalse(op) self.assertEqual(op, MPI.OP_NULL) @unittest.skipIf(array is None, 'array') def 
testCreate(self): for comm in [MPI.COMM_SELF, MPI.COMM_WORLD]: for commute in [True, False]: for N in range(4): myop = MPI.Op.Create(mysum, commute) self.assertFalse(myop.is_predefined) try: size = comm.Get_size() rank = comm.Get_rank() a = array.array('i', [i*(rank+1) for i in range(N)]) b = array.array('i', [0]*len(a)) comm.Allreduce([a, MPI.INT], [b, MPI.INT], myop) scale = sum(range(1,size+1)) for i in range(N): self.assertEqual(b[i], scale*i) res = myop(a, b) self.assertIs(res, b) for i in range(N): self.assertEqual(b[i], a[i]+scale*i) myop2 = MPI.Op(myop) a = array.array('i', [1]*N) b = array.array('i', [2]*N) res = myop2(a, b) self.assertIs(res, b) for i in range(N): self.assertEqual(b[i], 3) myop2 = None finally: myop.Free() def testCreateMany(self): MAX_USER_OP = 32 # max user-defined operations # create ops = [] for i in range(MAX_USER_OP): o = MPI.Op.Create(mysum) ops.append(o) with self.assertRaises(RuntimeError): MPI.Op.Create(mysum) # cleanup for o in ops: o.Free() # another round ops = [] for i in range(MAX_USER_OP): op = MPI.Op.Create(mybor) ops.append(op) with self.assertRaises(RuntimeError): MPI.Op.Create(mybor) # local reductions try: b1 = bytearray([2] * 3) b2 = bytearray([4] * 3) for op in ops: ibuf = [b1, MPI.BYTE] obuf = [b2, MPI.BYTE] op.Reduce_local(ibuf, obuf) for c1, c2 in zip(b1, b2): self.assertEqual(c1, 2) self.assertEqual(c2, 6) except NotImplementedError: pass # pickling support try: for op in ops: op.__reduce__() clon = MPI.Op.f2py(op.py2f()) with self.assertRaises(ValueError): clon.__reduce__() except NotImplementedError: pass # cleanup for op in ops: op.Free() def _test_call(self, op, args, res): self.assertEqual(op(*args), res) self.assertEqual(MPI.Op(op)(*args), res) def testCall(self): self.assertRaises(TypeError, MPI.OP_NULL) self.assertRaises(TypeError, MPI.OP_NULL, None) self.assertRaises(ValueError, MPI.OP_NULL, None, None) self._test_call(MPI.MIN, (2,3), 2) self._test_call(MPI.MAX, (2,3), 3) self._test_call(MPI.SUM, (2,3), 5) self._test_call(MPI.PROD, (2,3), 6) for x in (False, True): for y in (False, True): self._test_call(MPI.LAND, (x,y), x and y) self._test_call(MPI.LOR, (x,y), x or y) self._test_call(MPI.LXOR, (x,y), x ^ y) for x in range(5): for y in range(5): self._test_call(MPI.BAND, (x,y), x & y) self._test_call(MPI.BOR, (x,y), x | y) self._test_call(MPI.BXOR, (x,y), x ^ y) if MPI.REPLACE: self._test_call(MPI.REPLACE, (2,3), 3) self._test_call(MPI.REPLACE, (3,2), 2) if MPI.NO_OP: self._test_call(MPI.NO_OP, (2,3), 2) self._test_call(MPI.NO_OP, (3,2), 3) def testMinMax(self): x = [1]; y = [1] res = MPI.MIN(x, y) self.assertEqual(res, x) res = MPI.MAX(x, y) self.assertEqual(res, x) def testMinMaxLoc(self): x = [1]; i = [2]; u = [x, i] y = [2]; j = [1]; v = [y, j] res = MPI.MINLOC(u, v) self.assertIs(res[0], x) self.assertIs(res[1], i) res = MPI.MINLOC(v, u) self.assertIs(res[0], x) self.assertIs(res[1], i) res = MPI.MAXLOC(u, v) self.assertIs(res[0], y) self.assertIs(res[1], j) res = MPI.MAXLOC(v, u) self.assertIs(res[0], y) self.assertIs(res[1], j) # x = [1]; i = 0; u = [x, i] y = [1]; j = 1; v = [y, j] res = MPI.MINLOC(u, v) self.assertIs(res[0], x) self.assertIs(res[1], i) res = MPI.MAXLOC(u, v) self.assertIs(res[0], x) self.assertIs(res[1], i) # x = [1]; i = 1; u = [x, i] y = [1]; j = 0; v = [y, j] res = MPI.MINLOC(u, v) self.assertIs(res[0], y) self.assertIs(res[1], j) res = MPI.MAXLOC(u, v) self.assertIs(res[0], y) self.assertIs(res[1], j) # x = [1]; i = [0]; u = [x, i] y = [1]; j = [1]; v = [y, j] res = MPI.MINLOC(u, v) 
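# With equal values, both MINLOC and MAXLOC resolve the tie by keeping the
# pair that carries the smaller index.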
self.assertIs(res[0], x) self.assertIs(res[1], i) res = MPI.MAXLOC(u, v) self.assertIs(res[0], x) self.assertIs(res[1], i) # x = [1]; i = [1]; u = [x, i] y = [1]; j = [0]; v = [y, j] res = MPI.MINLOC(u, v) self.assertIs(res[0], y) self.assertIs(res[1], j) res = MPI.MAXLOC(u, v) self.assertIs(res[0], y) self.assertIs(res[1], j) @unittest.skipMPI('openmpi(<=1.8.1)') def testIsCommutative(self): try: MPI.SUM.Is_commutative() except NotImplementedError: self.skipTest('mpi-op-is_commutative') ops = [ MPI.MAX, MPI.MIN, MPI.SUM, MPI.PROD, MPI.LAND, MPI.BAND, MPI.LOR, MPI.BOR, MPI.LXOR, MPI.BXOR, MPI.MAXLOC, MPI.MINLOC, ] for op in ops: flag = op.Is_commutative() self.assertEqual(flag, op.is_commutative) self.assertTrue(flag) @unittest.skipMPI('openmpi(<=1.8.1)') @unittest.skipMPI('mpich(==3.4.1)') def testIsCommutativeExtra(self): try: MPI.SUM.Is_commutative() except NotImplementedError: self.skipTest('mpi-op-is_commutative') ops = [MPI.REPLACE, MPI.NO_OP] for op in ops: if not op: continue flag = op.Is_commutative() self.assertEqual(flag, op.is_commutative) #self.assertFalse(flag) def testIsPredefined(self): self.assertTrue(MPI.OP_NULL.is_predefined) ops = [MPI.MAX, MPI.MIN, MPI.SUM, MPI.PROD, MPI.LAND, MPI.BAND, MPI.LOR, MPI.BOR, MPI.LXOR, MPI.BXOR, MPI.MAXLOC, MPI.MINLOC,] for op in ops: self.assertTrue(op.is_predefined) def testPicklePredefined(self): from pickle import dumps, loads ops = [ MPI.MAX, MPI.MIN, MPI.SUM, MPI.PROD, MPI.LAND, MPI.BAND, MPI.LOR, MPI.BOR, MPI.LXOR, MPI.BXOR, MPI.MAXLOC, MPI.MINLOC, MPI.OP_NULL, MPI.NO_OP, ] for op in ops: newop = loads(dumps(op)) self.assertIs(newop, op) newop = loads(dumps(MPI.Op(op))) self.assertIsNot(newop, op) self.assertEqual(newop, op) def testPickleUserDefined(self): from pickle import dumps, loads for commute in [True, False]: myop1 = MPI.Op.Create(mysum, commute) myop2 = loads(dumps(myop1)) self.assertNotEqual(myop1, myop2) myop1.Free() self.assertEqual(myop2([2], [3]), [5]) self.assertEqual(myop2.Is_commutative(), commute) myop2.Free() if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_p2p_buf.py000066400000000000000000000461211475341043600167000ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import itertools import arrayimpl class BaseTestP2PBuf: COMM = MPI.COMM_NULL def testSendrecv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() dest = (rank + 1) % size source = (rank - 1) % size for array, typecode in arrayimpl.loop(): with arrayimpl.test(self): for s in range(0, size+1): with self.subTest(s=s): sbuf = array( s, typecode, s) rbuf = array(-1, typecode, s+1) self.COMM.Sendrecv(sbuf.as_mpi(), dest, 0, rbuf.as_mpi(), source, 0) check = arrayimpl.scalar(s) for value in rbuf[:-1]: self.assertEqual(value, check) check = arrayimpl.scalar(-1) self.assertEqual(rbuf[-1], check) def testISendrecv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() dest = (rank + 1) % size source = (rank - 1) % size try: self.COMM.Isendrecv( bytearray(1), dest, 0, bytearray(1), source, 0, ).Wait() except NotImplementedError: if MPI.Get_version() >= (4, 0): raise raise unittest.SkipTest("mpi-isendrecv") for array, typecode in arrayimpl.loop(): with arrayimpl.test(self): for s in range(0, size+1): with self.subTest(s=s): sbuf = array( s, typecode, s) rbuf = array(-1, typecode, s+1) self.COMM.Isendrecv( sbuf.as_mpi(), dest, 0, rbuf.as_mpi(), source, 0, ).Wait() check = arrayimpl.scalar(s) for value in rbuf[:-1]: self.assertEqual(value, check) check = arrayimpl.scalar(-1) 
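# Only s elements were exchanged, so the extra trailing slot of the receive
# buffer must still hold its initial -1 fill value.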
self.assertEqual(rbuf[-1], check) def testSendrecvReplace(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() dest = (rank + 1) % size source = (rank - 1) % size for array, typecode in arrayimpl.loop(): with arrayimpl.test(self): for s in range(0, size+1): with self.subTest(s=s): buf = array(rank, typecode, s); self.COMM.Sendrecv_replace(buf.as_mpi(), dest, 0, source, 0) check = arrayimpl.scalar(source) for value in buf: self.assertEqual(value, check) def testISendrecvReplace(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() dest = (rank + 1) % size source = (rank - 1) % size try: self.COMM.Isendrecv_replace( bytearray(1), dest, 0, source, 0 ).Wait() except NotImplementedError: if MPI.Get_version() >= (4, 0): raise raise unittest.SkipTest("mpi-isendrecv") for array, typecode in arrayimpl.loop(): with arrayimpl.test(self): for s in range(0, size+1): with self.subTest(s=s): buf = array(rank, typecode, s); self.COMM.Isendrecv_replace( buf.as_mpi(), dest, 0, source, 0 ).Wait() check = arrayimpl.scalar(source) for value in buf: self.assertEqual(value, check) def testSendRecv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): with arrayimpl.test(self): if unittest.is_mpi_gpu('openmpi', array): continue if unittest.is_mpi_gpu('mvapich', array): continue for s in range(0, size+1): with self.subTest(s=s): # sbuf = array( s, typecode, s) rbuf = array(-1, typecode, s) mem = array( 0, typecode, 2*(s+MPI.BSEND_OVERHEAD)).as_raw() if size == 1: MPI.Attach_buffer(mem) rbuf = sbuf MPI.Detach_buffer() elif rank == 0: MPI.Attach_buffer(mem) self.COMM.Ibsend(sbuf.as_mpi(), 1, 0).Wait() self.COMM.Bsend(sbuf.as_mpi(), 1, 0) MPI.Detach_buffer() self.COMM.Send(sbuf.as_mpi(), 1, 0) self.COMM.Ssend(sbuf.as_mpi(), 1, 0) self.COMM.Recv(rbuf.as_mpi(), 1, 0) self.COMM.Recv(rbuf.as_mpi(), 1, 0) self.COMM.Recv(rbuf.as_mpi(), 1, 0) self.COMM.Recv(rbuf.as_mpi(), 1, 0) elif rank == 1: self.COMM.Recv(rbuf.as_mpi(), 0, 0) self.COMM.Recv(rbuf.as_mpi(), 0, 0) self.COMM.Recv(rbuf.as_mpi(), 0, 0) self.COMM.Recv(rbuf.as_mpi(), 0, 0) MPI.Attach_buffer(mem) self.COMM.Ibsend(sbuf.as_mpi(), 0, 0).Wait() self.COMM.Bsend(sbuf.as_mpi(), 0, 0) MPI.Detach_buffer() self.COMM.Send(sbuf.as_mpi(), 0, 0) self.COMM.Ssend(sbuf.as_mpi(), 0, 0) else: rbuf = sbuf check = arrayimpl.scalar(s) for value in rbuf: self.assertEqual(value, check) # rank = self.COMM.Get_rank() sbuf = array( s, typecode, s) rbuf = array(-1, typecode, s) rreq = self.COMM.Irecv(rbuf.as_mpi(), rank, 0) self.COMM.Rsend(sbuf.as_mpi(), rank, 0) rreq.Wait() for value in rbuf: self.assertEqual(value, check) rbuf = array(-1, typecode, s) rreq = self.COMM.Irecv(rbuf.as_mpi(), rank, 0) self.COMM.Irsend(sbuf.as_mpi(), rank, 0).Wait() rreq.Wait() for value in rbuf: self.assertEqual(value, check) def testProcNull(self): comm = self.COMM # comm.Sendrecv(None, MPI.PROC_NULL, 0, None, MPI.PROC_NULL, 0) comm.Sendrecv_replace(None, MPI.PROC_NULL, 0, MPI.PROC_NULL, 0) # comm.Send (None, MPI.PROC_NULL) comm.Isend (None, MPI.PROC_NULL).Wait() # comm.Ssend(None, MPI.PROC_NULL) comm.Issend(None, MPI.PROC_NULL).Wait() # buf = MPI.Alloc_mem(MPI.BSEND_OVERHEAD) MPI.Attach_buffer(buf) comm.Bsend(None, MPI.PROC_NULL) comm.Ibsend(None, MPI.PROC_NULL).Wait() MPI.Detach_buffer() MPI.Free_mem(buf) # comm.Rsend(None, MPI.PROC_NULL) comm.Irsend(None, MPI.PROC_NULL).Wait() # comm.Recv (None, MPI.PROC_NULL) comm.Irecv(None, MPI.PROC_NULL).Wait() @unittest.skipMPI('mpich(<4.1.0)') def testProcNullISendrecv(self): try: 
self.COMM.Isendrecv( None, MPI.PROC_NULL, 0, None, MPI.PROC_NULL, 0, ).Wait() self.COMM.Isendrecv_replace( None, MPI.PROC_NULL, 0, MPI.PROC_NULL, 0, ).Wait() except NotImplementedError: if MPI.Get_version() >= (4, 0): raise raise unittest.SkipTest("mpi-isendrecv") @unittest.skipMPI('mpich(==3.4.1)') def testProcNullPersistent(self): comm = self.COMM # req = comm.Send_init(None, MPI.PROC_NULL) req.Start(); req.Wait(); req.Free() # req = comm.Ssend_init(None, MPI.PROC_NULL) req.Start(); req.Wait(); req.Free() # buf = MPI.Alloc_mem(MPI.BSEND_OVERHEAD) MPI.Attach_buffer(buf) req = comm.Bsend_init(None, MPI.PROC_NULL) req.Start(); req.Wait(); req.Free() MPI.Detach_buffer() MPI.Free_mem(buf) # req = comm.Rsend_init(None, MPI.PROC_NULL) req.Start(); req.Wait(); req.Free() # req = comm.Recv_init(None, MPI.PROC_NULL) req.Start(); req.Wait(); req.Free() def testConstructor(self): preq = self.COMM.Send_init(b"", MPI.PROC_NULL, 0) dupe = MPI.Prequest(preq) self.assertIs(type(dupe), MPI.Prequest) self.assertEqual(dupe, preq) dupe = MPI.Prequest.fromhandle(preq.handle) self.assertIs(type(dupe), MPI.Prequest) self.assertEqual(dupe, preq) dupe = MPI.Prequest.f2py(preq.py2f()) self.assertIs(type(dupe), MPI.Prequest) self.assertEqual(dupe, preq) dupe = MPI.Request(preq) self.assertIs(type(dupe), MPI.Request) self.assertEqual(dupe, preq) with self.assertRaises(TypeError): dupe = MPI.Grequest(preq) preq.Start() preq.Wait() preq.Free() def testPersistent(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() dest = (rank + 1) % size source = (rank - 1) % size for array, typecode in arrayimpl.loop(): with arrayimpl.test(self): if unittest.is_mpi_gpu('openmpi', array): continue if unittest.is_mpi_gpu('mvapich', array): continue for s, xs in itertools.product(range(size+1), range(3)): with self.subTest(s=s, xs=xs): # sbuf = array( s, typecode, s) rbuf = array(-1, typecode, s+xs) sendreq = self.COMM.Send_init(sbuf.as_mpi(), dest, 0) recvreq = self.COMM.Recv_init(rbuf.as_mpi(), source, 0) sendreq.Start() recvreq.Start() sendreq.Wait() recvreq.Wait() self.assertNotEqual(sendreq, MPI.REQUEST_NULL) self.assertNotEqual(recvreq, MPI.REQUEST_NULL) sendreq.Free() recvreq.Free() self.assertEqual(sendreq, MPI.REQUEST_NULL) self.assertEqual(recvreq, MPI.REQUEST_NULL) check = arrayimpl.scalar(s) for value in rbuf[:s]: self.assertEqual(value, check) check = arrayimpl.scalar(-1) for value in rbuf[s:]: self.assertEqual(value, check) # sbuf = array(s, typecode, s) rbuf = array(-1, typecode, s+xs) sendreq = self.COMM.Send_init(sbuf.as_mpi(), dest, 0) recvreq = self.COMM.Recv_init(rbuf.as_mpi(), source, 0) reqlist = [sendreq, recvreq] MPI.Prequest.Startall(reqlist) index1 = MPI.Prequest.Waitany(reqlist) self.assertIn(index1, [0, 1]) self.assertNotEqual(reqlist[index1], MPI.REQUEST_NULL) index2 = MPI.Prequest.Waitany(reqlist) self.assertIn(index2, [0, 1]) self.assertNotEqual(reqlist[index2], MPI.REQUEST_NULL) self.assertNotEqual(index1, index2) index3 = MPI.Prequest.Waitany(reqlist) self.assertEqual(index3, MPI.UNDEFINED) for preq in reqlist: self.assertNotEqual(preq, MPI.REQUEST_NULL) preq.Free() self.assertEqual(preq, MPI.REQUEST_NULL) check = arrayimpl.scalar(s) for value in rbuf[:s]: self.assertEqual(value, check) check = arrayimpl.scalar(-1) for value in rbuf[s:]: self.assertEqual(value, check) # sbuf = array( s, typecode, s) rbuf = array(-1, typecode, s+xs) sendreq = self.COMM.Ssend_init(sbuf.as_mpi(), dest, 0) recvreq = self.COMM.Recv_init(rbuf.as_mpi(), source, 0) sendreq.Start() recvreq.Start() sendreq.Wait() 
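# Short self-contained sketch of the persistent-request pattern that
# testPersistent drives below; a self-send on one process and the buffer size
# of 8 ints are arbitrary choices for illustration.
from mpi4py import MPI
import array

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
sbuf = array.array('i', range(8))
rbuf = array.array('i', [-1] * 8)
sendreq = comm.Send_init([sbuf, MPI.INT], rank, 0)
recvreq = comm.Recv_init([rbuf, MPI.INT], rank, 0)
for _ in range(3):  # the same persistent requests can be restarted many times
    MPI.Prequest.Startall([sendreq, recvreq])
    MPI.Request.Waitall([sendreq, recvreq])
sendreq.Free()
recvreq.Free()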
recvreq.Wait() self.assertNotEqual(sendreq, MPI.REQUEST_NULL) self.assertNotEqual(recvreq, MPI.REQUEST_NULL) sendreq.Free() recvreq.Free() self.assertEqual(sendreq, MPI.REQUEST_NULL) self.assertEqual(recvreq, MPI.REQUEST_NULL) check = arrayimpl.scalar(s) for value in rbuf[:s]: self.assertEqual(value, check) check = arrayimpl.scalar(-1) for value in rbuf[s:]: self.assertEqual(value, check) # mem = array( 0, typecode, s+MPI.BSEND_OVERHEAD).as_raw() sbuf = array( s, typecode, s) rbuf = array(-1, typecode, s+xs) MPI.Attach_buffer(mem) sendreq = self.COMM.Bsend_init(sbuf.as_mpi(), dest, 0) recvreq = self.COMM.Recv_init(rbuf.as_mpi(), source, 0) sendreq.Start() recvreq.Start() sendreq.Wait() recvreq.Wait() MPI.Detach_buffer() self.assertNotEqual(sendreq, MPI.REQUEST_NULL) self.assertNotEqual(recvreq, MPI.REQUEST_NULL) sendreq.Free() recvreq.Free() self.assertEqual(sendreq, MPI.REQUEST_NULL) self.assertEqual(recvreq, MPI.REQUEST_NULL) check = arrayimpl.scalar(s) for value in rbuf[:s]: self.assertEqual(value, check) check = arrayimpl.scalar(-1) for value in rbuf[s:]: self.assertEqual(value, check) # rank = self.COMM.Get_rank() sbuf = array( s, typecode, s) rbuf = array(-1, typecode, s+xs) recvreq = self.COMM.Recv_init (rbuf.as_mpi(), rank, 0) sendreq = self.COMM.Rsend_init(sbuf.as_mpi(), rank, 0) recvreq.Start() sendreq.Start() recvreq.Wait() sendreq.Wait() self.assertNotEqual(sendreq, MPI.REQUEST_NULL) self.assertNotEqual(recvreq, MPI.REQUEST_NULL) sendreq.Free() recvreq.Free() self.assertEqual(sendreq, MPI.REQUEST_NULL) self.assertEqual(recvreq, MPI.REQUEST_NULL) check = arrayimpl.scalar(s) for value in rbuf[:s]: self.assertEqual(value, check) check = arrayimpl.scalar(-1) for value in rbuf[s:]: self.assertEqual(value, check) def testProbe(self): comm = self.COMM.Dup() try: request = comm.Issend([None, 0, MPI.BYTE], comm.rank, 123) self.assertTrue(request) status = MPI.Status() comm.Probe(MPI.ANY_SOURCE, MPI.ANY_TAG, status) self.assertEqual(status.source, comm.rank) self.assertEqual(status.tag, 123) self.assertTrue(request) flag = request.Test() self.assertTrue(request) self.assertFalse(flag) comm.Recv([None, 0, MPI.BYTE], comm.rank, 123) self.assertTrue(request) flag = False while not flag: flag = request.Test() self.assertFalse(request) self.assertTrue(flag) finally: comm.Free() @unittest.skipMPI('MPICH1') @unittest.skipMPI('LAM/MPI') def testProbeCancel(self): comm = self.COMM.Dup() try: request = comm.Issend([None, 0, MPI.BYTE], comm.rank, 123) status = MPI.Status() comm.Probe(MPI.ANY_SOURCE, MPI.ANY_TAG, status) self.assertEqual(status.source, comm.rank) self.assertEqual(status.tag, 123) request.Cancel() self.assertTrue(request) status = MPI.Status() request.Get_status(status) cancelled = status.Is_cancelled() if not cancelled: comm.Recv([None, 0, MPI.BYTE], comm.rank, 123) request.Wait() else: request.Free() finally: comm.Free() def testIProbe(self): comm = self.COMM.Dup() try: f = comm.Iprobe() self.assertFalse(f) f = comm.Iprobe(MPI.ANY_SOURCE) self.assertFalse(f) f = comm.Iprobe(MPI.ANY_SOURCE, MPI.ANY_TAG) self.assertFalse(f) status = MPI.Status() f = comm.Iprobe(MPI.ANY_SOURCE, MPI.ANY_TAG, status) self.assertFalse(f) self.assertEqual(status.source, MPI.ANY_SOURCE) self.assertEqual(status.tag, MPI.ANY_TAG) self.assertEqual(status.error, MPI.SUCCESS) finally: comm.Free() class TestP2PBufSelf(BaseTestP2PBuf, unittest.TestCase): COMM = MPI.COMM_SELF class TestP2PBufWorld(BaseTestP2PBuf, unittest.TestCase): COMM = MPI.COMM_WORLD class TestP2PBufSelfDup(TestP2PBufSelf): def setUp(self): 
self.COMM = MPI.COMM_SELF.Dup() def tearDown(self): self.COMM.Free() @unittest.skipMPI('openmpi(<1.4.0)', MPI.Query_thread() > MPI.THREAD_SINGLE) class TestP2PBufWorldDup(TestP2PBufWorld): def setUp(self): self.COMM = MPI.COMM_WORLD.Dup() def tearDown(self): self.COMM.Free() if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_p2p_buf_matched.py000066400000000000000000000176211475341043600203700ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import arrayimpl @unittest.skipIf(MPI.MESSAGE_NULL == MPI.MESSAGE_NO_PROC, 'mpi-message') class TestMessage(unittest.TestCase): def testMessageNull(self): null = MPI.MESSAGE_NULL self.assertFalse(null) null2 = MPI.Message() self.assertEqual(null, null2) null3 = MPI.Message(null) self.assertEqual(null, null3) def testMessageNoProc(self): # noproc = MPI.MESSAGE_NO_PROC self.assertTrue(noproc) noproc.Recv(None) self.assertTrue(noproc) noproc.Irecv(None).Wait() self.assertTrue(noproc) # noproc2 = MPI.Message(MPI.MESSAGE_NO_PROC) self.assertTrue(noproc2) self.assertEqual(noproc2, noproc) self.assertNotEqual(noproc, MPI.MESSAGE_NULL) # message = MPI.Message(MPI.MESSAGE_NO_PROC) message.Recv(None) self.assertEqual(message, MPI.MESSAGE_NULL) # message = MPI.Message(MPI.MESSAGE_NO_PROC) request = message.Irecv(None) self.assertEqual(message, MPI.MESSAGE_NULL) self.assertNotEqual(request, MPI.REQUEST_NULL) request.Wait() self.assertEqual(request, MPI.REQUEST_NULL) def testPickle(self): from pickle import dumps, loads for message in ( MPI.MESSAGE_NULL, MPI.MESSAGE_NO_PROC, ): msg = loads(dumps(message)) self.assertIs(msg, message) msg = loads(dumps(MPI.Message(message))) self.assertIsNot(msg, message) self.assertEqual(msg, message) comm = MPI.COMM_SELF request = comm.Isend(b"", 0, 0) with self.assertRaises(ValueError): loads(dumps(request)) message = comm.Mprobe(0, 0) with self.assertRaises(ValueError): loads(dumps(message)) message.Recv(bytearray(1)) request.Wait() @unittest.skipIf(MPI.MESSAGE_NULL == MPI.MESSAGE_NO_PROC, 'mpi-message') class BaseTestP2PMatched: COMM = MPI.COMM_NULL def testIMProbe(self): comm = self.COMM.Dup() try: m = comm.Improbe() self.assertIsNone(m) m = comm.Improbe(MPI.ANY_SOURCE) self.assertIsNone(m) m = comm.Improbe(MPI.ANY_SOURCE, MPI.ANY_TAG) self.assertIsNone(m) status = MPI.Status() m = comm.Improbe(MPI.ANY_SOURCE, MPI.ANY_TAG, status) self.assertIsNone(m) self.assertEqual(status.source, MPI.ANY_SOURCE) self.assertEqual(status.tag, MPI.ANY_TAG) self.assertEqual(status.error, MPI.SUCCESS) m = MPI.Message.Iprobe(comm) self.assertIsNone(m) buf = [None, 0, MPI.BYTE] s = comm.Isend(buf, comm.rank, 0) r = comm.Mprobe(comm.rank, 0).Irecv(buf) MPI.Request.Waitall([s,r]) finally: comm.Free() def testProbeRecv(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() for array, typecode in arrayimpl.loop(): with arrayimpl.test(self): for s in range(0, size+1): with self.subTest(s=s): sbuf = array( s, typecode, s) rbuf = array(-1, typecode, s) if size == 1: n = comm.Improbe(0, 0) self.assertIsNone(n) sr = comm.Isend(sbuf.as_mpi(), 0, 0) m = comm.Mprobe(0, 0) self.assertIsInstance(m, MPI.Message) self.assertTrue(m) rr = m.Irecv(rbuf.as_raw()) self.assertFalse(m) self.assertTrue(sr) self.assertTrue(rr) MPI.Request.Waitall([sr,rr]) self.assertFalse(sr) self.assertFalse(rr) # n = comm.Improbe(0, 0) self.assertIsNone(n) r = comm.Isend(sbuf.as_mpi(), 0, 0) m = MPI.Message.Probe(comm, 0, 0) self.assertIsInstance(m, MPI.Message) self.assertTrue(m) m.Recv(rbuf.as_raw()) self.assertFalse(m) 
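# Sketch of the matched-probe flow checked in testProbeRecv, assuming an MPI-3
# implementation with MPI_Mprobe (the suite skips otherwise); tag and payload
# are arbitrary.
from mpi4py import MPI

comm = MPI.COMM_SELF
sreq = comm.Isend([bytearray(b"abcd"), MPI.BYTE], dest=0, tag=7)
msg = comm.Mprobe(source=0, tag=7)   # removes the message from the queue
rbuf = bytearray(4)
msg.Recv([rbuf, MPI.BYTE])           # only this Message handle can receive it
sreq.Wait()
assert rbuf == bytearray(b"abcd")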
r.Wait() # n = MPI.Message.Iprobe(comm, 0, 0) self.assertIsNone(n) r = comm.Isend(sbuf.as_mpi(), 0, 0) comm.Probe(0, 0) m = MPI.Message.Iprobe(comm, 0, 0) self.assertIsInstance(m, MPI.Message) self.assertTrue(m) m.Recv(rbuf.as_raw()) self.assertFalse(m) r.Wait() # n = MPI.Message.Iprobe(comm, 0, 0) self.assertIsNone(n) r = comm.Isend(sbuf.as_mpi(), 0, 0) m = comm.Mprobe(0, 0) self.assertIsInstance(m, MPI.Message) self.assertTrue(m) m.Recv(rbuf.as_raw()) self.assertFalse(m) r.Wait() elif rank == 0: n = comm.Improbe(0, 0) self.assertIsNone(n) # comm.Send(sbuf.as_mpi(), 1, 0) m = comm.Mprobe(1, 0) self.assertTrue(m) m.Recv(rbuf.as_raw()) self.assertFalse(m) # n = comm.Improbe(0, 0) self.assertIsNone(n) comm.Send(sbuf.as_mpi(), 1, 1) m = None while not m: m = comm.Improbe(1, 1) m.Irecv(rbuf.as_raw()).Wait() elif rank == 1: n = comm.Improbe(1, 0) self.assertIsNone(n) # m = comm.Mprobe(0, 0) self.assertTrue(m) m.Recv(rbuf.as_raw()) self.assertFalse(m) # n = comm.Improbe(1, 0) self.assertIsNone(n) comm.Send(sbuf.as_mpi(), 0, 0) m = None while not m: m = comm.Improbe(0, 1) m.Irecv(rbuf.as_mpi()).Wait() comm.Send(sbuf.as_mpi(), 0, 1) else: rbuf = sbuf check = arrayimpl.scalar(s) for value in rbuf: self.assertEqual(value, check) class TestP2PMatchedSelf(BaseTestP2PMatched, unittest.TestCase): COMM = MPI.COMM_SELF class TestP2PMatchedWorld(BaseTestP2PMatched, unittest.TestCase): COMM = MPI.COMM_WORLD class TestP2PMatchedSelfDup(TestP2PMatchedSelf): def setUp(self): self.COMM = MPI.COMM_SELF.Dup() def tearDown(self): self.COMM.Free() class TestP2PMatchedWorldDup(TestP2PMatchedWorld): def setUp(self): self.COMM = MPI.COMM_WORLD.Dup() def tearDown(self): self.COMM.Free() if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_p2p_buf_part.py000066400000000000000000000173271475341043600177340ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import arrayimpl class BaseTestP2PBufPart: COMM = MPI.COMM_NULL def testSelf(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.loop(): with arrayimpl.test(self): for s in range(0, size): for p in range(1, 4): with self.subTest(p=p, s=s): sbuf = array( s, typecode, s*p) rbuf = array(-1, typecode, s*p) sreq = self.COMM.Psend_init(sbuf.as_mpi(), p, rank, 0) rreq = self.COMM.Precv_init(rbuf.as_mpi(), p, rank, 0) for _ in range(3): rreq.Start() for i in range(p): flag = rreq.Parrived(i) self.assertFalse(flag) sreq.Start() for i in range(p): sreq.Pready(i) for j in range(i+1, p): flag = rreq.Parrived(j) self.assertFalse(flag) for i in range(p): while not rreq.Parrived(i): pass flag = rreq.Parrived(i) self.assertTrue(flag) rreq.Wait() sreq.Wait() self.assertNotEqual(sreq, MPI.REQUEST_NULL) self.assertNotEqual(rreq, MPI.REQUEST_NULL) check = arrayimpl.scalar(s) for value in rbuf: self.assertEqual(value, check) rreq.Free() sreq.Free() self.assertEqual(sreq, MPI.REQUEST_NULL) self.assertEqual(rreq, MPI.REQUEST_NULL) def testRing(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() dest = (rank + 1) % size source = (rank - 1) % size for array, typecode in arrayimpl.loop(): with arrayimpl.test(self): for s in range(0, size): for p in range(1, 4): with self.subTest(p=p, s=s): sbuf = array( s, typecode, s*p) rbuf = array(-1, typecode, s*p) sreq = self.COMM.Psend_init(sbuf.as_mpi(), p, dest, 0) rreq = self.COMM.Precv_init(rbuf.as_mpi(), p, source, 0) for _ in range(3): self.COMM.Barrier() rreq.Start() for i in range(p): flag = rreq.Parrived(i) self.assertFalse(flag) self.COMM.Barrier() 
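# Compact sketch of MPI-4 partitioned transfers as driven by the ring tests,
# assuming partitioned communication is available (have_feature() below skips
# these tests otherwise); 4 partitions of 8 ints each are an arbitrary choice.
from mpi4py import MPI
import array

comm = MPI.COMM_SELF
parts, chunk = 4, 8
sbuf = array.array('i', range(parts * chunk))
rbuf = array.array('i', [-1] * (parts * chunk))
sreq = comm.Psend_init([sbuf, MPI.INT], parts, 0, 0)
rreq = comm.Precv_init([rbuf, MPI.INT], parts, 0, 0)
rreq.Start(); sreq.Start()
for i in range(parts):
    sreq.Pready(i)               # mark partition i of the send as ready
for i in range(parts):
    while not rreq.Parrived(i):  # poll until partition i has arrived
        pass
MPI.Request.Waitall([sreq, rreq])
sreq.Free(); rreq.Free()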
sreq.Start() for i in range(p): sreq.Pready(i) self.COMM.Barrier() for i in range(p): while not rreq.Parrived(i): pass flag = rreq.Parrived(i) self.assertTrue(flag) rreq.Wait() sreq.Wait() self.assertNotEqual(sreq, MPI.REQUEST_NULL) self.assertNotEqual(rreq, MPI.REQUEST_NULL) self.COMM.Barrier() check = arrayimpl.scalar(s) for value in rbuf: self.assertEqual(value, check) rreq.Free() sreq.Free() self.assertEqual(sreq, MPI.REQUEST_NULL) self.assertEqual(rreq, MPI.REQUEST_NULL) def testRingRangeList(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() dest = (rank + 1) % size source = (rank - 1) % size for array, typecode in arrayimpl.loop(): with arrayimpl.test(self): for s in range(0, size): for p in range(1, 4): with self.subTest(p=p, s=s): sbuf = array( s, typecode, s*p) rbuf = array(-1, typecode, s*p) sreq = self.COMM.Psend_init(sbuf.as_mpi(), p, dest, 0) rreq = self.COMM.Precv_init(rbuf.as_mpi(), p, source, 0) for case in range(4): self.COMM.Barrier() rreq.Start() for i in range(p): flag = rreq.Parrived(i) self.assertFalse(flag) self.COMM.Barrier() sreq.Start() if p > 1 and case % 2 == 0: sreq.Pready_range(0, p//2-1) sreq.Pready_range(p//2, p-1) else: sreq.Pready_list(list(range(0, p//2))) sreq.Pready_list(list(range(p//2, p))) self.COMM.Barrier() for i in range(p): while not rreq.Parrived(i): pass flag = rreq.Parrived(i) self.assertTrue(flag) rreq.Wait() sreq.Wait() self.assertNotEqual(sreq, MPI.REQUEST_NULL) self.assertNotEqual(rreq, MPI.REQUEST_NULL) self.COMM.Barrier() check = arrayimpl.scalar(s) for value in rbuf: self.assertEqual(value, check) rreq.Free() sreq.Free() self.assertEqual(sreq, MPI.REQUEST_NULL) self.assertEqual(rreq, MPI.REQUEST_NULL) class TestP2PBufPartSelf(BaseTestP2PBufPart, unittest.TestCase): COMM = MPI.COMM_SELF class TestP2PBufPartWorld(BaseTestP2PBufPart, unittest.TestCase): COMM = MPI.COMM_WORLD class TestP2PBufPartSelfDup(TestP2PBufPartSelf): def setUp(self): self.COMM = MPI.COMM_SELF.Dup() def tearDown(self): self.COMM.Free() class TestP2PBufPartWorldDup(TestP2PBufPartWorld): def setUp(self): self.COMM = MPI.COMM_WORLD.Dup() def tearDown(self): self.COMM.Free() def have_feature(): info = MPI.Get_library_version() if 'MPICH' in info and 'ch3:' in info: raise NotImplementedError sreq = MPI.COMM_SELF.Psend_init(bytearray(1), 1, 0, 0) rreq = MPI.COMM_SELF.Precv_init(bytearray(1), 1, 0, 0) sreq.Start(); rreq.Start(); sreq.Pready(0); rreq.Parrived(0); rreq.Wait(); rreq.Free(); del rreq; sreq.Wait(); sreq.Free(); del sreq; try: have_feature() except NotImplementedError: unittest.disable(BaseTestP2PBufPart, 'mpi-p2p-part') if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_p2p_obj.py000066400000000000000000000701601475341043600166760ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import threading import warnings import sys def allocate(n): return bytearray(n) _basic = [ None, True, False, -7, 0, 7, -2**63+1, 2**63-1, -2.17, 0.0, 3.14, 1+2j, 2-3j, 'mpi4py', ] messages = list(_basic) messages += [ list(_basic), tuple(_basic), set(_basic), frozenset(_basic), {f'k{k}': v for k, v in enumerate(_basic)}, ] class BaseTestP2PObj: COMM = MPI.COMM_NULL def testSendAndRecv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for smess in messages: self.COMM.send(smess, MPI.PROC_NULL) rmess = self.COMM.recv(None, MPI.PROC_NULL, 0) self.assertIsNone(rmess) if size == 1: return for smess in messages: if rank == 0: self.COMM.send(smess, rank+1, 0) rmess = smess elif rank == size - 1: rmess = 
self.COMM.recv(None, rank-1, 0) else: rmess = self.COMM.recv(None, rank-1, 0) self.COMM.send(rmess, rank+1, 0) self.assertEqual(rmess, smess) def testISendAndRecv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() buf = None for smess in messages: req = self.COMM.isend(smess, MPI.PROC_NULL) self.assertTrue(req) req.Wait() self.assertFalse(req) rmess = self.COMM.recv(buf, MPI.PROC_NULL, 0) self.assertIsNone(rmess) for smess in messages: req = self.COMM.isend(smess, rank, 0) self.assertTrue(req) rmess = self.COMM.recv(buf, rank, 0) self.assertTrue(req) flag = False while not flag: flag = req.Test() self.assertTrue(flag) self.assertFalse(req) self.assertEqual(rmess, smess) for smess in messages: dst = (rank+1)%size src = (rank-1)%size req = self.COMM.isend(smess, dst, 0) self.assertTrue(req) rmess = self.COMM.recv(buf, src, 0) req.Wait() self.assertFalse(req) self.assertEqual(rmess, smess) def testIRecvAndSend(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() for smess in messages: req = comm.irecv(0, MPI.PROC_NULL) self.assertTrue(req) comm.send(smess, MPI.PROC_NULL) rmess = req.wait() self.assertFalse(req) self.assertIsNone(rmess) for smess in messages: buf = allocate(512) req = comm.irecv(buf, rank, 0) self.assertTrue(req) flag, rmess = req.test() self.assertTrue(req) self.assertFalse(flag) self.assertIsNone(rmess) comm.send(smess, rank, 0) self.assertTrue(req) flag, rmess = req.test() while not flag: flag, rmess = req.test() self.assertTrue(flag) self.assertFalse(req) self.assertEqual(rmess, smess) tmp = allocate(1024) for buf in (None, 1024, tmp): for smess in messages + [messages]: dst = (rank+1)%size src = (rank-1)%size req = comm.irecv(buf, src, 0) self.assertTrue(req) comm.send(smess, dst, 0) rmess = req.wait() self.assertFalse(req) self.assertEqual(rmess, smess) for smess in messages: src = dst = rank rreq1 = comm.irecv(None, src, 1) rreq2 = comm.irecv(None, src, 2) rreq3 = comm.irecv(None, src, 3) rreqs = [rreq1, rreq2, rreq3] for i in range(len(rreqs)): self.assertTrue(rreqs[i]) comm.send(smess, dst, i+1) index, obj = MPI.Request.waitany(rreqs) self.assertEqual(index, i) self.assertEqual(obj, smess) self.assertFalse(rreqs[index]) index, obj = MPI.Request.waitany(rreqs) self.assertEqual(index, MPI.UNDEFINED) self.assertIsNone(obj) for smess in messages: src = dst = rank rreq1 = comm.irecv(None, src, 1) rreq2 = comm.irecv(None, src, 2) rreq3 = comm.irecv(None, src, 3) rreqs = [rreq1, rreq2, rreq3] index, flag, obj = MPI.Request.testany(rreqs) self.assertEqual(index, MPI.UNDEFINED) self.assertFalse(flag) self.assertIsNone(obj) for i in range(len(rreqs)): self.assertTrue(rreqs[i]) comm.send(smess, dst, i+1) index, flag, obj = MPI.Request.testany(rreqs) while not flag: index, flag, obj = MPI.Request.testany(rreqs) self.assertEqual(index, i) self.assertTrue(flag) self.assertEqual(obj, smess) self.assertFalse(rreqs[i]) index, flag, obj = MPI.Request.testany(rreqs) self.assertEqual(index, MPI.UNDEFINED) self.assertTrue(flag) self.assertIsNone(obj) def testIRecvAndISend(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() tmp = allocate(512) for smess in messages: dst = (rank+1)%size src = (rank-1)%size rreq = comm.irecv(None, src, 0) self.assertTrue(rreq) sreq = comm.isend(smess, dst, 0) self.assertTrue(sreq) index1, mess1 = MPI.Request.waitany([sreq,rreq]) self.assertIn(index1, (0, 1)) if index1 == 0: self.assertFalse(sreq) self.assertTrue (rreq) self.assertIsNone(mess1) else: self.assertTrue (sreq) self.assertFalse(rreq) 
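# Quick sketch of the lowercase (pickle-based) nonblocking API these object
# tests rely on, assuming a self-send on a single process; the payload is
# arbitrary.
from mpi4py import MPI

comm = MPI.COMM_SELF
sreq = comm.isend({'answer': 42}, dest=0, tag=1)
rreq = comm.irecv(source=0, tag=1)
obj = rreq.wait()
sreq.wait()
assert obj == {'answer': 42}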
self.assertEqual(mess1, smess) index2, mess2 = MPI.Request.waitany([sreq,rreq]) self.assertIn(index2, (0, 1)) self.assertNotEqual(index2, index1) self.assertFalse(sreq) self.assertFalse(rreq) if index2 == 0: self.assertIsNone(mess2) else: self.assertEqual(mess2, smess) for smess in messages: dst = (rank+1)%size src = (rank-1)%size rreq = comm.irecv(None, src, 0) self.assertTrue(rreq) sreq = comm.isend(smess, dst, 0) self.assertTrue(sreq) index1, flag1, mess1 = MPI.Request.testany([sreq,rreq]) while not flag1: index1, flag1, mess1 = MPI.Request.testany([sreq,rreq]) self.assertIn(index1, (0, 1)) if index1 == 0: self.assertFalse(sreq) self.assertTrue (rreq) self.assertIsNone(mess1) else: self.assertTrue (sreq) self.assertFalse(rreq) self.assertEqual(mess1, smess) index2, flag2, mess2 = MPI.Request.testany([sreq,rreq]) while not flag2: index2, flag2, mess2 = MPI.Request.testany([sreq,rreq]) self.assertIn(index2, (0, 1)) self.assertNotEqual(index2, index1) self.assertFalse(sreq) self.assertFalse(rreq) if index2 == 0: self.assertIsNone(mess2) else: self.assertEqual(mess2, smess) for buf in (None, 512, tmp): for smess in messages: dst = (rank+1)%size src = (rank-1)%size rreq = comm.irecv(buf, src, 0) self.assertTrue(rreq) sreq = comm.isend(smess, dst, 0) self.assertTrue(sreq) dummy, rmess = MPI.Request.waitall([sreq,rreq], []) self.assertFalse(sreq) self.assertFalse(rreq) self.assertIsNone(dummy) self.assertEqual(rmess, smess) for buf in (None, 512, tmp): for smess in messages: src = dst = rank rreq = comm.irecv(buf, src, 1) flag, msg = MPI.Request.testall([rreq]) self.assertFalse(flag) self.assertIsNone(msg) sreq = comm.isend(smess, dst, 1) while True: flag, msg = MPI.Request.testall([sreq,rreq], []) if not flag: self.assertIsNone(msg) continue (dummy, rmess) = msg self.assertIsNone(dummy) self.assertEqual(rmess, smess) break def testManyISendAndRecv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for smess in messages: reqs = [] for k in range(6): r = self.COMM.isend(smess, rank, 0) reqs.append(r) flag = MPI.Request.Testall(reqs) if not flag: index, flag = MPI.Request.Testany(reqs) if not flag: self.assertEqual(index, MPI.UNDEFINED) indices = MPI.Request.Testsome(reqs) self.assertIsInstance(indices, list) for k in range(3): rmess = self.COMM.recv(None, rank, 0) self.assertEqual(rmess, smess) flag = MPI.Request.Testall(reqs) if not flag: index, flag = MPI.Request.Testany(reqs) self.assertEqual(index, 0) self.assertTrue(flag) indices = MPI.Request.Testsome(reqs) if indices is None: count = MPI.UNDEFINED indices = [] else: count = len(indices) indices = sorted(indices) self.assertGreaterEqual(count, 2) self.assertEqual(indices[:2], [1, 2]) for k in range(3): rmess = self.COMM.recv(None, rank, 0) self.assertEqual(rmess, smess) flag = MPI.Request.Testall(reqs) self.assertTrue(flag) for smess in messages: reqs = [] for k in range(6): r = self.COMM.isend(smess, rank, 0) reqs.append(r) for k in range(3): rmess = self.COMM.recv(None, rank, 0) self.assertEqual(rmess, smess) index = MPI.Request.Waitany(reqs) self.assertEqual(index, 0) indices1 = MPI.Request.Waitsome(reqs) if indices1 is None: count1 = MPI.UNDEFINED indices1 = [] else: count1 = len(indices1) for k in range(3): rmess = self.COMM.recv(None, rank, 0) self.assertEqual(rmess, smess) indices2 = MPI.Request.Waitsome(reqs) if indices2 is None: count2 = MPI.UNDEFINED indices2 = [] else: count2 = len(indices2) if count1 == MPI.UNDEFINED: count1 = 0 if count2 == MPI.UNDEFINED: count2 = 0 self.assertEqual(6, 1+count1+count2) indices = 
[0]+list(indices1)+list(indices2) indices.sort() self.assertEqual(indices, list(range(6))) MPI.Request.Waitall(reqs) def testManyISSendAndRecv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for smess in messages: reqs = [] for k in range(6): r = self.COMM.issend(smess, rank, 0) reqs.append(r) flag = MPI.Request.Testall(reqs) self.assertFalse(flag) for k in range(3): rmess = self.COMM.recv(None, rank, 0) self.assertEqual(rmess, smess) flag = MPI.Request.Testall(reqs) self.assertFalse(flag) index, flag = MPI.Request.Testany(reqs) if flag: self.assertEqual(index, 0) indices = MPI.Request.Testsome(reqs) if indices is not None: target = [0, 1, 2] if flag: del target[0] for index in indices: self.assertIn(index, target) for k in range(3): flag = MPI.Request.Testall(reqs) self.assertFalse(flag) rmess = self.COMM.recv(None, rank, 0) self.assertEqual(rmess, smess) flag = False for k in range(10): flag |= MPI.Request.Testall(reqs) if flag: break if unittest.is_mpi('impi') and sys.platform == 'win32': flag |= MPI.Request.Waitall(reqs) self.assertTrue(flag) def testSSendAndRecv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for smess in messages: self.COMM.ssend(smess, MPI.PROC_NULL) rmess = self.COMM.recv(None, MPI.PROC_NULL, 0) self.assertIsNone(rmess) if size == 1: return for smess in messages: if rank == 0: self.COMM.ssend(smess, rank+1, 0) rmess = smess elif rank == size - 1: rmess = self.COMM.recv(None, rank-1, 0) else: rmess = self.COMM.recv(None, rank-1, 0) self.COMM.ssend(rmess, rank+1, 0) self.assertEqual(rmess, smess) def testISSendAndRecv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for smess in messages: req = self.COMM.issend(smess, MPI.PROC_NULL) self.assertTrue(req) req.Wait() self.assertFalse(req) rmess = self.COMM.recv(None, MPI.PROC_NULL, 0) self.assertIsNone(rmess) for smess in messages: req = self.COMM.issend(smess, rank, 0) self.assertTrue(req) flag = req.Test() self.assertFalse(flag) self.assertTrue(req) rmess = self.COMM.recv(None, rank, 0) self.assertTrue(req) flag = False while not flag: flag = req.Test() self.assertTrue(flag) self.assertFalse(req) self.assertEqual(rmess, smess) for smess in messages: dst = (rank+1)%size src = (rank-1)%size req = self.COMM.issend(smess, dst, 0) self.assertTrue(req) rmess = self.COMM.recv(None, src, 0) req.Wait() self.assertFalse(req) self.assertEqual(rmess, smess) def testCancel(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() status = MPI.Status() for smess in messages: req = self.COMM.issend(smess, rank) self.assertTrue(req) req.cancel() flag = req.get_status(status) cancelled = status.Is_cancelled() self.assertTrue(req) if cancelled: self.assertTrue(flag) req.Free() self.assertFalse(req) else: self.assertFalse(flag) rmess = self.COMM.recv(None, rank, 0) flag = req.get_status() while not flag: flag = req.get_status() self.assertTrue(flag) self.assertTrue(req) flag, _ = req.test() self.assertTrue(flag) self.assertFalse(req) self.assertEqual(rmess, smess) def testIRecvAndBSend(self): comm = self.COMM rank = comm.Get_rank() buf = MPI.Alloc_mem((1<<16)+MPI.BSEND_OVERHEAD) MPI.Attach_buffer(buf) try: for smess in messages: src = dst = rank req1 = comm.irecv(None, src, 1) req2 = comm.irecv(None, src, 2) req3 = comm.irecv(None, src, 3) comm.bsend(smess, dst, 3) comm.bsend(smess, dst, 2) comm.bsend(smess, dst, 1) self.assertEqual(smess, req3.wait()) self.assertEqual(smess, req2.wait()) self.assertEqual(smess, req1.wait()) comm.bsend(smess, MPI.PROC_NULL, 3) finally: MPI.Detach_buffer() 
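# Minimal sketch of the attach/bsend/detach sequence wrapped by the buffered
# tests above; the 64 KiB buffer size is an illustrative choice.
from mpi4py import MPI

comm = MPI.COMM_SELF
buf = MPI.Alloc_mem((1 << 16) + MPI.BSEND_OVERHEAD)
MPI.Attach_buffer(buf)
try:
    req = comm.irecv(source=0, tag=0)
    comm.bsend(list(range(10)), dest=0, tag=0)  # completes locally via buffer
    assert req.wait() == list(range(10))
finally:
    MPI.Detach_buffer()
    MPI.Free_mem(buf)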
MPI.Free_mem(buf) def testIRecvAndIBSend(self): comm = self.COMM rank = comm.Get_rank() buf = MPI.Alloc_mem((1<<16)+MPI.BSEND_OVERHEAD) MPI.Attach_buffer(buf) try: for smess in messages: src = dst = rank req1 = comm.irecv(None, src, 1) req2 = comm.irecv(None, src, 2) req3 = comm.irecv(None, src, 3) req4 = comm.ibsend(smess, dst, 3) req5 = comm.ibsend(smess, dst, 2) req6 = comm.ibsend(smess, dst, 1) MPI.Request.waitall([req4, req5, req6]) self.assertEqual(smess, req3.wait()) self.assertEqual(smess, req2.wait()) self.assertEqual(smess, req1.wait()) comm.ibsend(smess, MPI.PROC_NULL, 3).wait() finally: MPI.Detach_buffer() MPI.Free_mem(buf) def testIRecvAndSSend(self): comm = self.COMM rank = comm.Get_rank() for smess in messages: src = dst = rank req1 = comm.irecv(None, src, 1) req2 = comm.irecv(None, src, 2) req3 = comm.irecv(None, src, 3) comm.ssend(smess, dst, 3) comm.ssend(smess, dst, 2) comm.ssend(smess, dst, 1) self.assertEqual(smess, req3.wait()) self.assertEqual(smess, req2.wait()) self.assertEqual(smess, req1.wait()) comm.ssend(smess, MPI.PROC_NULL, 3) def testIRecvAndISSend(self): comm = self.COMM rank = comm.Get_rank() for smess in messages: src = dst = rank req1 = comm.irecv(None, src, 1) req2 = comm.irecv(None, src, 2) req3 = comm.irecv(None, src, 3) req4 = comm.issend(smess, dst, 3) req5 = comm.issend(smess, dst, 2) req6 = comm.issend(smess, dst, 1) MPI.Request.waitall([req4, req5, req6]) self.assertEqual(smess, req3.wait()) self.assertEqual(smess, req2.wait()) self.assertEqual(smess, req1.wait()) comm.issend(smess, MPI.PROC_NULL, 3).wait() def testSendrecv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for smess in messages: dest = (rank + 1) % size source = (rank - 1) % size rmess = self.COMM.sendrecv(smess, dest, 0, None, source, 0) continue self.assertEqual(rmess, smess) rmess = self.COMM.sendrecv(None, dest, 0, None, source, 0) self.assertIsNone(rmess) rmess = self.COMM.sendrecv(smess, MPI.PROC_NULL, 0, None, MPI.PROC_NULL, 0) self.assertIsNone(rmess) def testMixed(self): comm = self.COMM rank = comm.Get_rank() # sreq = comm.Isend([None, 0, 'B'], rank) obj = comm.recv(None, rank) sreq.Wait() self.assertIsNone(obj) for smess in messages: buf = MPI.pickle.dumps(smess) sreq = comm.Isend([buf, 'B'], rank) rmess = comm.recv(None, rank) sreq.Wait() self.assertEqual(rmess, smess) # sreq = comm.Isend([None, 0, 'B'], rank) rreq = comm.irecv(None, rank) sreq.Wait() obj = rreq.wait() self.assertIsNone(obj) for smess in messages: buf = MPI.pickle.dumps(smess) sreq = comm.Isend([buf, 'B'], rank) rreq = comm.irecv(None, rank) sreq.Wait() rmess = rreq.wait() self.assertEqual(rmess, smess) def testPingPong01(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for smess in messages: self.COMM.send(smess, MPI.PROC_NULL) rmess = self.COMM.recv(None, MPI.PROC_NULL, 0) self.assertIsNone(rmess) if size == 1: return smess = None if rank == 0: self.COMM.send(smess, rank+1, 0) rmess = self.COMM.recv(None, rank+1, 0) elif rank == 1: rmess = self.COMM.recv(None, rank-1, 0) self.COMM.send(smess, rank-1, 0) else: rmess = smess self.assertEqual(rmess, smess) for smess in messages: if rank == 0: self.COMM.send(smess, rank+1, 0) rmess = self.COMM.recv(None, rank+1, 0) elif rank == 1: rmess = self.COMM.recv(None, rank-1, 0) self.COMM.send(smess, rank-1, 0) else: rmess = smess self.assertEqual(rmess, smess) @unittest.skipMPI('MPICH1') def testProbe(self): comm = self.COMM.Dup() try: status = MPI.Status() flag = comm.iprobe(MPI.ANY_SOURCE, MPI.ANY_TAG, status) self.assertFalse(flag) 
for smess in messages: request = comm.issend(smess, comm.rank, 123) self.assertTrue(request) while not comm.iprobe(MPI.ANY_SOURCE, MPI.ANY_TAG, status): pass self.assertEqual(status.source, comm.rank) self.assertEqual(status.tag, 123) comm.probe(MPI.ANY_SOURCE, MPI.ANY_TAG, status) self.assertEqual(status.source, comm.rank) self.assertEqual(status.tag, 123) self.assertTrue(request) flag, obj = request.test() self.assertTrue(request) self.assertFalse(flag) self.assertIsNone(obj) obj = comm.recv(None, comm.rank, 123) self.assertEqual(obj, smess) self.assertTrue(request) obj = request.wait() self.assertFalse(request) self.assertIsNone(obj) finally: comm.Free() def testWaitSomeRecv(self): comm = self.COMM.Dup() rank = comm.Get_rank() reqs = [comm.irecv(source=rank, tag=i) for i in range(6)] for indexlist in ([5], [3,1,2], [0,4]): for i in indexlist: comm.ssend("abc", dest=rank, tag=i) statuses = [] idxs, objs = MPI.Request.waitsome(reqs, statuses) self.assertEqual(sorted(idxs), sorted(indexlist)) self.assertEqual(objs, ["abc"]*len(idxs)) self.assertFalse(any(reqs[i] for i in idxs)) self.assertEqual(len(statuses), len(idxs)) self.assertTrue(all(s.source == rank for s in statuses)) self.assertTrue(all(s.tag in indexlist for s in statuses)) self.assertTrue(all(s.error == MPI.SUCCESS for s in statuses)) idxs, objs = MPI.Request.waitsome(reqs) self.assertIsNone(idxs) self.assertIsNone(objs) self.assertFalse(any(reqs)) comm.Free() def testTestSomeRecv(self): comm = self.COMM.Dup() rank = comm.Get_rank() reqs = [comm.irecv(source=rank, tag=i) for i in range(6)] statuses = [] idxs, objs = MPI.Request.testsome(reqs, statuses) self.assertEqual(idxs, []) self.assertEqual(objs, []) self.assertTrue(all(reqs)) self.assertEqual(statuses, []) for indexlist in ([5], [], [3,1,2], [], [0,4]): for i in indexlist: comm.ssend("abc", dest=rank, tag=i) statuses = [] idxs, objs = MPI.Request.testsome(reqs, statuses) self.assertEqual(sorted(idxs), sorted(indexlist)) self.assertEqual(objs, ["abc"]*len(idxs)) self.assertFalse(any(reqs[i] for i in idxs)) self.assertEqual(len(statuses), len(idxs)) self.assertTrue(all(s.source == rank for s in statuses)) self.assertTrue(all(s.tag in indexlist for s in statuses)) self.assertTrue(all(s.error == MPI.SUCCESS for s in statuses)) idxs, objs = MPI.Request.testsome(reqs) self.assertIsNone(idxs) self.assertIsNone(objs) self.assertFalse(any(reqs)) comm.Free() def testWaitSomeSend(self): comm = self.COMM.Dup() rank = comm.Get_rank() reqs = [comm.issend("abc", dest=rank, tag=i) for i in range(6)] for indexlist in ([5], [3,1,2], [0,4]): for i in indexlist: msg = comm.recv(source=rank, tag=i) self.assertEqual(msg, "abc") idxs, objs = MPI.Request.waitsome(reqs) while sorted(idxs) != sorted(indexlist): i, o = MPI.Request.waitsome(reqs) idxs.extend(i) objs.extend(o) self.assertEqual(sorted(idxs), sorted(indexlist)) self.assertEqual(objs, [None]*len(idxs)) self.assertFalse(any(reqs[i] for i in idxs)) idxs, objs = MPI.Request.waitsome(reqs) self.assertIsNone(idxs) self.assertIsNone(objs) self.assertFalse(any(reqs)) comm.Free() def testTestSomeSend(self): comm = self.COMM.Dup() rank = comm.Get_rank() reqs = [comm.issend("abc", dest=rank, tag=i) for i in range(6)] idxs, objs = MPI.Request.testsome(reqs) self.assertEqual(idxs, []) self.assertEqual(objs, []) self.assertTrue(all(reqs)) for indexlist in ([5], [], [3,1,2], [], [0,4]): for i in indexlist: msg = comm.recv(source=rank, tag=i) self.assertEqual(msg, "abc") idxs, objs = MPI.Request.testsome(reqs) while sorted(idxs) != sorted(indexlist): 
i, o = MPI.Request.testsome(reqs) idxs.extend(i) objs.extend(o) self.assertEqual(sorted(idxs), sorted(indexlist)) self.assertEqual(objs, [None]*len(idxs)) self.assertFalse(any(reqs[i] for i in idxs)) idxs, objs = MPI.Request.testsome(reqs) self.assertIsNone(idxs) self.assertIsNone(objs) self.assertFalse(any(reqs)) comm.Free() def testRecvObjArg(self): comm = self.COMM rank = comm.Get_rank() req1 = comm.isend("42", rank) req2 = comm.isend([42], rank) with warnings.catch_warnings(): warnings.simplefilter("error") with self.assertRaises(UserWarning): comm.recv(bytearray(0), MPI.PROC_NULL) warnings.simplefilter("ignore") obj = comm.recv(128, rank) self.assertEqual(obj, "42") req1.wait() obj = comm.recv(bytearray(128), rank) self.assertEqual(obj, [42]) req2.wait() def testCommLock(self): comm = self.COMM.Dup() table = MPI._comm_lock_table(comm) self.assertIsInstance(table, dict) self.assertNotIn('bcast', table) comm.bcast(None, root=0) self.assertIn('bcast', table) lock = table['bcast'] lock_type = type(threading.Lock()) self.assertIsInstance(lock, lock_type) comm.Free() class TestP2PObjSelf(BaseTestP2PObj, unittest.TestCase): COMM = MPI.COMM_SELF class TestP2PObjWorld(BaseTestP2PObj, unittest.TestCase): COMM = MPI.COMM_WORLD class TestP2PObjSelfDup(TestP2PObjSelf): def setUp(self): self.COMM = MPI.COMM_SELF.Dup() def tearDown(self): self.COMM.Free() @unittest.skipMPI('openmpi(<1.4.0)', MPI.Query_thread() > MPI.THREAD_SINGLE) class TestP2PObjWorldDup(TestP2PObjWorld): def setUp(self): self.COMM = MPI.COMM_WORLD.Dup() def tearDown(self): self.COMM.Free() if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_p2p_obj_matched.py000066400000000000000000000123161475341043600203620ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest _basic = [ None, True, False, -7, 0, 7, -2**63+1, 2**63-1, -2.17, 0.0, 3.14, 1+2j, 2-3j, 'mpi4py', ] messages = list(_basic) messages += [ list(_basic), tuple(_basic), {f'k{k}': v for k, v in enumerate(_basic)}, ] @unittest.skipIf(MPI.MESSAGE_NULL == MPI.MESSAGE_NO_PROC, 'mpi-message') class TestMessage(unittest.TestCase): def testMessageNull(self): null = MPI.MESSAGE_NULL self.assertFalse(null) null2 = MPI.Message() self.assertEqual(null, null2) null3 = MPI.Message(null) self.assertEqual(null, null3) def testMessageNoProc(self): # noproc = MPI.MESSAGE_NO_PROC self.assertTrue(noproc) noproc.recv() self.assertTrue(noproc) noproc.irecv().wait() self.assertTrue(noproc) # noproc2 = MPI.Message(MPI.MESSAGE_NO_PROC) self.assertTrue(noproc2) self.assertEqual(noproc2, noproc) self.assertNotEqual(noproc, MPI.MESSAGE_NULL) # message = MPI.Message(MPI.MESSAGE_NO_PROC) message.recv() self.assertEqual(message, MPI.MESSAGE_NULL) # message = MPI.Message(MPI.MESSAGE_NO_PROC) request = message.irecv() self.assertEqual(message, MPI.MESSAGE_NULL) self.assertNotEqual(request, MPI.REQUEST_NULL) request.wait() self.assertEqual(request, MPI.REQUEST_NULL) # comm = MPI.COMM_SELF message = comm.mprobe(MPI.PROC_NULL) self.assertNotEqual(message, MPI.MESSAGE_NULL) self.assertEqual(message, MPI.MESSAGE_NO_PROC) noproc = comm.improbe(MPI.PROC_NULL) self.assertNotEqual(message, MPI.MESSAGE_NULL) self.assertEqual(message, MPI.MESSAGE_NO_PROC) @unittest.skipIf(MPI.MESSAGE_NULL == MPI.MESSAGE_NO_PROC, 'mpi-message') class BaseTestP2PMatched: COMM = MPI.COMM_NULL def testIMProbe(self): comm = self.COMM.Dup() try: m = comm.improbe() self.assertIsNone(m) m = comm.improbe(MPI.ANY_SOURCE) self.assertIsNone(m) m = comm.improbe(MPI.ANY_SOURCE, MPI.ANY_TAG) 
self.assertIsNone(m) status = MPI.Status() m = comm.improbe(MPI.ANY_SOURCE, MPI.ANY_TAG, status) self.assertIsNone(m) self.assertEqual(status.source, MPI.ANY_SOURCE) self.assertEqual(status.tag, MPI.ANY_TAG) self.assertEqual(status.error, MPI.SUCCESS) m = MPI.Message.iprobe(comm) self.assertIsNone(m) s = comm.isend(None, comm.rank, 0) r = comm.mprobe(comm.rank, 0).irecv() MPI.Request.waitall([s,r]) finally: comm.Free() def testProbeRecv(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() for smsg in messages: if size == 1: sr = comm.isend(smsg, 0, 0) m = comm.mprobe(0, 0) self.assertIsInstance(m, MPI.Message) self.assertTrue(m) rr = m.irecv() self.assertFalse(m) self.assertTrue(sr) self.assertTrue(rr) MPI.Request.Waitall([sr,rr]) self.assertFalse(sr) self.assertFalse(rr) # r = comm.isend(smsg, 0, 0) m = MPI.Message.probe(comm, 0, 0) self.assertIsInstance(m, MPI.Message) self.assertTrue(m) rmsg = m.recv() self.assertFalse(m) r.wait() elif rank == 0: comm.send(smsg, 1, 0) m = comm.mprobe(1, 0) self.assertTrue(m) rmsg = m.recv() self.assertFalse(m) # comm.send(smsg, 1, 1) m = None while not m: m = MPI.Message.iprobe(comm, 1, 1) rmsg = m.irecv().wait() elif rank == 1: m = comm.mprobe(0, 0) self.assertTrue(m) rmsg = m.recv() self.assertFalse(m) comm.send(rmsg, 0, 0) # m = None while not m: m = MPI.Message.iprobe(comm, 0, 1) rmsg = m.irecv().wait() comm.send(smsg, 0, 1) else: rmsg = smsg self.assertEqual(smsg, rmsg) class TestP2PMatchedSelf(BaseTestP2PMatched, unittest.TestCase): COMM = MPI.COMM_SELF class TestP2PMatchedWorld(BaseTestP2PMatched, unittest.TestCase): COMM = MPI.COMM_WORLD @unittest.skipMPI('openmpi(<1.8.5)', MPI.COMM_WORLD.Get_size() > 1) class TestP2PMatchedSelfDup(TestP2PMatchedSelf): def setUp(self): self.COMM = MPI.COMM_SELF.Dup() def tearDown(self): self.COMM.Free() @unittest.skipMPI('openmpi(<1.8.5)', MPI.COMM_WORLD.Get_size() > 1) class TestP2PMatchedWorldDup(TestP2PMatchedWorld): def setUp(self): self.COMM = MPI.COMM_WORLD.Dup() def tearDown(self): self.COMM.Free() if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_pack.py000066400000000000000000000160351475341043600162620ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import arrayimpl import os, sys, platform def allclose(a, b, rtol=1.e-5, atol=1.e-8): try: iter(a) except TypeError: a = [a] try: iter(b) except TypeError: b = [b] for x, y in zip(a, b): if x == y: continue if abs(x - y) > (atol + rtol * abs(y)): return False return True class BaseTestPack: COMM = MPI.COMM_NULL skipdtype = [] def testPackSize(self): for array, typecode in arrayimpl.loop(): with arrayimpl.test(self): if typecode in self.skipdtype: continue datatype = array.TypeMap[typecode] itemsize = datatype.Get_size() overhead = datatype.Pack_size(0, self.COMM) for count in range(10): with self.subTest(count=count): pack_size = datatype.Pack_size(count, self.COMM) self.assertEqual(pack_size - overhead, count*itemsize) def testPackUnpack(self): for array, typecode1 in arrayimpl.loop(): with arrayimpl.test(self): if typecode1 in self.skipdtype: continue for typecode2 in array.TypeMap: if typecode2 in self.skipdtype: continue datatype1 = array.TypeMap[typecode1] datatype2 = array.TypeMap[typecode2] for count in range(10): with self.subTest(typecode=(typecode1, typecode2), count=count): # input and output arrays iarray1 = array(range(count), typecode1).as_raw() iarray2 = array(range(count), typecode2).as_raw() oarray1 = array(count, typecode1, count).as_raw() oarray2 = array(count, typecode2, 
count).as_raw() # temp array for packing size1 = datatype1.Pack_size(len(iarray1), self.COMM) size2 = datatype2.Pack_size(len(iarray2), self.COMM) tmpbuf = array(0, 'b', size1 + size2 + 1).as_raw() # pack input arrays position = 0 position = datatype1.Pack(iarray1, tmpbuf, position, self.COMM) position = datatype2.Pack(iarray2, tmpbuf, position, self.COMM) # unpack output arrays position = 0 position = datatype1.Unpack(tmpbuf, position, oarray1, self.COMM) position = datatype2.Unpack(tmpbuf, position, oarray2, self.COMM) # test self.assertTrue(allclose(iarray1, oarray1)) self.assertTrue(allclose(iarray2, oarray2)) EXT32 = 'external32' class BaseTestPackExternal: skipdtype = [] def testPackSize(self): for array, typecode in arrayimpl.loop(): with arrayimpl.test(self): if typecode in self.skipdtype: continue datatype = array.TypeMap[typecode] overhead = datatype.Pack_external_size(EXT32, 0) for count in range(10): with self.subTest(count=count): pack_size = datatype.Pack_external_size(EXT32, count) real_size = pack_size - overhead self.assertGreaterEqual(real_size, 0) def testPackUnpackExternal(self): for array, typecode1 in arrayimpl.loop(): with arrayimpl.test(self): if unittest.is_mpi_gpu('mpich', array): continue if unittest.is_mpi_gpu('openmpi', array): continue if unittest.is_mpi_gpu('mvapich', array): continue if typecode1 in self.skipdtype: continue for typecode2 in array.TypeMap: if typecode2 in self.skipdtype: continue datatype1 = array.TypeMap[typecode1] datatype2 = array.TypeMap[typecode2] for count in range(1, 10): with self.subTest(count=count, typecode=(typecode1, typecode2)): # input and output arrays val = 127 if typecode1 == 'b' else 255 iarray1 = array(val, typecode1, count).as_raw() iarray2 = array(range(count), typecode2).as_raw() oarray1 = array(-1, typecode1, count).as_raw() oarray2 = array(-1, typecode2, count).as_raw() # temp array for packing size1 = datatype1.Pack_external_size(EXT32, len(iarray1)) size2 = datatype2.Pack_external_size(EXT32, len(iarray2)) tmpbuf = array(0, 'b', size1 + size2 + 1).as_raw() # pack input arrays position = 0 position = datatype1.Pack_external(EXT32, iarray1, tmpbuf, position) position = datatype2.Pack_external(EXT32, iarray2, tmpbuf, position) # unpack output arrays position = 0 position = datatype1.Unpack_external(EXT32, tmpbuf, position, oarray1) position = datatype2.Unpack_external(EXT32, tmpbuf, position, oarray2) # test result self.assertTrue(allclose(iarray1, oarray1)) self.assertTrue(allclose(iarray2, oarray2)) class TestPackSelf(BaseTestPack, unittest.TestCase): COMM = MPI.COMM_SELF class TestPackWorld(BaseTestPack, unittest.TestCase): COMM = MPI.COMM_SELF @unittest.skipMPI('openmpi(<3.0.0)') class TestPackExternal(BaseTestPackExternal, unittest.TestCase): pass name, version = MPI.get_vendor() if name == 'MPICH': if version < (4, 0, 0): BaseTestPackExternal.skipdtype += 'ldgLFDG' if platform.architecture(None)[0] == '32bit': BaseTestPackExternal.skipdtype += 'gG' elif name == 'Open MPI': if version < (5, 0, 0): BaseTestPackExternal.skipdtype += 'gG' if (platform.system(), platform.machine()) == ('Darwin', 'arm64'): BaseTestPackExternal.skipdtype += 'G' elif name == 'Intel MPI': BaseTestPackExternal.skipdtype += 'lgLG' BaseTestPackExternal.skipdtype += 'D' if os.name == 'nt': BaseTestPackExternal.skipdtype += 'q' elif name == 'Microsoft MPI': BaseTestPackExternal.skipdtype += 'gFDG' elif name == 'MVAPICH': BaseTestPackExternal.skipdtype += 'lfdgLFDG' elif name == 'MPICH2': BaseTestPackExternal.skipdtype += 'ldgLFDG' else: try: 
MPI.BYTE.Pack_external_size(EXT32, 0) except NotImplementedError: unittest.disable(BaseTestPackExternal, 'mpi-ext32') if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_package.py000066400000000000000000000043341475341043600167360ustar00rootroot00000000000000import mpi4py import unittest import sys, os pkgdir = os.path.dirname(mpi4py.__file__) class TestImport(unittest.TestCase): def testImportMPI(self): import mpi4py.MPI def testImportBench(self): import mpi4py.bench def testImportFutures(self): import mpi4py.futures import mpi4py.futures.server import mpi4py.futures.__main__ def testImportRun(self): import mpi4py.run import mpi4py.__main__ def testImportTyping(self): import mpi4py.typing def testImportUtil(self): import mpi4py.util import mpi4py.util.dtlib import mpi4py.util.pkl5 import mpi4py.util.pool import mpi4py.util.sync class TestDataFiles(unittest.TestCase): def testTyping(self): import importlib.machinery if sys.version_info < (3, 8): check = self.assertFalse else: check = self.assertTrue py_typed = os.path.join(pkgdir, "py.typed") check(os.path.exists(py_typed)) suffixes = [ *importlib.machinery.SOURCE_SUFFIXES, *importlib.machinery.EXTENSION_SUFFIXES, ] for root, dirs, files in os.walk(pkgdir): for fname in files: name, _, extra = fname.partition(".") suffix = f".{extra}" for entry in suffixes: if suffix.endswith(entry): pyi = os.path.join(root, f"{name}.pyi") check(os.path.exists(pyi)) break def testCython(self): for fname in [ "__init__.pxd", "libmpi.pxd", "MPI.pxd", ]: pxd = os.path.join(pkgdir, fname) self.assertTrue(os.path.exists(pxd)) def testHeaders(self): for fname in [ os.path.join("MPI.h"), os.path.join("MPI_api.h"), os.path.join("include", "mpi4py", "pycapi.h"), os.path.join("include", "mpi4py", "mpi4py.h"), os.path.join("include", "mpi4py", "mpi4py.i"), ]: hdr = os.path.join(pkgdir, fname) self.assertTrue(os.path.exists(hdr)) if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_pickle.py000066400000000000000000000072671475341043600166220ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import sys try: import pickle as pyPickle except ImportError: pyPickle = None try: import dill except ImportError: dill = None try: import marshal except ImportError: marshal = None try: import json except ImportError: json = None try: import yaml yaml.dump(None) except ImportError: yaml = None OBJS = [ None, True, False, 7, 1<<32, 3.14, 1+2j, 'qwerty', (0, 1, 2), [0, 1, 2], {'a':0, 'b':1}, ] def tobytes(s): return memoryview(s).tobytes() class TestPickle(unittest.TestCase): def setUp(self): self.pickle = MPI.pickle def tearDown(self): self.pickle.__init__() def do_pickle(self, obj, pickle): comm = MPI.COMM_SELF o = comm.sendrecv(obj, 0, 0, None, 0, 0) self.assertEqual(obj, o) s = pickle.dumps(obj) o = pickle.loads(s) self.assertEqual(obj, o) def testDefault(self): pickle = self.pickle protocols = [0, 1, 2, 3, 4] if sys.version_info[:2] >= (3, 8): protocols.append(5) protocols.append(-1) protocols.append(None) for proto in protocols: pickle.__init__(protocol=proto) for obj in OBJS: self.do_pickle(obj, pickle) self.do_pickle(OBJS, pickle) def testPyPickle(self): pickle = self.pickle dumps = pyPickle.dumps loads = pyPickle.loads protocols = [0, 1, 2, 3, 4] if sys.version_info[:2] >= (3, 8): protocols.append(5) protocols.append(-1) protocols.append(None) for proto in protocols: pickle.__init__(dumps, loads, proto) for obj in OBJS: self.do_pickle(obj, pickle) self.do_pickle(OBJS, pickle) @unittest.skipIf(dill is None, 'dill') 
def testDill(self): pickle = self.pickle dumps = dill.dumps loads = dill.loads protocols = list(range(dill.HIGHEST_PROTOCOL+1)) protocols.append(-1) protocols.append(None) for proto in protocols: pickle.__init__(dumps, loads, proto) for obj in OBJS: self.do_pickle(obj, pickle) self.do_pickle(OBJS, pickle) @unittest.skipIf(marshal is None, 'marshal') def testMarshal(self): pickle = self.pickle dumps = marshal.dumps loads = marshal.loads protocols = [0, 1, 2, 3, 4] protocols.append(None) for protocol in protocols: pickle.__init__(dumps, loads, protocol) for obj in OBJS: self.do_pickle(obj, pickle) self.do_pickle(OBJS, pickle) @unittest.skipIf(json is None, 'json') def testJson(self): pickle = self.pickle dumps = lambda o: json.dumps(o).encode() loads = lambda s: json.loads(tobytes(s).decode()) pickle.__init__(dumps, loads) OBJS2 = [ o for o in OBJS if not isinstance(o, (float, complex, tuple)) ] for obj in OBJS2: self.do_pickle(obj, pickle) self.do_pickle(OBJS2, pickle) @unittest.skipIf(yaml is None, 'yaml') def testYAML(self): pickle = self.pickle dumps = lambda o: yaml.dump(o).encode() loads = lambda s: yaml.load(tobytes(s).decode(), Loader=yaml.Loader) pickle.__init__(dumps, loads) OBJS2 = [ o for o in OBJS if not isinstance(o, (complex, tuple)) ] for obj in OBJS2: self.do_pickle(obj, pickle) self.do_pickle(OBJS2, pickle) if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_request.py000066400000000000000000000171101475341043600170270ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest class TestRequest(unittest.TestCase): def setUp(self): self.REQUEST = MPI.Request() self.STATUS = MPI.Status() def testWait(self): self.REQUEST.Wait() self.REQUEST.Wait(None) self.REQUEST.Wait(self.STATUS) self.assertIs(self.REQUEST.Wait(), True) self.REQUEST.wait() self.REQUEST.wait(None) self.REQUEST.wait(self.STATUS) self.assertIsNone(self.REQUEST.wait()) def testTest(self): self.REQUEST.Test() self.REQUEST.Test(None) self.REQUEST.Test(self.STATUS) self.assertIs(self.REQUEST.Test(), True) self.REQUEST.test() self.REQUEST.test(None) self.REQUEST.test(self.STATUS) self.assertEqual(self.REQUEST.test(), (True, None)) @unittest.skipMPI('MPICH1') @unittest.skipMPI('LAM/MPI') def testGetStatus(self): try: flag = self.REQUEST.Get_status() except NotImplementedError: self.skipTest('mpi-request-get_status') self.assertTrue(flag) flag = self.REQUEST.Get_status(self.STATUS) self.assertTrue(flag) self.assertEqual(self.STATUS.Get_source(), MPI.ANY_SOURCE) self.assertEqual(self.STATUS.Get_tag(), MPI.ANY_TAG) self.assertEqual(self.STATUS.Get_error(), MPI.SUCCESS) self.assertEqual(self.STATUS.Get_count(MPI.BYTE), 0) self.assertEqual(self.STATUS.Get_elements(MPI.BYTE), 0) try: self.assertFalse(self.STATUS.Is_cancelled()) except NotImplementedError: self.skipTest('mpi-status-is_cancelled') flag = self.REQUEST.get_status() self.assertTrue(flag) flag = self.REQUEST.get_status(self.STATUS) self.assertEqual(self.STATUS.source, MPI.ANY_SOURCE) self.assertEqual(self.STATUS.tag, MPI.ANY_TAG) self.assertEqual(self.STATUS.error, MPI.SUCCESS) class TestRequestArray(unittest.TestCase): def setUp(self): self.REQUESTS = [MPI.Request() for i in range(5)] self.STATUSES = [MPI.Status() for i in range(5)] self.STATUS = self.STATUSES[0] for status in self.STATUSES: status.source = 0 status.tag = 0 status.error = MPI.ERR_OTHER def testWaitany(self): MPI.Request.Waitany(self.REQUESTS) MPI.Request.Waitany(self.REQUESTS, None) MPI.Request.Waitany(self.REQUESTS, self.STATUSES[0]) 
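# Sketch of completing several requests at once, which the array tests below
# exercise with null requests; real self-messages are used here instead, and
# the three-message count is arbitrary.
from mpi4py import MPI

comm = MPI.COMM_SELF
reqs = [comm.isend(i, dest=0, tag=i) for i in range(3)]
reqs += [comm.irecv(source=0, tag=i) for i in range(3)]
statuses = []
objs = MPI.Request.waitall(reqs, statuses)  # None for sends, objects for recvs
assert objs[3:] == [0, 1, 2]
assert len(statuses) == len(reqs)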
MPI.Request.waitany(self.REQUESTS) MPI.Request.waitany(self.REQUESTS, None) MPI.Request.waitany(self.REQUESTS, self.STATUSES[0]) def testTestany(self): MPI.Request.Testany(self.REQUESTS) MPI.Request.Testany(self.REQUESTS, None) MPI.Request.Testany(self.REQUESTS, self.STATUSES[0]) MPI.Request.testany(self.REQUESTS) MPI.Request.testany(self.REQUESTS, None) MPI.Request.testany(self.REQUESTS, self.STATUSES[0]) def testGetStatusAny(self): with self.catchNotImplementedError(4, 1): status = self.STATUS index, flag = MPI.Request.Get_status_any(self.REQUESTS) self.assertEqual(index, MPI.UNDEFINED) self.assertTrue(flag) index, flag = MPI.Request.Get_status_any(self.REQUESTS, None) self.assertEqual(index, MPI.UNDEFINED) self.assertTrue(flag) if unittest.is_mpi('impi(==2021.14.0)'): status.error = MPI.SUCCESS if unittest.is_mpi('impi(==2021.14.1)'): status.error = MPI.SUCCESS index, flag = MPI.Request.Get_status_any(self.REQUESTS, status) self.assertEqual(index, MPI.UNDEFINED) self.assertTrue(flag) self.assertEqual(status.source, MPI.ANY_SOURCE) self.assertEqual(status.tag, MPI.ANY_TAG) self.assertEqual(status.error, MPI.SUCCESS) with self.catchNotImplementedError(4, 1): index, flag = MPI.Request.get_status_any(self.REQUESTS) self.assertEqual(index, MPI.UNDEFINED) self.assertTrue(flag) def testWaitall(self): MPI.Request.Waitall(self.REQUESTS) MPI.Request.Waitall(self.REQUESTS, None) self.assertIs(MPI.Request.Waitall(self.REQUESTS), True) for statuses in (tuple(self.STATUSES), (self.STATUSES[0],), ()): MPI.Request.Waitall(self.REQUESTS, statuses) for statuses in (self.STATUSES, []): MPI.Request.Waitall(self.REQUESTS, statuses) self.assertEqual(len(statuses), len(self.REQUESTS)) MPI.Request.waitall(self.REQUESTS) MPI.Request.waitall(self.REQUESTS, None) for statuses in (self.STATUSES, []): MPI.Request.waitall(self.REQUESTS, statuses) self.assertEqual(len(statuses), len(self.REQUESTS)) def testTestall(self): MPI.Request.Testall(self.REQUESTS) MPI.Request.Testall(self.REQUESTS, None) self.assertIs(MPI.Request.Testall(self.REQUESTS), True) for statuses in (self.STATUSES, []): MPI.Request.Testall(self.REQUESTS, statuses) self.assertEqual(len(statuses), len(self.REQUESTS)) MPI.Request.testall(self.REQUESTS) MPI.Request.testall(self.REQUESTS, None) for statuses in (self.STATUSES, []): MPI.Request.testall(self.REQUESTS, statuses) self.assertEqual(len(statuses), len(self.REQUESTS)) def testGetStatusAll(self): with self.catchNotImplementedError(4, 1): statuses = self.STATUSES flag = MPI.Request.Get_status_all(self.REQUESTS) self.assertTrue(flag) flag = MPI.Request.Get_status_all(self.REQUESTS, None) self.assertTrue(flag) flag = MPI.Request.Get_status_all(self.REQUESTS, statuses) self.assertTrue(flag) for status in statuses: self.assertEqual(status.source, MPI.ANY_SOURCE) self.assertEqual(status.tag, MPI.ANY_TAG) self.assertEqual(status.error, MPI.SUCCESS) with self.catchNotImplementedError(4, 1): flag = MPI.Request.get_status_all(self.REQUESTS) self.assertTrue(flag) def testWaitsome(self): ret = MPI.Request.Waitsome(self.REQUESTS) self.assertIsNone(ret) ret = MPI.Request.Waitsome(self.REQUESTS, None) self.assertIsNone(ret) for statuses in (self.STATUSES, []): slen = len(statuses) ret = MPI.Request.Waitsome(self.REQUESTS, statuses) self.assertIsNone(ret) self.assertEqual(len(statuses), slen) def testTestsome(self): ret = MPI.Request.Testsome(self.REQUESTS) self.assertIsNone(ret) ret = MPI.Request.Testsome(self.REQUESTS, None) self.assertIsNone(ret) for statuses in (self.STATUSES, []): slen = len(statuses) ret = 
MPI.Request.Testsome(self.REQUESTS, statuses) self.assertIsNone(ret) self.assertEqual(len(statuses), slen) def testGetStatusSome(self): with self.catchNotImplementedError(4, 1): statuses = self.STATUSES indices = MPI.Request.Get_status_some(self.REQUESTS) self.assertIsNone(indices) indices = MPI.Request.Get_status_some(self.REQUESTS, None) self.assertIsNone(indices) indices = MPI.Request.Get_status_some(self.REQUESTS, statuses) self.assertIsNone(indices) with self.catchNotImplementedError(4, 1): indices = MPI.Request.get_status_some(self.REQUESTS) self.assertIsNone(indices) if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_rma.py000066400000000000000000000462711475341043600161300ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import contextlib import arrayimpl import sys scalar = arrayimpl.scalar typemap = MPI.Datatype.fromcode def mkzeros(n): return bytearray(n) def memzero(m): try: m[:] = 0 except IndexError: # cffi buffer m[0:len(m)] = b'\0'*len(m) @contextlib.contextmanager def win_lock(win, rank, *args, **kwargs): win.Lock(rank, *args, **kwargs) try: yield finally: win.Unlock(rank) @contextlib.contextmanager def win_lock_all(win, *args, **kwargs): win.Lock_all(*args, **kwargs) try: yield finally: win.Unlock_all() class BaseTestRMA: COMM = MPI.COMM_NULL INFO = MPI.INFO_NULL def setUp(self): nbytes = 100*MPI.DOUBLE.size try: self.mpi_memory = MPI.Alloc_mem(nbytes) self.memory = self.mpi_memory memzero(self.memory) except MPI.Exception: self.mpi_memory = None self.memory = bytearray(nbytes) self.WIN = MPI.Win.Create(self.memory, 1, self.INFO, self.COMM) def tearDown(self): self.WIN.Free() if self.mpi_memory: MPI.Free_mem(self.mpi_memory) def testPutGet(self): group = self.WIN.Get_group() size = group.Get_size() group.Free() for array, typecode in arrayimpl.loop(): with arrayimpl.test(self): if unittest.is_mpi_gpu('mvapich', array): continue for count in range(10): for rank in range(size): with self.subTest(rank=rank, count=count): sbuf = array(range(count), typecode) rbuf = array(-1, typecode, count+1) # self.WIN.Fence() self.WIN.Put(sbuf.as_mpi(), rank) self.WIN.Fence() self.WIN.Get(rbuf.as_mpi_c(count), rank) self.WIN.Fence() for i in range(count): self.assertEqual(sbuf[i], scalar(i)) self.assertEqual(rbuf[i], scalar(i)) self.assertEqual(rbuf[-1], scalar(-1)) # sbuf = array(range(count), typecode) rbuf = array(-1, typecode, count+1) target = sbuf.itemsize self.WIN.Fence() self.WIN.Put(sbuf.as_mpi(), rank, target) self.WIN.Fence() self.WIN.Get(rbuf.as_mpi_c(count), rank, target) self.WIN.Fence() for i in range(count): self.assertEqual(sbuf[i], scalar(i)) self.assertEqual(rbuf[i], scalar(i)) self.assertEqual(rbuf[-1], scalar(-1)) # sbuf = array(range(count), typecode) rbuf = array(-1, typecode, count+1) datatype = typemap(typecode) target = (sbuf.itemsize, count, datatype) self.WIN.Fence() self.WIN.Put(sbuf.as_mpi(), rank, target) self.WIN.Fence() self.WIN.Get(rbuf.as_mpi_c(count), rank, target) self.WIN.Fence() for i in range(count): self.assertEqual(sbuf[i], scalar(i)) self.assertEqual(rbuf[i], scalar(i)) self.assertEqual(rbuf[-1], scalar(-1)) def testAccumulate(self): group = self.WIN.Get_group() size = group.Get_size() group.Free() for array, typecode in arrayimpl.loop(): with arrayimpl.test(self): if unittest.is_mpi_gpu('openmpi', array): continue if unittest.is_mpi_gpu('mvapich', array): continue if typecode in '?': continue if typecode in 'FDG': continue for count in range(10): for rank in range(size): with self.subTest(rank=rank, 
count=count): sbuf = array(range(count), typecode) rbuf = array(-1, typecode, count+1) for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): self.WIN.Fence() self.WIN.Accumulate(sbuf.as_mpi(), rank, op=op) self.WIN.Fence() self.WIN.Get(rbuf.as_mpi_c(count), rank) self.WIN.Fence() for i in range(count): self.assertEqual(sbuf[i], scalar(i)) self.assertNotEqual(rbuf[i], scalar(-1)) self.assertEqual(rbuf[-1], scalar(-1)) @unittest.skipMPI('openmpi(>=1.10,<1.11)') def testGetAccumulate(self): group = self.WIN.Get_group() size = group.Get_size() rank = group.Get_rank() group.Free() self.WIN.Fence() obuf = MPI.Alloc_mem(1); memzero(obuf) rbuf = MPI.Alloc_mem(1); memzero(rbuf) try: try: self.WIN.Get_accumulate([obuf, 0, MPI.BYTE], [rbuf, 0, MPI.BYTE], rank) self.WIN.Fence() finally: MPI.Free_mem(obuf) MPI.Free_mem(rbuf) except NotImplementedError: self.skipTest('mpi-win-get_accumulate') self.WIN.Fence() for array, typecode in arrayimpl.loop(): with arrayimpl.test(self): if unittest.is_mpi_gpu('openmpi', array): continue if unittest.is_mpi_gpu('mvapich', array): continue if typecode in '?': continue if typecode in 'FDG': continue for count in range(10): for rank in range(size): with self.subTest(rank=rank, count=count): ones = array([1]*count, typecode) sbuf = array(range(count), typecode) rbuf = array(-1, typecode, count+1) gbuf = array(-1, typecode, count+1) for op in ( MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN, MPI.REPLACE, MPI.NO_OP, ): with win_lock(self.WIN, rank): self.WIN.Put(ones.as_mpi(), rank) self.WIN.Flush(rank) self.WIN.Get_accumulate(sbuf.as_mpi(), rbuf.as_mpi_c(count), rank, op=op) self.WIN.Flush(rank) self.WIN.Get(gbuf.as_mpi_c(count), rank) self.WIN.Flush(rank) # for i in range(count): self.assertEqual(sbuf[i], scalar(i)) self.assertEqual(rbuf[i], scalar(1)) self.assertEqual(gbuf[i], scalar(op(1, i))) self.assertEqual(rbuf[-1], scalar(-1)) self.assertEqual(gbuf[-1], scalar(-1)) def testFetchAndOp(self): group = self.WIN.Get_group() size = group.Get_size() rank = group.Get_rank() group.Free() self.WIN.Fence() blen = MPI.INT.Get_size() obuf = MPI.Alloc_mem(blen); memzero(obuf) rbuf = MPI.Alloc_mem(blen); memzero(rbuf) try: try: self.WIN.Fetch_and_op( [obuf, 1, MPI.INT], [rbuf, 1, MPI.INT], rank) self.WIN.Fence() finally: MPI.Free_mem(obuf) MPI.Free_mem(rbuf) except NotImplementedError: self.skipTest('mpi-win-fetch_and_op') self.WIN.Fence() for array, typecode in arrayimpl.loop(): with arrayimpl.test(self): if unittest.is_mpi_gpu('openmpi', array): continue if unittest.is_mpi_gpu('mvapich', array): continue if typecode in '?': continue if typecode in 'FDG': continue obuf = array(+1, typecode) rbuf = array(-1, typecode, 2) datatype = typemap(typecode) for op in ( MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN, MPI.REPLACE, MPI.NO_OP, ): for rank in range(size): for disp in range(3): with self.subTest(disp=disp, rank=rank): with win_lock(self.WIN, rank): self.WIN.Fetch_and_op( obuf.as_mpi(), rbuf.as_mpi_c(1), rank, disp * datatype.size, op=op ) self.assertEqual(rbuf[1], scalar(-1)) big = bytearray(MPI.INT.Get_size()) buf1 = bytearray(1) buf2 = bytearray(1) with win_lock(self.WIN, rank): self.WIN.Fetch_and_op(buf1, buf2, rank, op=MPI.NO_OP) with self.assertRaises(ValueError): with win_lock(self.WIN, rank): self.WIN.Fetch_and_op(big, buf2, rank, op=MPI.NO_OP) with self.assertRaises(ValueError): with win_lock(self.WIN, rank): self.WIN.Fetch_and_op(buf1, big, rank, op=MPI.NO_OP) with self.assertRaises(ValueError): with win_lock(self.WIN, rank): self.WIN.Fetch_and_op(buf1, [big, MPI.INT], rank, op=MPI.NO_OP) 
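# --- Illustrative sketch (not part of the test suite) ----------------------
# A minimal one-sided Put/Get round trip using the fence and lock patterns the
# RMA tests above exercise.  Assumes plain bytearray window memory, as in
# BaseTestRMA.setUp() when MPI.Alloc_mem is unavailable.
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
mem = bytearray(16)                               # memory exposed through the window
win = MPI.Win.Create(mem, 1, MPI.INFO_NULL, comm)

sbuf = bytes([rank % 256]) * 16                   # origin data to write
rbuf = bytearray(16)                              # origin buffer to read into
target = (rank + 1) % comm.Get_size()             # right neighbor's window

win.Fence()                                       # active-target epoch
win.Put([sbuf, MPI.BYTE], target)
win.Fence()
win.Get([rbuf, MPI.BYTE], target)
win.Fence()

win.Lock(target)                                  # passive-target epoch
win.Accumulate([sbuf, MPI.BYTE], target, op=MPI.REPLACE)
win.Unlock(target)

win.Free()
# ----------------------------------------------------------------------------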
@unittest.skipMPI('mpich(>=4.0,<4.1)', sys.platform == 'darwin') def testCompareAndSwap(self): group = self.WIN.Get_group() size = group.Get_size() rank = group.Get_rank() group.Free() self.WIN.Fence() obuf = MPI.Alloc_mem(1); memzero(obuf) cbuf = MPI.Alloc_mem(1); memzero(cbuf) rbuf = MPI.Alloc_mem(1); memzero(rbuf) try: try: self.WIN.Compare_and_swap([obuf, 1, MPI.BYTE], [cbuf, 1, MPI.BYTE], [rbuf, 1, MPI.BYTE], rank, 0) self.WIN.Fence() finally: MPI.Free_mem(obuf) MPI.Free_mem(cbuf) MPI.Free_mem(rbuf) except NotImplementedError: self.skipTest('mpi-win-compare_and_swap') self.WIN.Fence() for array, typecode in arrayimpl.loop(): with arrayimpl.test(self): if unittest.is_mpi_gpu('openmpi', array): continue if unittest.is_mpi_gpu('mvapich', array): continue if typecode in 'fdg': continue if typecode in 'FDG': continue obuf = array(+1, typecode) cbuf = array( 0, typecode) rbuf = array(-1, typecode, 2) datatype = typemap(typecode) for rank in range(size): for disp in range(3): with self.subTest(disp=disp, rank=rank): with win_lock(self.WIN, rank): self.WIN.Compare_and_swap( obuf.as_mpi(), cbuf.as_mpi(), rbuf.as_mpi_c(1), rank, disp * datatype.size ) self.assertEqual(rbuf[1], scalar(-1)) big = bytearray(MPI.INT.Get_size()) buf1 = bytearray(1) buf2 = bytearray(1) buf3 = bytearray(1) with win_lock(self.WIN, rank): self.WIN.Compare_and_swap(buf1, buf2, buf3, rank) with self.assertRaises(ValueError): with win_lock(self.WIN, rank): self.WIN.Compare_and_swap(big, buf2, buf3, rank) with self.assertRaises(ValueError): with win_lock(self.WIN, rank): self.WIN.Compare_and_swap(buf1, big, buf3, rank) with self.assertRaises(ValueError): with win_lock(self.WIN, rank): self.WIN.Compare_and_swap(buf1, buf2, big, rank) with self.assertRaises(ValueError): with win_lock(self.WIN, rank): self.WIN.Compare_and_swap(buf1, [big, MPI.INT], buf3, rank) with self.assertRaises(ValueError): with win_lock(self.WIN, rank): self.WIN.Compare_and_swap(buf1, buf2, [big, MPI.INT], rank) def testPutProcNull(self): self.WIN.Fence() self.WIN.Put(None, MPI.PROC_NULL, None) self.WIN.Fence() def testGetProcNull(self): self.WIN.Fence() self.WIN.Get(None, MPI.PROC_NULL, None) self.WIN.Fence() def testAccumulateProcNullReplace(self): self.WIN.Fence() zeros = mkzeros(8) self.WIN.Fence() self.WIN.Accumulate([zeros, MPI.INT], MPI.PROC_NULL, None, MPI.REPLACE) self.WIN.Fence() self.WIN.Accumulate([zeros, MPI.INT], MPI.PROC_NULL, None, MPI.REPLACE) self.WIN.Fence() def testAccumulateProcNullSum(self): self.WIN.Fence() zeros = mkzeros(8) self.WIN.Fence() self.WIN.Accumulate([zeros, MPI.INT], MPI.PROC_NULL, None, MPI.SUM) self.WIN.Fence() self.WIN.Accumulate([None, MPI.INT], MPI.PROC_NULL, None, MPI.SUM) self.WIN.Fence() def testGetAccumulateProcNull(self): obuf = [mkzeros(8), 0, MPI.INT] rbuf = [mkzeros(8), 0, MPI.INT] self.WIN.Fence() try: self.WIN.Get_accumulate(obuf, rbuf, MPI.PROC_NULL) except NotImplementedError: self.skipTest('mpi-win-get_accumulate') self.WIN.Fence() ##def testFetchAndOpProcNull(self): ## obuf = cbuf = rbuf = None ## self.WIN.Fence() ## try: ## self.WIN.Fetch_and_op(obuf, rbuf, MPI.PROC_NULL, 0) ## except NotImplementedError: ## self.skipTest('mpi-win-fetch_and_op') ## self.WIN.Fence() ##def testCompareAndSwapProcNull(self): ## obuf = cbuf = rbuf = None ## self.WIN.Fence() ## try: ## self.WIN.Compare_and_swap(obuf, cbuf, rbuf, MPI.PROC_NULL, 0) ## except NotImplementedError: ## self.skipTest('mpi-win-compare_and_swap') ## self.WIN.Fence() def testFence(self): win = self.WIN LMODE = [ 0, MPI.MODE_NOSTORE, MPI.MODE_NOPUT, 
MPI.MODE_NOSTORE|MPI.MODE_NOPUT, ] GMODE = [ 0, MPI.MODE_NOPRECEDE, MPI.MODE_NOSUCCEED, ] win.Fence() for lmode in LMODE: for gmode in GMODE: assertion = lmode | gmode win.Fence(assertion) win.Fence() @unittest.skipMPI('openmpi(==1.8.1)') def testFenceAll(self): win = self.WIN assertion = 0 modes = [ 0, MPI.MODE_NOSTORE, MPI.MODE_NOPUT, MPI.MODE_NOPRECEDE, MPI.MODE_NOSUCCEED, ] win.Fence() for mode in modes: win.Fence(mode) assertion |= mode win.Fence(assertion) win.Fence() @unittest.skipMPI('openmpi(==1.8.6)') def testStartComplete(self): self.WIN.Start(MPI.GROUP_EMPTY) self.WIN.Complete() @unittest.skipMPI('openmpi(==1.8.6)') def testPostWait(self): self.WIN.Post(MPI.GROUP_EMPTY) self.WIN.Wait() @unittest.skipMPI('openmpi(==1.8.7)') @unittest.skipMPI('openmpi(==1.8.6)') def testStartCompletePostWait(self): win = self.WIN wingroup = win.Get_group() size = wingroup.Get_size() rank = wingroup.Get_rank() if size < 2: return wingroup.Free() if rank == 0: group = wingroup.Excl([0]) win.Start(group) win.Complete() win.Post(group) win.Wait() group.Free() else: group = wingroup.Incl([0]) win.Post(group) win.Wait() win.Start(group) win.Complete() group.Free() wingroup.Free() @unittest.skipMPI('openmpi(==1.8.7)') @unittest.skipMPI('openmpi(==1.8.6)') def testStartCompletePostTest(self): comm = self.COMM win = self.WIN wingroup = win.Get_group() size = wingroup.Get_size() rank = wingroup.Get_rank() if size < 2: return wingroup.Free() if rank == 0: group = wingroup.Excl([0]) win.Start(group) comm.Barrier() win.Complete() comm.Barrier() group.Free() else: group = wingroup.Incl([0]) win.Post(group) flag = win.Test() self.assertFalse(flag) comm.Barrier() comm.Barrier() flag = win.Test() self.assertTrue(flag) group.Free() wingroup.Free() @unittest.skipMPI('MPI(<3.0)') def testSync(self): win = self.WIN comm = self.COMM rank = comm.Get_rank() with win_lock(win, rank): win.Sync() comm.Barrier() @unittest.skipMPI('MPI(<3.0)') def testFlush(self): win = self.WIN comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() # for i in range(size): with win_lock(win, i): win.Flush(i) comm.Barrier() for i in range(size): if i == rank: with win_lock_all(win): win.Flush_all() comm.Barrier() # for i in range(size): with win_lock(win, i): win.Flush_local(i) comm.Barrier() for i in range(size): if i == rank: with win_lock_all(win): win.Flush_local_all() comm.Barrier() class TestRMASelf(BaseTestRMA, unittest.TestCase): COMM = MPI.COMM_SELF class TestRMAWorld(BaseTestRMA, unittest.TestCase): COMM = MPI.COMM_WORLD try: MPI.Win.Create(None, 1, MPI.INFO_NULL, MPI.COMM_SELF).Free() except (NotImplementedError, MPI.Exception): unittest.disable(BaseTestRMA, 'mpi-rma') if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_rma_nb.py000066400000000000000000000200111475341043600165670ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import contextlib import arrayimpl scalar = arrayimpl.scalar def mkzeros(n): return bytearray(n) def memzero(m): try: m[:] = 0 except IndexError: # cffi buffer m[0:len(m)] = b'\0'*len(m) @contextlib.contextmanager def win_lock(win, rank, *args, **kwargs): win.Lock(rank, *args, **kwargs) try: yield finally: win.Unlock(rank) class BaseTestRMA: COMM = MPI.COMM_NULL INFO = MPI.INFO_NULL COUNT_MIN = 0 def setUp(self): nbytes = 100*MPI.DOUBLE.size try: self.mpi_memory = MPI.Alloc_mem(nbytes) self.memory = self.mpi_memory memzero(self.memory) except MPI.Exception: self.mpi_memory = None self.memory = bytearray(nbytes) self.WIN = MPI.Win.Create(self.memory, 1, 
self.INFO, self.COMM) def tearDown(self): self.WIN.Free() if self.mpi_memory: MPI.Free_mem(self.mpi_memory) @unittest.skipMPI('impi(>=2021.13.0)') def testPutGet(self): group = self.WIN.Get_group() size = group.Get_size() group.Free() for array, typecode in arrayimpl.loop(): with arrayimpl.test(self): if unittest.is_mpi_gpu('mvapich', array): continue for count in range(10): for rank in range(size): with self.subTest(rank=rank, count=count): sbuf = array([rank]*count, typecode) rbuf = array(-1, typecode, count+1) self.WIN.Fence() with win_lock(self.WIN, rank): r = self.WIN.Rput(sbuf.as_mpi(), rank) r.Wait() self.WIN.Flush(rank) r = self.WIN.Rget(rbuf.as_mpi_c(count), rank) r.Wait() for i in range(count): self.assertEqual(sbuf[i], scalar(rank)) self.assertEqual(rbuf[i], scalar(rank)) self.assertEqual(rbuf[-1], scalar(-1)) @unittest.skipMPI('impi(>=2021.13.0)') @unittest.skipMPI('openmpi(>=1.10.0,<1.11.0)') def testAccumulate(self): group = self.WIN.Get_group() size = group.Get_size() group.Free() for array, typecode in arrayimpl.loop(): with arrayimpl.test(self): if unittest.is_mpi_gpu('openmpi', array): continue if unittest.is_mpi_gpu('mvapich', array): continue if typecode in '?': continue if typecode in 'FDG': continue for count in range(10): for rank in range(size): with self.subTest(rank=rank, count=count): ones = array([1]*count, typecode) sbuf = array(range(count), typecode) rbuf = array(-1, typecode, count+1) for op in ( MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN, MPI.REPLACE, ): with win_lock(self.WIN, rank): self.WIN.Put(ones.as_mpi(), rank) self.WIN.Flush(rank) r = self.WIN.Raccumulate(sbuf.as_mpi(), rank, op=op) r.Wait() self.WIN.Flush(rank) r = self.WIN.Rget(rbuf.as_mpi_c(count), rank) r.Wait() # for i in range(count): self.assertEqual(sbuf[i], scalar(i)) self.assertEqual(rbuf[i], scalar(op(1, i))) self.assertEqual(rbuf[-1], scalar(-1)) @unittest.skipMPI('impi(>=2021.13.0)') @unittest.skipMPI('openmpi(>=1.10,<1.11)') def testGetAccumulate(self): group = self.WIN.Get_group() size = group.Get_size() group.Free() for array, typecode in arrayimpl.loop(): with arrayimpl.test(self): if unittest.is_mpi_gpu('openmpi', array): continue if unittest.is_mpi_gpu('mvapich', array): continue if typecode in '?': continue if typecode in 'FDG': continue for count in range(10): for rank in range(size): with self.subTest(rank=rank, count=count): ones = array([1]*count, typecode) sbuf = array(range(count), typecode) rbuf = array(-1, typecode, count+1) gbuf = array(-1, typecode, count+1) for op in ( MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN, MPI.REPLACE, MPI.NO_OP, ): with win_lock(self.WIN, rank): self.WIN.Put(ones.as_mpi(), rank) self.WIN.Flush(rank) r = self.WIN.Rget_accumulate(sbuf.as_mpi(), rbuf.as_mpi_c(count), rank, op=op) r.Wait() self.WIN.Flush(rank) r = self.WIN.Rget(gbuf.as_mpi_c(count), rank) r.Wait() # for i in range(count): self.assertEqual(sbuf[i], scalar(i)) self.assertEqual(rbuf[i], scalar(1)) self.assertEqual(gbuf[i], scalar(op(1, i))) self.assertEqual(rbuf[-1], scalar(-1)) self.assertEqual(gbuf[-1], scalar(-1)) def testPutProcNull(self): rank = self.COMM.Get_rank() with win_lock(self.WIN, rank): r = self.WIN.Rput(None, MPI.PROC_NULL, None) r.Wait() def testGetProcNull(self): rank = self.COMM.Get_rank() with win_lock(self.WIN, rank): r = self.WIN.Rget(None, MPI.PROC_NULL, None) r.Wait() def testAccumulateProcNullReplace(self): rank = self.COMM.Get_rank() zeros = mkzeros(8) with win_lock(self.WIN, rank): r = self.WIN.Raccumulate([zeros, MPI.INT], MPI.PROC_NULL, None, MPI.REPLACE) r.Wait() r = 
self.WIN.Raccumulate([zeros, MPI.INT], MPI.PROC_NULL, None, MPI.REPLACE) r.Wait() def testAccumulateProcNullSum(self): rank = self.COMM.Get_rank() zeros = mkzeros(8) with win_lock(self.WIN, rank): r = self.WIN.Raccumulate([zeros, MPI.INT], MPI.PROC_NULL, None, MPI.SUM) r.Wait() r = self.WIN.Raccumulate([None, MPI.INT], MPI.PROC_NULL, None, MPI.SUM) r.Wait() @unittest.skipMPI('MPI(<3.0)') @unittest.skipMPI('openmpi(<1.8.1)') @unittest.skipMPI('MPICH2(<1.5.0)') class TestRMASelf(BaseTestRMA, unittest.TestCase): COMM = MPI.COMM_SELF @unittest.skipMPI('MPI(<3.0)') @unittest.skipMPI('openmpi(<1.8.1)') @unittest.skipMPI('MPICH2(<1.5.0)') class TestRMAWorld(BaseTestRMA, unittest.TestCase): COMM = MPI.COMM_WORLD try: MPI.Win.Create(None, 1, MPI.INFO_NULL, MPI.COMM_SELF).Free() except (NotImplementedError, MPI.Exception): unittest.disable(BaseTestRMA, 'mpi-rma-nb') if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_session.py000066400000000000000000000076211475341043600170300ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest class TestSession(unittest.TestCase): def testSessionInit(self): session = MPI.Session() self.assertFalse(session) self.assertEqual(session, MPI.SESSION_NULL) session = MPI.Session.Init() self.assertTrue(session) self.assertNotEqual(session, MPI.SESSION_NULL) self.assertEqual(session, MPI.Session(session)) session.Finalize() def testSessionGetInfo(self): session = MPI.Session.Init() info = session.Get_info() info.Free() session.Finalize() def testSessionPsets(self): session = MPI.Session.Init() num_psets = session.Get_num_psets() for n in range(num_psets): name = session.Get_nth_pset(n) self.assertGreater(len(name), 0) session.Finalize() def testSessionPsetInfo(self): session = MPI.Session.Init() num_psets = session.Get_num_psets() for n in range(num_psets): name = session.Get_nth_pset(n) info = session.Get_pset_info(name) info.Free() session.Finalize() def testSessionPsetGroup(self): session = MPI.Session.Init() num_psets = session.Get_num_psets() for n in range(num_psets): name = session.Get_nth_pset(n) try: group = session.Create_group(name) group.Free() group = MPI.Group.Create_from_session_pset(session, name) group.Free() except MPI.Exception as exc: # openmpi UNSUPPORTED = MPI.ERR_OTHER if exc.Get_error_class() != UNSUPPORTED: raise session.Finalize() def testSessionSELF(self): session = MPI.Session.Init() name = "mpi://SELF" info = session.Get_pset_info(name) self.assertEqual(info.Get("mpi_size"), "1") info.Free() group = session.Create_group(name) self.assertEqual(group.Get_rank(), 0) self.assertEqual(group.Get_size(), 1) group.Free() session.Finalize() def testSessionWORLD(self): comm = MPI.COMM_WORLD session = MPI.Session.Init() name = "mpi://WORLD" info = session.Get_pset_info(name) size = comm.Get_size() self.assertEqual(info.Get("mpi_size"), str(size)) info.Free() group = session.Create_group(name) self.assertEqual(group.Get_size(), comm.Get_size()) self.assertEqual(group.Get_rank(), comm.Get_rank()) group.Free() session.Finalize() def testBuffering(self): session = MPI.Session.Init() buf = MPI.Alloc_mem((1<<16)+MPI.BSEND_OVERHEAD) try: with self.catchNotImplementedError(4, 1): session.Attach_buffer(buf) with self.catchNotImplementedError(4, 1): session.Flush_buffer() with self.catchNotImplementedError(4, 1): session.Iflush_buffer().Wait() finally: with self.catchNotImplementedError(4, 1): oldbuf = session.Detach_buffer() self.assertEqual(oldbuf.address, buf.address) self.assertEqual(oldbuf.nbytes, buf.nbytes) 
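# --- Illustrative sketch (not part of the test suite) ----------------------
# Request-based RMA as exercised in test_rma_nb.py above: Rput/Rget return
# MPI.Request objects whose Wait() completes the local buffers, while Flush
# orders completion at the target.  Assumes a bytearray window as in setUp().
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
mem = bytearray(16)
win = MPI.Win.Create(mem, 1, MPI.INFO_NULL, comm)

sbuf = bytearray(b'\x01' * 16)
rbuf = bytearray(16)

win.Lock(rank)                         # lock our own window (self as target)
req = win.Rput([sbuf, MPI.BYTE], rank)
req.Wait()                             # origin buffer may be reused now
win.Flush(rank)                        # data is complete at the target
req = win.Rget([rbuf, MPI.BYTE], rank)
req.Wait()
win.Unlock(rank)
assert rbuf == sbuf

win.Free()
# ----------------------------------------------------------------------------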
MPI.Free_mem(buf) with self.catchNotImplementedError(4, 1): session.Attach_buffer(MPI.BUFFER_AUTOMATIC) bufauto = session.Detach_buffer() self.assertEqual(bufauto, MPI.BUFFER_AUTOMATIC) session.Finalize() def testPickle(self): from pickle import dumps, loads session = MPI.Session.Init() with self.assertRaises(ValueError): loads(dumps(session)) session.Finalize() try: MPI.Session.Init().Finalize() except NotImplementedError: unittest.disable(TestSession, 'mpi-session') if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_spawn.py000066400000000000000000000255451475341043600165020ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import sys, os, mpi4py MPI4PYPATH = os.path.abspath( os.path.dirname(mpi4py.__path__[0]) ) CHILDSCRIPT = os.path.abspath( os.path.join(os.path.dirname(__file__), 'spawn_child.py') ) def childscript(): from tempfile import mkstemp from textwrap import dedent fd, script = mkstemp(suffix='.py', prefix="mpi4py-") os.close(fd) python = sys.executable pypath = MPI4PYPATH with open(script, "w") as f: f.write(dedent(f"""\ #!{python} import sys; sys.path.insert(0, "{pypath}") from mpi4py import MPI parent = MPI.Comm.Get_parent() parent.Barrier() parent.Disconnect() assert parent == MPI.COMM_NULL parent = MPI.Comm.Get_parent() assert parent == MPI.COMM_NULL """)) os.chmod(script, int("770", 8)) return script def ch4_ucx(): return 'ch4:ucx' in MPI.Get_library_version() def ch4_ofi(): return 'ch4:ofi' in MPI.Get_library_version() def appnum(): if MPI.APPNUM == MPI.KEYVAL_INVALID: return None return MPI.COMM_WORLD.Get_attr(MPI.APPNUM) def badport(): if MPI.get_vendor()[0] != 'MPICH': return False try: port = MPI.Open_port() MPI.Close_port(port) except: port = "" return port == "" def using_GPU(): # Once a CUDA context is created, the process cannot be forked. # Note: This seems to be a partial fix. Even if we are running cpu-only # tests, if MPI is built with CUDA support we can still fail. Unfortunately # there is no runtime check for us to detect if it's the case... 
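# --- Illustrative sketch (not part of the test suite) ----------------------
# The MPI-4 Sessions model checked by test_session.py above.  Requires an MPI
# implementation with session support; otherwise MPI.Session.Init raises
# NotImplementedError, which is exactly how the tests disable themselves.
from mpi4py import MPI

session = MPI.Session.Init()
info = session.Get_pset_info("mpi://WORLD")
npset = int(info.Get("mpi_size"))                 # number of processes in the pset
info.Free()
group = session.Create_group("mpi://WORLD")
print(f"process {group.Get_rank()} of {group.Get_size()} (pset reports {npset})")
# On MPI-4 implementations a communicator can typically be derived from the
# group with MPI.Intracomm.Create_from_group(group, "org.mpi4py/example").
group.Free()
session.Finalize()
# ----------------------------------------------------------------------------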
using_cupy = (sys.modules.get('cupy') is not None) using_numba = (sys.modules.get('numba') is not None) return using_cupy or using_numba def sequential(): return MPI.COMM_WORLD.Get_size() == 1 def macos(): return sys.platform == 'darwin' def windows(): return sys.platform == 'win32' def github(): return os.environ.get('GITHUB_ACTIONS') == 'true' def azure(): return os.environ.get('TF_BUILD') == 'True' def skip_spawn(): return ( os.environ.get('MPI4PY_TEST_SPAWN') in (None, '0', 'no', 'off', 'false') ) @unittest.skipMPI('MPI(<2.0)') @unittest.skipMPI('openmpi(<3.0.0)') @unittest.skipMPI('openmpi(==4.0.0)') @unittest.skipMPI('openmpi(==4.0.1)', macos()) @unittest.skipMPI('openmpi(==4.0.2)', macos()) @unittest.skipMPI('openmpi(>=4.1.0,<4.2.0)', azure()) @unittest.skipMPI('openmpi(>=4.1.0,<4.2.0)', github()) @unittest.skipMPI('openmpi(>=5.0.0,<5.0.7)', skip_spawn()) @unittest.skipMPI('mpich(<4.1.0)', appnum() is None) @unittest.skipMPI('mpich(<4.3.0)', badport()) @unittest.skipMPI('msmpi(<8.1.0)') @unittest.skipMPI('msmpi', skip_spawn()) @unittest.skipMPI('msmpi', appnum() is None) @unittest.skipMPI('msmpi', os.environ.get("PMI_APPNUM") is None) @unittest.skipMPI('mvapich', appnum() is None) @unittest.skipMPI('mvapich', badport()) @unittest.skipMPI('mvapich(<3.0.0)') @unittest.skipMPI('MPICH2') @unittest.skipMPI('MPICH1') @unittest.skipIf(using_GPU(), 'using CUDA') class BaseTestSpawn: COMM = MPI.COMM_NULL COMMAND = sys.executable ARGS = [CHILDSCRIPT, MPI4PYPATH] MAXPROCS = 1 INFO = MPI.INFO_NULL ROOT = 0 class BaseTestSpawnSingle(BaseTestSpawn): def testCommSpawn(self): self.COMM.Barrier() child = self.COMM.Spawn( self.COMMAND, self.ARGS, self.MAXPROCS, info=self.INFO, root=self.ROOT, ) local_size = child.Get_size() remote_size = child.Get_remote_size() child.Barrier() child.Disconnect() self.COMM.Barrier() self.assertEqual(local_size, self.COMM.Get_size()) self.assertEqual(remote_size, self.MAXPROCS) @unittest.skipMPI('msmpi') def testErrcodes(self): self.COMM.Barrier() errcodes = [] child = self.COMM.Spawn( self.COMMAND, self.ARGS, self.MAXPROCS, info=self.INFO, root=self.ROOT, errcodes=errcodes, ) child.Barrier() child.Disconnect() self.COMM.Barrier() self.assertEqual(len(errcodes), self.MAXPROCS) for errcode in errcodes: self.assertEqual(errcode, MPI.SUCCESS) @unittest.skipMPI('msmpi') @unittest.skipMPI('mpich(==3.4.1)', ch4_ofi()) def testArgsOnlyAtRoot(self): self.COMM.Barrier() if self.COMM.Get_rank() == self.ROOT: child = self.COMM.Spawn( self.COMMAND, self.ARGS, self.MAXPROCS, info=self.INFO, root=self.ROOT, ) else: child = self.COMM.Spawn( None, None, -1, info=MPI.INFO_NULL, root=self.ROOT, ) child.Barrier() child.Disconnect() self.COMM.Barrier() @unittest.skipIf(os.name != 'posix', 'posix') def testNoArgs(self): self.COMM.Barrier() script = None if self.COMM.Get_rank() == self.ROOT: script = childscript() self.COMM.Barrier() script = self.COMM.bcast(script, root=self.ROOT) child = self.COMM.Spawn( script, None, self.MAXPROCS, info=self.INFO, root=self.ROOT, ) child.Barrier() child.Disconnect() self.COMM.Barrier() if self.COMM.Get_rank() == self.ROOT: os.remove(script) self.COMM.Barrier() class BaseTestSpawnMultiple(BaseTestSpawn): def testCommSpawn(self): self.COMM.Barrier() count = 2 + (self.COMM.Get_size() == 0) COMMAND = [self.COMMAND] * count ARGS = [self.ARGS] * len(COMMAND) MAXPROCS = [self.MAXPROCS] * len(COMMAND) INFO = [self.INFO] * len(COMMAND) child = self.COMM.Spawn_multiple( COMMAND, ARGS, MAXPROCS, info=INFO, root=self.ROOT, ) local_size = child.Get_size() remote_size 
= child.Get_remote_size() child.Barrier() child.Disconnect() self.COMM.Barrier() self.assertEqual(local_size, self.COMM.Get_size()) self.assertEqual(remote_size, sum(MAXPROCS)) def testCommSpawnDefaults1(self): self.COMM.Barrier() count = 2 + (self.COMM.Get_size() == 0) COMMAND = [self.COMMAND] * count ARGS = [self.ARGS] * len(COMMAND) child = self.COMM.Spawn_multiple(COMMAND, ARGS) local_size = child.Get_size() remote_size = child.Get_remote_size() child.Barrier() child.Disconnect() self.COMM.Barrier() self.assertEqual(local_size, self.COMM.Get_size()) self.assertEqual(remote_size, len(COMMAND)) def testCommSpawnDefaults2(self): self.COMM.Barrier() count = 2 + (self.COMM.Get_size() == 0) COMMAND = [self.COMMAND] * count ARGS = [self.ARGS] * len(COMMAND) child = self.COMM.Spawn_multiple(COMMAND, ARGS, 1, MPI.INFO_NULL) local_size = child.Get_size() remote_size = child.Get_remote_size() child.Barrier() child.Disconnect() self.COMM.Barrier() self.assertEqual(local_size, self.COMM.Get_size()) self.assertEqual(remote_size, len(COMMAND)) @unittest.skipMPI('msmpi') def testErrcodes(self): self.COMM.Barrier() count = 2 + (self.COMM.Get_size() == 0) COMMAND = [self.COMMAND] * count ARGS = [self.ARGS]*len(COMMAND) MAXPROCS = list(range(1, len(COMMAND)+1)) INFO = MPI.INFO_NULL errcodelist = [] child = self.COMM.Spawn_multiple( COMMAND, ARGS, MAXPROCS, info=INFO, root=self.ROOT, errcodes=errcodelist, ) child.Barrier() child.Disconnect() self.COMM.Barrier() self.assertEqual(len(errcodelist), len(COMMAND)) for i, errcodes in enumerate(errcodelist): self.assertEqual(len(errcodes), MAXPROCS[i]) for errcode in errcodes: self.assertEqual(errcode, MPI.SUCCESS) @unittest.skipMPI('msmpi') def testArgsOnlyAtRoot(self): self.COMM.Barrier() if self.COMM.Get_rank() == self.ROOT: count = 2 + (self.COMM.Get_size() == 0) COMMAND = [self.COMMAND] * count ARGS = [self.ARGS] * len(COMMAND) MAXPROCS = list(range(1, len(COMMAND)+1)) INFO = [MPI.INFO_NULL] * len(COMMAND) child = self.COMM.Spawn_multiple( COMMAND, ARGS, MAXPROCS, info=INFO, root=self.ROOT, ) else: child = self.COMM.Spawn_multiple( None, None, -1, info=MPI.INFO_NULL, root=self.ROOT, ) child.Barrier() child.Disconnect() self.COMM.Barrier() @unittest.skipIf(os.name != 'posix', 'posix') def testNoArgs(self): self.COMM.Barrier() script = None if self.COMM.Get_rank() == self.ROOT: script = childscript() self.COMM.Barrier() script = self.COMM.bcast(script, root=self.ROOT) count = 2 + (self.COMM.Get_size() == 0) COMMAND = [script] * count MAXPROCS = list(range(1, len(COMMAND)+1)) INFO = [self.INFO] * len(COMMAND) child = self.COMM.Spawn_multiple( COMMAND, None, MAXPROCS, info=INFO, root=self.ROOT, ) child.Barrier() child.Disconnect() self.COMM.Barrier() if self.COMM.Get_rank() == self.ROOT: os.remove(script) self.COMM.Barrier() def testArgsBad(self): if self.COMM.Get_size() > 1: return CMDS = [self.COMMAND] ARGS = [self.ARGS] MAXP = [self.MAXPROCS] INFO = [self.INFO] with self.assertRaises(ValueError): self.COMM.Spawn_multiple(CMDS[0], ARGS, MAXP, INFO, root=0) with self.assertRaises(ValueError): self.COMM.Spawn_multiple(CMDS, ARGS*2, MAXP, INFO, root=0) with self.assertRaises(ValueError): self.COMM.Spawn_multiple(CMDS, ARGS[0][0], MAXP*2, INFO, root=0) with self.assertRaises(ValueError): self.COMM.Spawn_multiple(CMDS, ARGS, MAXP[0], INFO*2, root=0) class TestSpawnSingleSelf(BaseTestSpawnSingle, unittest.TestCase): COMM = MPI.COMM_SELF class TestSpawnSingleWorld(BaseTestSpawnSingle, unittest.TestCase): COMM = MPI.COMM_WORLD class 
TestSpawnSingleSelfMany(TestSpawnSingleSelf): MAXPROCS = MPI.COMM_WORLD.Get_size() class TestSpawnSingleWorldMany(TestSpawnSingleWorld): MAXPROCS = MPI.COMM_WORLD.Get_size() class TestSpawnMultipleSelf(BaseTestSpawnMultiple, unittest.TestCase): COMM = MPI.COMM_SELF class TestSpawnMultipleWorld(BaseTestSpawnMultiple, unittest.TestCase): COMM = MPI.COMM_WORLD class TestSpawnMultipleSelfMany(TestSpawnMultipleSelf): MAXPROCS = MPI.COMM_WORLD.Get_size() class TestSpawnMultipleWorldMany(TestSpawnMultipleWorld): MAXPROCS = MPI.COMM_WORLD.Get_size() if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_status.py000066400000000000000000000102001475341043600166530ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest class TestStatus(unittest.TestCase): def setUp(self): self.STATUS = MPI.Status() def tearDown(self): self.STATUS = None def testDefaultFieldValues(self): self.assertEqual(self.STATUS.Get_source(), MPI.ANY_SOURCE) self.assertEqual(self.STATUS.Get_tag(), MPI.ANY_TAG) self.assertEqual(self.STATUS.Get_error(), MPI.SUCCESS) def testGetCount(self): count = self.STATUS.Get_count(MPI.BYTE) self.assertEqual(count, 0) def testGetElements(self): elements = self.STATUS.Get_elements(MPI.BYTE) self.assertEqual(elements, 0) def testSetElements(self): try: self.STATUS.Set_elements(MPI.BYTE, 7) count = self.STATUS.Get_count(MPI.BYTE) self.assertEqual(count, 7) elements = self.STATUS.Get_elements(MPI.BYTE) self.assertEqual(elements, 7) except NotImplementedError: if MPI.Get_version() >= (2,0): raise self.skipTest('mpi-status-set_elements') def testIsCancelled(self): flag = self.STATUS.Is_cancelled() self.assertIs(type(flag), bool) self.assertFalse(flag) def testSetCancelled(self): try: self.STATUS.Set_cancelled(True) flag = self.STATUS.Is_cancelled() self.assertTrue(flag) except NotImplementedError: if MPI.Get_version() >= (2,0): raise self.skipTest('mpi-status-set_cancelled') def testPyProps(self): self.assertEqual(self.STATUS.Get_source(), self.STATUS.source) self.assertEqual(self.STATUS.Get_tag(), self.STATUS.tag) self.assertEqual(self.STATUS.Get_error(), self.STATUS.error) self.STATUS.source = 1 self.STATUS.tag = 2 self.STATUS.error = MPI.ERR_ARG self.assertEqual(self.STATUS.source, 1) self.assertEqual(self.STATUS.tag, 2) self.assertEqual(self.STATUS.error, MPI.ERR_ARG) try: self.assertIs(type(self.STATUS.count), int) self.assertEqual(self.STATUS.count, 0) self.STATUS.count = 7 self.assertEqual(self.STATUS.count, 7) self.STATUS.count = 0 except NotImplementedError: if MPI.Get_version() >= (2,0): raise try: self.assertIs(type(self.STATUS.cancelled), bool) self.assertFalse(self.STATUS.cancelled) self.STATUS.cancelled = True self.assertTrue(self.STATUS.cancelled) self.STATUS.cancelled = False except NotImplementedError: if MPI.Get_version() >= (2,0): raise def testConstructor(self): self.assertRaises(TypeError, MPI.Status, 123) self.assertRaises(TypeError, MPI.Status, "abc") def testCopyConstructor(self): self.STATUS.source = 1 self.STATUS.tag = 2 self.STATUS.error = MPI.ERR_ARG status = MPI.Status(self.STATUS) self.assertEqual(status.source, 1) self.assertEqual(status.tag, 2) self.assertEqual(status.error, MPI.ERR_ARG) try: self.STATUS.Set_elements(MPI.BYTE, 7) except NotImplementedError: pass try: self.STATUS.Set_cancelled(True) except NotImplementedError: pass status = MPI.Status(self.STATUS) try: count = status.Get_count(MPI.BYTE) elems = status.Get_elements(MPI.BYTE) self.assertEqual(count, 7) self.assertEqual(elems, 7) except NotImplementedError: pass try: 
flag = status.Is_cancelled() self.assertTrue(flag) except NotImplementedError: pass def testPickle(self): from pickle import dumps, loads self.STATUS.source = 1 self.STATUS.tag = 2 self.STATUS.error = MPI.ERR_ARG status = loads(dumps(self.STATUS)) self.assertEqual(status.source, 1) self.assertEqual(status.tag, 2) self.assertEqual(status.error, MPI.ERR_ARG) if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_subclass.py000066400000000000000000000226261475341043600171660ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest # --- class MyBaseComm: def free(self): if self != MPI.COMM_NULL: MPI.Comm.Free(self) class BaseTestBaseComm: def setUp(self): self.comm = self.CommType(self.COMM_BASE) def testSubType(self): self.assertNotIn( type(self.comm), [ MPI.Comm, MPI.Intracomm, MPI.Cartcomm, MPI.Graphcomm, MPI.Distgraphcomm, MPI.Intercomm, ] ) self.assertIsInstance(self.comm, self.CommType) def testCloneFree(self): if self.COMM_BASE != MPI.COMM_NULL: comm = self.comm.Clone() else: comm = self.CommType() self.assertIsInstance(comm, MPI.Comm) self.assertIsInstance(comm, self.CommType) comm.free() def tearDown(self): self.comm.free() # --- class MyComm(MPI.Comm, MyBaseComm): def __new__(cls, comm=None): if comm is not None: if comm != MPI.COMM_NULL: comm = comm.Clone() return super().__new__(cls, comm) class BaseTestMyComm(BaseTestBaseComm): CommType = MyComm class TestMyCommNULL(BaseTestMyComm, unittest.TestCase): COMM_BASE = MPI.COMM_NULL class TestMyCommSELF(BaseTestMyComm, unittest.TestCase): COMM_BASE = MPI.COMM_SELF class TestMyCommWORLD(BaseTestMyComm, unittest.TestCase): COMM_BASE = MPI.COMM_WORLD # --- class MyIntracomm(MPI.Intracomm, MyBaseComm): def __new__(cls, comm=None): if comm is not None: if comm != MPI.COMM_NULL: comm = comm.Dup() return super().__new__(cls, comm) class BaseTestMyIntracomm(BaseTestBaseComm): CommType = MyIntracomm class TestMyIntracommNULL(BaseTestMyIntracomm, unittest.TestCase): COMM_BASE = MPI.COMM_NULL class TestMyIntracommSELF(BaseTestMyIntracomm, unittest.TestCase): COMM_BASE = MPI.COMM_SELF class TestMyIntracommWORLD(BaseTestMyIntracomm, unittest.TestCase): COMM_BASE = MPI.COMM_WORLD # --- class MyCartcomm(MPI.Cartcomm, MyBaseComm): def __new__(cls, comm=None): if comm is not None: if comm != MPI.COMM_NULL: dims = [comm.size] comm = comm.Create_cart(dims) return super().__new__(cls, comm) class BaseTestMyCartcomm(BaseTestBaseComm): CommType = MyCartcomm class TestMyCartcommNULL(BaseTestMyCartcomm, unittest.TestCase): COMM_BASE = MPI.COMM_NULL class TestMyCartcommSELF(BaseTestMyCartcomm, unittest.TestCase): COMM_BASE = MPI.COMM_SELF class TestMyCartcommWORLD(BaseTestMyCartcomm, unittest.TestCase): COMM_BASE = MPI.COMM_WORLD # --- class MyGraphcomm(MPI.Graphcomm, MyBaseComm): def __new__(cls, comm=None): if comm is not None: if comm != MPI.COMM_NULL: index = list(range(0, comm.size+1)) edges = list(range(0, comm.size)) comm = comm.Create_graph(index, edges) return super().__new__(cls, comm) class BaseTestMyGraphcomm(BaseTestBaseComm): CommType = MyGraphcomm class TestMyGraphcommNULL(BaseTestMyGraphcomm, unittest.TestCase): COMM_BASE = MPI.COMM_NULL class TestMyGraphcommSELF(BaseTestMyGraphcomm, unittest.TestCase): COMM_BASE = MPI.COMM_SELF class TestMyGraphcommWORLD(BaseTestMyGraphcomm, unittest.TestCase): COMM_BASE = MPI.COMM_WORLD # --- class MyRequest(MPI.Request): def __new__(cls, request=None): return super().__new__(cls, request) def test(self): return super(type(self), self).Test() def wait(self): return 
super(type(self), self).Wait() class MyPrequest(MPI.Prequest): def __new__(cls, request=None): return super().__new__(cls, request) def test(self): return super(type(self), self).Test() def wait(self): return super(type(self), self).Wait() def start(self): return super(type(self), self).Start() class MyGrequest(MPI.Grequest): def __new__(cls, request=None): return super().__new__(cls, request) def test(self): return super(type(self), self).Test() def wait(self): return super(type(self), self).Wait() class BaseTestMyRequest: def setUp(self): self.req = self.MyRequestType(MPI.REQUEST_NULL) def testSubType(self): self.assertIsNot(type(self.req), self.MPIRequestType) self.assertIsInstance(self.req, self.MPIRequestType) self.assertIsInstance(self.req, self.MyRequestType) self.req.test() class TestMyRequest(BaseTestMyRequest, unittest.TestCase): MPIRequestType = MPI.Request MyRequestType = MyRequest class TestMyPrequest(BaseTestMyRequest, unittest.TestCase): MPIRequestType = MPI.Prequest MyRequestType = MyPrequest class TestMyGrequest(BaseTestMyRequest, unittest.TestCase): MPIRequestType = MPI.Grequest MyRequestType = MyGrequest class TestMyRequest2(TestMyRequest): def setUp(self): req = MPI.COMM_SELF.Isend( [MPI.BOTTOM, 0, MPI.BYTE], dest=MPI.PROC_NULL, tag=0) self.req = MyRequest(req) @unittest.skipMPI('mpich(==3.4.1)') class TestMyPrequest2(TestMyPrequest): def setUp(self): req = MPI.COMM_SELF.Send_init( [MPI.BOTTOM, 0, MPI.BYTE], dest=MPI.PROC_NULL, tag=0) self.req = MyPrequest(req) def tearDown(self): self.req.Free() def testStart(self): for i in range(5): self.req.start() self.req.test() self.req.start() self.req.wait() # --- class MyInfo(MPI.Info): def __new__(cls, info=None): return MPI.Info.__new__(cls, info) def free(self): if self != MPI.INFO_NULL: MPI.Info.Free(self) class BaseTestMyInfo: def setUp(self): info = MPI.Info.Create() self.info = MyInfo(info) def tearDown(self): self.info.free() def testSubType(self): self.assertIsNot(type(self.info), MPI.Info) self.assertIsInstance(self.info, MPI.Info) self.assertIsInstance(self.info, MyInfo) def testFree(self): self.assertTrue(self.info) self.info.free() self.assertFalse(self.info) def testCreateDupType(self): for info in ( MyInfo.Create(), self.info.Dup(), self.info.copy(), ): self.assertIsNot(type(info), MPI.Info) self.assertIsInstance(info, MPI.Info) self.assertIsInstance(info, MyInfo) info.free() def testCreateEnvType(self): try: info = MyInfo.Create_env() except NotImplementedError: if MPI.Get_version() >= (4, 0): raise raise unittest.SkipTest("mpi-info-create-env") self.assertIsNot(type(info), MPI.Info) self.assertIsInstance(info, MPI.Info) self.assertIsInstance(info, MyInfo) def testPickle(self): from pickle import dumps, loads items = list(zip("abc", "123")) self.info.update(items) info = loads(dumps(self.info)) self.assertIs(type(info), MyInfo) self.assertEqual(info.items(), items) info.free() class TestMyInfo(BaseTestMyInfo, unittest.TestCase): pass try: MPI.Info.Create().Free() except (NotImplementedError, MPI.Exception): unittest.disable(BaseTestMyInfo, 'mpi-info') # --- class MyWin(MPI.Win): def __new__(cls, win=None): return MPI.Win.__new__(cls, win) def free(self): if self != MPI.WIN_NULL: MPI.Win.Free(self) class BaseTestMyWin: def setUp(self): w = MPI.Win.Create(MPI.BOTTOM) self.win = MyWin(w) def tearDown(self): self.win.free() def testSubType(self): self.assertIsNot(type(self.win), MPI.Win) self.assertIsInstance(self.win, MPI.Win) self.assertIsInstance(self.win, MyWin) def testFree(self): self.assertTrue(self.win) 
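# --- Illustrative sketch (not part of the test suite) ----------------------
# The dynamic-process pattern behind the test_spawn.py cases further above:
# a parent spawns Python children which reconnect through Get_parent().  The
# "worker.py" script named here is hypothetical and stands in for the
# spawn_child.py helper the tests use.
import sys
from mpi4py import MPI

# parent side
child = MPI.COMM_SELF.Spawn(sys.executable, args=["worker.py"], maxprocs=2)
child.Barrier()            # synchronize with the children over the intercomm
child.Disconnect()

# child side (contents of the hypothetical worker.py):
#     from mpi4py import MPI
#     parent = MPI.Comm.Get_parent()
#     parent.Barrier()
#     parent.Disconnect()
# ----------------------------------------------------------------------------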
self.win.free() self.assertFalse(self.win) class TestMyWin(BaseTestMyWin, unittest.TestCase): pass try: MPI.Win.Create(MPI.BOTTOM).Free() except (NotImplementedError, MPI.Exception): unittest.disable(BaseTestMyWin, 'mpi-win') # --- import os, tempfile class MyFile(MPI.File): def __new__(cls, file=None): return MPI.File.__new__(cls, file) def close(self): if self != MPI.FILE_NULL: MPI.File.Close(self) class BaseTestMyFile: def openfile(self): fd, fname = tempfile.mkstemp(prefix='mpi4py') os.close(fd) amode = MPI.MODE_RDWR | MPI.MODE_CREATE | MPI.MODE_DELETE_ON_CLOSE try: self.file = MPI.File.Open(MPI.COMM_SELF, fname, amode, MPI.INFO_NULL) return self.file except Exception: os.remove(fname) raise def setUp(self): f = self.openfile() self.file = MyFile(f) def tearDown(self): self.file.close() def testSubType(self): self.assertIsNot(type(self.file), MPI.File) self.assertIsInstance(self.file, MPI.File) self.assertIsInstance(self.file, MyFile) def testFree(self): self.assertTrue(self.file) self.file.close() self.assertFalse(self.file) class TestMyFile(BaseTestMyFile, unittest.TestCase): pass try: BaseTestMyFile().openfile().Close() except NotImplementedError: unittest.disable(BaseTestMyFile, 'mpi-file') # --- if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_threads.py000066400000000000000000000035601475341043600167750ustar00rootroot00000000000000import sys try: import threading HAVE_THREADING = True except ImportError: import dummy_threading as threading HAVE_THREADING = False VERBOSE = False #VERBOSE = True import mpi4py.rc mpi4py.rc.thread_level = 'multiple' from mpi4py import MPI import mpiunittest as unittest class TestMPIThreads(unittest.TestCase): def testThreadLevels(self): levels = [MPI.THREAD_SINGLE, MPI.THREAD_FUNNELED, MPI.THREAD_SERIALIZED, MPI.THREAD_MULTIPLE] for i in range(len(levels)-1): self.assertLess(levels[i], levels[i+1]) try: provided = MPI.Query_thread() self.assertIn(provided, levels) except NotImplementedError: self.skipTest('mpi-query_thread') def testIsThreadMain(self): try: flag = MPI.Is_thread_main() except NotImplementedError: self.skipTest('mpi-is_thread_main') name = threading.current_thread().name main = (name == 'MainThread') or not HAVE_THREADING self.assertEqual(flag, main) if VERBOSE: log = lambda m: sys.stderr.write(m+'\n') log(f"{name}: MPI.Is_thread_main() -> {flag}") def testIsThreadMainInThread(self): try: provided = MPI.Query_thread() except NotImplementedError: self.skipTest('mpi-query_thread') self.testIsThreadMain() T = [threading.Thread(target=self.testIsThreadMain) for _ in range(5)] if provided == MPI.THREAD_MULTIPLE: for t in T: t.start() for t in T: t.join() elif provided == MPI.THREAD_SERIALIZED: for t in T: t.start() t.join() else: self.skipTest('mpi-thread_level') if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_toplevel.py000066400000000000000000000072351475341043600172000ustar00rootroot00000000000000import mpi4py import unittest import warnings import pathlib import os class TestRC(unittest.TestCase): @staticmethod def newrc(): rc = type(mpi4py.rc)() rc(initialize = rc.initialize) rc(threads = rc.threads) rc(thread_level = rc.thread_level) rc(finalize = rc.finalize) rc(fast_reduce = rc.fast_reduce) rc(recv_mprobe = rc.recv_mprobe) rc(irecv_bufsz = rc.irecv_bufsz) rc(errors = rc.errors) return rc def testCallKwArgs(self): rc = self.newrc() kwargs = rc.__dict__.copy() rc(**kwargs) def testInitKwArgs(self): rc = self.newrc() kwargs = rc.__dict__.copy() rc = type(mpi4py.rc)(**kwargs) def 
testBadAttribute(self): error = lambda: mpi4py.rc(ABCXYZ=123456) self.assertRaises(TypeError, error) error = lambda: setattr(mpi4py.rc, 'ABCXYZ', 123456) self.assertRaises(TypeError, error) error = lambda: getattr(mpi4py.rc, 'ABCXYZ') self.assertRaises(AttributeError, error) def testRepr(self): repr(mpi4py.rc) class TestConfig(unittest.TestCase): def testGetInclude(self): path = mpi4py.get_include() self.assertIsInstance(path, str) self.assertTrue(os.path.isdir(path)) header = os.path.join(path, 'mpi4py', 'mpi4py.h') self.assertTrue(os.path.isfile(header)) def testGetConfig(self): conf = mpi4py.get_config() self.assertIsInstance(conf, dict) mpicc = conf.get('mpicc') if mpicc is not None: self.assertTrue(os.path.exists(mpicc)) class TestProfile(unittest.TestCase): def testProfile(self): import struct import sysconfig bits = struct.calcsize('P') * 8 triplet = sysconfig.get_config_var('MULTIARCH') or '' libpath = [ f"{prefix}{suffix}" for prefix in ("/lib", "/usr/lib") for suffix in (bits, f"/{triplet}", "") ] fspath = ( os.fsencode, os.fsdecode, pathlib.Path ) libraries = ( 'c', 'libc.so.6', 'm', 'libm.so.6', 'dl', 'libdl.so.2', ) def mpi4py_profile(*args, **kwargs): try: mpi4py.profile(*args, **kwargs) except ValueError: pass if os.name != 'posix': with warnings.catch_warnings(): warnings.simplefilter('error') with self.assertRaises(UserWarning): mpi4py.profile(struct.__file__) return with warnings.catch_warnings(): warnings.simplefilter('ignore') for libname in libraries: mpi4py_profile(libname, path=libpath) for fs in fspath: mpi4py_profile(libname, path=map(fs, libpath)) for path in libpath: mpi4py_profile(libname, path=path) for fsp in fspath: mpi4py_profile(libname, path=fsp(path)) warnings.simplefilter('error') with self.assertRaises(UserWarning): mpi4py.profile('hosts', path=["/etc"]) with self.assertRaises(ValueError): mpi4py.profile('@querty') with self.assertRaises(ValueError): mpi4py.profile('@querty', path="/usr/lib") with self.assertRaises(ValueError): mpi4py.profile('@querty', path=["/usr/lib"]) with self.assertRaises(ValueError): mpi4py.profile('@querty') if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_ulfm.py000066400000000000000000000126751475341043600163150ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import warnings import struct @unittest.skipMPI('msmpi') @unittest.skipMPI('mvapich') @unittest.skipMPI('impi') class BaseTestULFM: COMM = MPI.COMM_NULL def setUp(self): self.COMM = self.COMM.Dup() self.COMM.Set_errhandler(MPI.ERRORS_RETURN) def tearDown(self): self.COMM.Free() del self.COMM def testIsRevoked(self): comm = self.COMM try: self.assertFalse(comm.Is_revoked()) except NotImplementedError: pass def testRevoke(self): comm = self.COMM is_intra = comm.Is_intra() try: comm.Revoke() except NotImplementedError: self.skipTest('mpi-comm_revoke') try: self.assertTrue(comm.Is_revoked()) except NotImplementedError: pass # try: comm.Barrier() except MPI.Exception as exc: code = exc.Get_error_class() else: code = MPI.SUCCESS self.assertEqual(code, MPI.ERR_REVOKED) # try: comm.Send([None, 0, MPI.BYTE], MPI.PROC_NULL) except MPI.Exception as exc: code = exc.Get_error_class() else: code = MPI.SUCCESS self.assertEqual(code, MPI.ERR_REVOKED) # try: comm.Recv([None, 0, MPI.BYTE], MPI.PROC_NULL) except MPI.Exception as exc: code = exc.Get_error_class() else: code = MPI.SUCCESS self.assertEqual(code, MPI.ERR_REVOKED) def testGetFailed(self): comm = self.COMM group = comm.Get_failed() gcmp = MPI.Group.Compare(group, MPI.GROUP_EMPTY) 
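# --- Illustrative sketch (not part of the test suite) ----------------------
# The ULFM fault-tolerance calls exercised by test_ulfm.py above.  Support
# varies across MPI implementations (the tests skip msmpi, mvapich and impi),
# so every call is guarded the same way the tests guard it.
from mpi4py import MPI

comm = MPI.COMM_WORLD.Dup()
comm.Set_errhandler(MPI.ERRORS_RETURN)     # report failures instead of aborting
try:
    failed = comm.Get_failed()             # group of acknowledged failed ranks
    nfailed = failed.Get_size()
    failed.Free()
    flag = comm.Agree(1)                   # fault-tolerant agreement on an int
    shrunk = comm.Shrink()                 # communicator excluding failed ranks
    shrunk.Free()
    # comm.Revoke() would invalidate the communicator on all ranks; not called
    # here because this sketch runs on a healthy duplicate of COMM_WORLD.
except NotImplementedError:
    pass                                   # this MPI does not provide ULFM
comm.Free()
# ----------------------------------------------------------------------------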
group.Free() self.assertIn(gcmp, [MPI.IDENT, MPI.CONGRUENT]) def testAckFailed(self): comm = self.COMM size = comm.Get_size() num_acked = comm.Ack_failed(0) self.assertEqual(num_acked, 0) num_acked = comm.Ack_failed(size) self.assertEqual(num_acked, 0) num_acked = comm.Ack_failed() self.assertEqual(num_acked, 0) def testAgree(self): comm = self.COMM for i in range(5): flag = i flag = comm.Agree(flag) self.assertEqual(flag, i) def testIAgree(self): comm = self.COMM with self.assertRaises(TypeError): comm.Iagree(0) with self.assertRaises(ValueError): comm.Iagree(bytearray(8)) ibuf = MPI.buffer.allocate(struct.calcsize('i')) flag = memoryview(ibuf).cast('i') for i in range(5): flag[0] = i request = comm.Iagree(flag) request.Wait() self.assertEqual(flag[0], i) size = comm.Get_size() if comm.Is_intra() and size > 1: for root in range(size): rank = comm.Get_rank() comm.Barrier() if rank == root: ival = int('1011', base=2) flag[0] = ival request = comm.Iagree(flag) self.assertFalse(request.Test()) self.assertEqual(flag[0], ival) comm.Barrier() else: ival = int('1101', base=2) flag[0] = ival comm.Barrier() self.assertEqual(flag[0], ival) request = comm.Iagree(flag) request.Wait() ival = int('1001', base=2) self.assertEqual(flag[0], ival) def testShrink(self): comm = self.COMM shrink = comm.Shrink() self.assertEqual(comm.Get_size(), shrink.Get_size()) self.assertEqual(comm.Get_rank(), shrink.Get_rank()) if shrink.Is_inter(): self.assertEqual(comm.Get_remote_size(), shrink.Get_remote_size()) shrink.Free() def testIShrink(self): comm = self.COMM shrink, request = comm.Ishrink() self.assertTrue(request) request.Wait() self.assertFalse(request) self.assertEqual(comm.Get_size(), shrink.Get_size()) self.assertEqual(comm.Get_rank(), shrink.Get_rank()) if shrink.Is_inter(): self.assertEqual(comm.Get_remote_size(), shrink.Get_remote_size()) shrink.Free() class TestULFMSelf(BaseTestULFM, unittest.TestCase): COMM = MPI.COMM_SELF class TestULFMWorld(BaseTestULFM, unittest.TestCase): COMM = MPI.COMM_WORLD @unittest.skipMPI('openmpi(>=5.0.0,<5.0.4)') @unittest.skipIf(MPI.COMM_WORLD.Get_size() < 2, 'mpi-world-size<2') class TestULFMInter(BaseTestULFM, unittest.TestCase): @classmethod def setUpClass(cls): BASECOMM = MPI.COMM_WORLD size = BASECOMM.Get_size() rank = BASECOMM.Get_rank() if rank < size // 2 : COLOR = 0 LOCAL_LEADER = 0 REMOTE_LEADER = size // 2 else: COLOR = 1 LOCAL_LEADER = 0 REMOTE_LEADER = 0 INTRACOMM = BASECOMM.Split(COLOR, key=0) INTERCOMM = MPI.Intracomm.Create_intercomm( INTRACOMM, LOCAL_LEADER, BASECOMM, REMOTE_LEADER, ) INTRACOMM.Free() cls.COMM = INTERCOMM cls.COMM.Set_errhandler(MPI.ERRORS_RETURN) @classmethod def tearDownClass(cls): cls.COMM.Free() if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_util_dtlib.py000066400000000000000000000456521475341043600175060ustar00rootroot00000000000000from mpi4py import MPI from mpi4py.util.dtlib import from_numpy_dtype as fromnumpy from mpi4py.util.dtlib import to_numpy_dtype as tonumpy import sys, os import itertools try: import mpiunittest as unittest except ImportError: sys.path.append( os.path.dirname( os.path.abspath(__file__))) import mpiunittest as unittest try: import numpy np_dtype = numpy.dtype np_version = tuple(map(int, numpy.__version__.split('.', 2)[:2])) except ImportError: numpy = None np_dtype = None np_version = None typecodes = list("?cbhilqpBHILQPfdgFDG") typecodes += [f'b{n:d}' for n in (1,)] typecodes += [f'i{n:d}' for n in (1,2,4,8)] typecodes += [f'u{n:d}' for n in (1,2,4,8)] typecodes += [f'f{n:d}' for n in 
(4,8)] if os.environ.get('COVERAGE_RUN') == 'true': typecodes = list("cif") + ['b1', 'i8', 'f8'] if np_version and np_version < (1, 17): for tc in 'LFDG': if tc in typecodes: typecodes.remove(tc) name, version = MPI.get_vendor() mpich_lt_400 = (name == 'MPICH') and version < (4, 0, 0) if mpich_lt_400: for tc in 'FDG': if tc in typecodes: typecodes.remove(tc) if unittest.is_mpi('impi(>=2021.12.0)') and os.name == 'nt': for tc in [*'lLg', 'i4', 'u4']: if tc in typecodes: typecodes.remove(tc) datatypes = [MPI.Datatype.fromcode(t) for t in typecodes] datatypes += [ MPI.BYTE, MPI.AINT, MPI.OFFSET, MPI.COUNT, ] mpipairtypes = [ MPI.SHORT_INT, MPI.INT_INT, MPI.LONG_INT, MPI.FLOAT_INT, MPI.DOUBLE_INT, MPI.LONG_DOUBLE_INT, ] mpif77types = [ MPI.CHARACTER, MPI.LOGICAL, MPI.INTEGER, MPI.REAL, MPI.DOUBLE_PRECISION, MPI.COMPLEX, MPI.DOUBLE_COMPLEX, ] mpif90types = [ MPI.LOGICAL1, MPI.LOGICAL2, MPI.LOGICAL4, MPI.LOGICAL8, MPI.INTEGER1, MPI.INTEGER2, MPI.INTEGER4, MPI.INTEGER8, MPI.INTEGER16, MPI.REAL2, MPI.REAL4, MPI.REAL8, MPI.REAL16, MPI.COMPLEX4, MPI.COMPLEX8, MPI.COMPLEX16, MPI.COMPLEX32, ] for typelist in [mpif77types, mpif90types]: typelist[:] = [ t for t in datatypes if t != MPI.DATATYPE_NULL and t.Get_name() != 'MPI_DATATYPE_NULL' and t.Get_size() != 0 ] del typelist class TestUtilDTLib(unittest.TestCase): def check(self, arg, *args): if numpy is None: if isinstance(arg, MPI.Datatype): mt1 = arg.Dup() dt1 = tonumpy(mt1) mt1.Free() return if isinstance(arg, MPI.Datatype): mt1 = arg.Dup() dt1 = tonumpy(mt1) else: dt1 = np_dtype(arg, *args) mt1 = fromnumpy(dt1) dt2 = tonumpy(mt1) mt2 = fromnumpy(dt2) dt3 = tonumpy(mt2) mt3 = fromnumpy(dt3) try: self.assertEqual(dt1, dt2) self.assertEqual(dt2, dt3) if isinstance(arg, MPI.Datatype): if arg.combiner not in ( MPI.COMBINER_INDEXED, MPI.COMBINER_HINDEXED, MPI.COMBINER_INDEXED_BLOCK, MPI.COMBINER_HINDEXED_BLOCK, ): self.assertEqual(dt1.itemsize, mt1.extent) self.assertEqual(dt2.itemsize, mt2.extent) self.assertEqual(dt3.itemsize, mt3.extent) finally: mt1.Free() mt2.Free() mt3.Free() def testBasic(self): for spec in typecodes: with self.subTest(spec=spec): self.check(spec) for mpit in datatypes: with self.subTest(name=mpit.name): self.check(mpit) def testSubarray1(self): shapes = [(1,), (1, 1), (1, 1, 1), (3,), (3, 4), (2, 3, 4),] for dt, shape in itertools.product(typecodes, shapes): spec = f"{shape}{dt}" with self.subTest(spec=spec): self.check(spec) def testSubarray2(self): shapes = [(1,), (1, 1), (1, 1, 1), (3,), (3, 4), (2, 3, 4),] orders = [MPI.ORDER_C, MPI.ORDER_FORTRAN] for mt, shape, order in itertools.product(datatypes, shapes, orders): with self.subTest(name=mt.name, shape=shape, order=order): starts = (0,) * len(shape) mt1 = mt.Create_subarray(shape, shape, starts, order) self.check(mt1) mt1.Free() @unittest.skipMPI('msmpi') def testStruct1(self): shapes = [(), (1,), (3,), (3, 5),] iter1 = itertools.product(shapes, typecodes) iter2 = itertools.product(shapes, typecodes) iterN = itertools.product(iter1, iter2) iterA = iter([False, True]) for nt, align in itertools.product(iterN, iterA): s1, t1, s2, t2 = sum(nt, ()) spec = f"{s1}{t1},{s2}{t2}" with self.subTest(spec=spec, align=align): self.check(spec, align) @unittest.skipMPI('msmpi') def testStruct2(self): iter1 = iter(typecodes) iter2 = iter(typecodes) iter3 = iter(typecodes) iterN = itertools.product(iter1, iter2, iter3) iterA = iter([False, True]) for tp, align in itertools.product(iterN, iterA): t1, t2, t3 = tp spec = f"{t1},{t2},{t3}" with self.subTest(spec=spec, align=align): 
self.check(spec, align) @unittest.skipMPI('msmpi') def testStruct3(self): blens = [1, 2, 3] disps = [1, 27, 71] types = [MPI.INT, MPI.DOUBLE, MPI.INT] mt1 = MPI.Datatype.Create_struct(blens, disps, types) mt2 = MPI.Datatype.Create_struct([1], [0], [mt1]) self.check(mt1) self.check(mt2) mt1.Free() mt2.Free() def makeStruct(self, dt, mt): dt = numpy.dtype(dt).str stp = numpy.dtype(",".join(['B', dt, 'B']), align=True) off = lambda i: stp.fields[stp.names[i]][1] blens = [1, 1, 1] disps = [0, off(1), off(2)] types = [MPI.BYTE, mt, MPI.BYTE] mtp = MPI.Datatype.Create_struct(blens, disps, types) return stp, mtp @unittest.skipMPI('msmpi') @unittest.skipIf(numpy is None, 'numpy') def testStruct4(self): for t in typecodes: with self.subTest(typecode=t): dt0 = np_dtype(t) mt0 = fromnumpy(dt0) stp, mt1 = self.makeStruct(t, mt0) ex1 = stp.itemsize for n, mt in ( (1, mt1), (1, mt1.Dup()), (1, mt1.Create_resized(0, 1*ex1)), (3, mt1.Create_resized(0, 3*ex1)), (3, mt1.Create_contiguous(3)), (5, mt1.Create_subarray([5], [5], [0])), (7, MPI.Datatype.Create_struct([7], [0], [mt1])), ): dt = tonumpy(mt) self.assertEqual(mt.extent, n*ex1) self.assertEqual(dt.itemsize, n*ex1) self.assertTrue(dt.isalignedstruct) self.check(mt) self.check(dt) if mt != mt1: mt.Free() mt0.Free() mt1.Free() @unittest.skipMPI('msmpi') @unittest.skipIf(numpy is None, 'numpy') def testStruct5(self): for t1, t2 in itertools.product(*[typecodes]*2): with self.subTest(t1=t1, t2=t2): dtlist = [] dt = np_dtype(f"c,{t1},{t2},c", align=True) dtlist.append(dt) for _ in range(3): dt = np_dtype([('', dt)]*2, align=True) dtlist.append(dt) for dt in dtlist: mt = fromnumpy(dt) dt2 = tonumpy(mt) mt.Free() self.assertEqual(dt, dt2) def testVector(self): for mt in datatypes: with self.subTest(name=mt.name): mt1 = mt.Create_vector(3, 4, 6) mt2 = mt.Create_hvector(3, 4, 6*mt.extent) self.check(mt1) self.check(mt2) dt1 = tonumpy(mt1) dt2 = tonumpy(mt2) self.check(dt1) self.check(dt2) self.assertEqual(dt1, dt2) mt3 = mt1.Create_vector(2, 3, 4) mt4 = mt2.Create_hvector(2, 3, 4*mt2.extent) self.check(mt3) self.check(mt4) dt3 = tonumpy(mt3) dt4 = tonumpy(mt4) self.check(dt3) self.check(dt4) self.assertEqual(dt3, dt4) mt3.Free() mt4.Free() mt1.Free() mt2.Free() def testHVector(self): for mt in datatypes: with self.subTest(name=mt.name): mt1 = mt.Create_hvector(3, 4, 6*mt.extent+1) mt2 = mt1.Dup() self.check(mt1) self.check(mt2) dt1 = tonumpy(mt1) dt2 = tonumpy(mt2) self.check(dt1) self.check(dt2) self.assertEqual(dt1, dt2) mt3 = mt1.Create_hvector(2, 3, 4*mt1.extent+1) mt4 = mt2.Create_hvector(2, 3, 4*mt2.extent+1) self.check(mt3) self.check(mt4) dt3 = tonumpy(mt3) dt4 = tonumpy(mt4) self.check(dt3) self.check(dt4) self.assertEqual(dt3, dt4) mt3.Free() mt4.Free() mt1.Free() mt2.Free() def testIndexed(self): disps = [1, 6, 12] for mt in datatypes: with self.subTest(name=mt.name): mt1 = mt.Create_indexed([4]*3, disps) mt2 = mt.Create_indexed_block(4, disps) self.check(mt1) self.check(mt2) dt1 = tonumpy(mt1) dt2 = tonumpy(mt2) self.check(dt1) self.check(dt2) self.assertEqual(dt1, dt2) mt3 = mt1.Create_indexed([1], [0]) mt4 = mt2.Create_indexed_block(1, [0]) self.check(mt3) self.check(mt4) dt3 = tonumpy(mt3) dt4 = tonumpy(mt4) self.check(dt3) self.check(dt4) self.assertEqual(dt3, dt4) mt3.Free() mt4.Free() mt1.Free() mt2.Free() def testHIndexed(self): disps = [0, 6, 12] for mt in datatypes: with self.subTest(name=mt.name): mt1 = mt.Create_hindexed([4]*3, [d*mt.extent+1 for d in disps]) mt2 = mt.Create_hindexed_block(4, [d*mt.extent+1 for d in disps]) 
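# --- Illustrative sketch (not part of the test suite) ----------------------
# The round trip the dtlib tests above verify: a structured NumPy dtype maps
# to a derived MPI datatype and back.  Assumes NumPy is installed (the tests
# skip these checks otherwise).
import numpy as np
from mpi4py import MPI
from mpi4py.util.dtlib import from_numpy_dtype, to_numpy_dtype

dtype = np.dtype("i4,f8", align=True)        # small aligned struct
mpitype = from_numpy_dtype(dtype)            # derived MPI datatype
assert to_numpy_dtype(mpitype) == dtype      # round trip recovers the dtype
assert mpitype.extent == dtype.itemsize

mpitype.Commit()                             # commit before use in communication
buf = np.zeros(3, dtype=dtype)
MPI.COMM_WORLD.Bcast([buf, mpitype], root=0)
mpitype.Free()
# ----------------------------------------------------------------------------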
self.check(mt1) self.check(mt2) dt1 = tonumpy(mt1) dt2 = tonumpy(mt2) self.check(dt1) self.check(dt2) self.assertEqual(dt1, dt2) mt3 = mt1.Create_hindexed([1], [0]) mt4 = mt2.Create_hindexed_block(1, [0]) self.check(mt3) self.check(mt4) mt3.Free() mt4.Free() mt1.Free() mt2.Free() @unittest.skipMPI('msmpi') def testF77(self): for mt in mpif77types: dt = tonumpy(mt) if np_dtype is not None: self.assertEqual(dt.itemsize, mt.extent) @unittest.skipMPI('msmpi') def testF90(self): for mt in mpif90types: dt = tonumpy(mt) if np_dtype is not None: self.assertEqual(dt.itemsize, mt.extent) @unittest.skipMPI('msmpi') def testF90Integer(self): try: mt = MPI.Datatype.Create_f90_integer(1) if mt == MPI.DATATYPE_NULL or mt.Get_size() == 0: raise NotImplementedError except NotImplementedError: self.skipTest('mpi-type-create-f90-integer') for r in range(1, 19): with self.subTest(r=r): mt = MPI.Datatype.Create_f90_integer(r) dt = tonumpy(mt) if np_dtype is not None: self.assertEqual(dt.kind, 'i') self.assertEqual(dt.itemsize, mt.extent) size = mt.Get_size() tstr = f'i{size}' stp, mtp = self.makeStruct(tstr, mt) self.assertEqual(stp.itemsize, mtp.extent) self.check(mtp) mtp.Free() @unittest.skipMPI('msmpi') def testF90Real(self): try: mt = MPI.Datatype.Create_f90_real(7, MPI.UNDEFINED) if mt == MPI.DATATYPE_NULL or mt.Get_size() == 0: raise NotImplementedError except NotImplementedError: self.skipTest('mpi-type-create-f90-real') for p in (6, 7, 14, 15): with self.subTest(p=p): mt = MPI.Datatype.Create_f90_real(p, MPI.UNDEFINED) dt = tonumpy(mt) if np_dtype is not None: self.assertEqual(dt.kind, 'f') self.assertEqual(dt.itemsize, mt.extent) size = mt.Get_size() tstr = f'i{size}' stp, mtp = self.makeStruct(tstr, mt) self.assertEqual(stp.itemsize, mtp.extent) self.check(mtp) mtp.Free() @unittest.skipMPI('msmpi') def testF90Complex(self): try: mt = MPI.Datatype.Create_f90_complex(7, MPI.UNDEFINED) if mt == MPI.DATATYPE_NULL or mt.Get_size() == 0: raise NotImplementedError except NotImplementedError: self.skipTest('mpi-type-create-f90-complex') for p in (6, 7, 14, 15): with self.subTest(p=p): mt = MPI.Datatype.Create_f90_complex(p, MPI.UNDEFINED) dt = tonumpy(mt) if np_dtype is not None: self.assertEqual(dt.kind, 'c') self.assertEqual(dt.itemsize, mt.extent) def testPair(self): for mt in mpipairtypes: with self.subTest(datatype=mt.name): dt = tonumpy(mt) if np_dtype is not None: self.assertTrue(dt.isalignedstruct) self.assertEqual(dt.itemsize, mt.extent) integral = 'bhilqpBHILQP' floating = 'fdg' vtypes = integral + floating itypes = integral for vcode, icode in itertools.product(vtypes, itypes): value = MPI.Datatype.fromcode(vcode) index = MPI.Datatype.fromcode(icode) pair = MPI.Datatype.Get_value_index(value, index) if pair == MPI.DATATYPE_NULL: continue vt, it, pt = map(tonumpy, (value, index, pair)) dt = (f'{vt},{it}', {'align': True}) if np_dtype is not None: dt = np_dtype(dt[0], **dt[1]) self.assertEqual(pt, dt) def testPairStruct(self): cases = [mpipairtypes]*3 +[[False, True]] for mt1, mt2, mt3, dup in itertools.product(*cases): with self.subTest(mt1=mt1.name, mt2=mt2.name, mt3=mt3.name): if dup: mt1 = mt1.Dup() mt2 = mt2.Dup() mt3 = mt3.Dup() align = max(mt.extent for mt in (mt1, mt2, mt3)) structtype = MPI.Datatype.Create_struct( [1, 1, 1], [0, align, align*2], [mt1, mt2, mt3], ) if dup: mt1.Free() mt2.Free() mt3.Free() dt = tonumpy(structtype) structtype.Free() if np_dtype is not None: self.assertTrue(dt.isalignedstruct) def testAlignmentComplex(self): complexcodes = list('FDG') complexcodes += [f'c{n}' 
for n in (8, 16)] for t in typecodes + complexcodes: with self.subTest(typecode=t): datatype = MPI.Datatype.fromcode(t) alignment1 = MPI._typealign(datatype) if np_dtype is not None: alignment2 = np_dtype(t).alignment self.assertEqual(alignment1, alignment2) def testAlignmentPair(self): for pairtype in mpipairtypes: alignment1 = MPI._typealign(pairtype) self.assertIn(alignment1, (2, 4, 8, 16)) if np_dtype is not None: alignment2 = tonumpy(pairtype).alignment self.assertEqual(alignment1, alignment2) def testAlignmentStruct(self): off = MPI.DOUBLE.extent structtype = MPI.Datatype.Create_struct( [1, 1], [0, off], [MPI.INT, MPI.DOUBLE], ) alignment = MPI._typealign(structtype) self.assertIsNone(alignment) structtype.Free() def testMissingNumPy(self): from mpi4py.util import dtlib np_dtype = dtlib._np_dtype dtlib._np_dtype = None try: for t in typecodes: with self.subTest(typecode=t): mt = MPI.Datatype.fromcode(t) dt = tonumpy(mt) code = mt.tocode() self.assertEqual(dt, code) arraytype = mt.Create_contiguous(7) dt = tonumpy(arraytype) arraytype.Free() self.assertIsInstance(dt, tuple) self.assertEqual(dt[0], code) self.assertEqual(dt[1], (7,)) structtype = MPI.Datatype.Create_struct( [1, 1], [0, mt.extent], [mt, mt], ) dt = tonumpy(structtype) structtype.Free() self.assertIsInstance(dt, dict) self.assertEqual(dt['formats'], [code]*2) self.assertEqual(dt['offsets'], [0, mt.extent]) self.assertEqual(dt['itemsize'], mt.extent*2) self.assertTrue(dt['aligned']) with self.assertRaises(RuntimeError): fromnumpy(None) finally: dtlib._np_dtype = np_dtype @unittest.skipIf(numpy is None, 'numpy') def testFailures(self): endian = '>' if np_dtype(' 1) def testWaitAll(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() dest = (rank + 1) % size source = (rank - 1) % size # requests = [] for smess in messages: req = comm.issend(smess, dest) requests.append(req) flag, _ = self.RequestType.testall(requests) self.assertFalse(flag) flag, obj = self.RequestType.testall(requests) self.assertFalse(flag) self.assertIsNone(obj) comm.barrier() for smess in messages: rmess = comm.recv(None, source) self.assertEqual(rmess, smess) obj = self.RequestType.waitall(requests) self.assertEqual(obj, [None]*len(messages)) # requests1 = [] for smess in messages: req = comm.issend(smess, dest) requests1.append(req) requests2 = [] for _ in messages: req = comm.mprobe(source).irecv() requests2.append(req) statuses = [MPI.Status()] obj = self.RequestType.waitall(requests2, statuses) self.assertEqual(obj, messages) self.assertEqual(len(statuses), len(requests2)) for status in statuses: self.assertEqual(status.source, source) self.assertEqual(status.tag, 0) self.assertGreater(status.Get_count(), 0) comm.barrier() statuses = (MPI.Status(),) self.RequestType.waitall(requests1, statuses) self.assertEqual(statuses[0].error, 0) def testSendrecv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for smess in messages: rmess = self.COMM.sendrecv(smess, MPI.PROC_NULL, 0, None, MPI.PROC_NULL, 0) self.assertIsNone(rmess) if isinstance(self.COMM, pkl5.Comm): rbuf = MPI.Alloc_mem(32) else: rbuf = None for smess in messages: dest = (rank + 1) % size source = (rank - 1) % size rmess = self.COMM.sendrecv(None, dest, 0, None, source, 0) self.assertIsNone(rmess) rmess = self.COMM.sendrecv(smess, dest, 0, None, source, 0) self.assertEqual(rmess, smess) status = MPI.Status() rmess = self.COMM.sendrecv(smess, dest, 42, rbuf, source, 42, status) self.assertEqual(status.source, source) self.assertEqual(status.tag, 42) 
self.assertEqual(status.error, 0) if rbuf is not None: MPI.Free_mem(rbuf) def testPingPong01(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for smess in messages: self.COMM.send(smess, MPI.PROC_NULL) rmess = self.COMM.recv(None, MPI.PROC_NULL, 0) self.assertIsNone(rmess) if size == 1: return smess = None if rank == 0: self.COMM.send(smess, rank+1, 0) rmess = self.COMM.recv(None, rank+1, 0) elif rank == 1: rmess = self.COMM.recv(None, rank-1, 0) self.COMM.send(smess, rank-1, 0) else: rmess = smess self.assertEqual(rmess, smess) for smess in messages: if rank == 0: self.COMM.send(smess, rank+1, 0) rmess = self.COMM.recv(None, rank+1, 0) elif rank == 1: rmess = self.COMM.recv(None, rank-1, 0) self.COMM.send(smess, rank-1, 0) else: rmess = smess self.assertEqual(rmess, smess) def testIrecv(self): if isinstance(self.COMM, pkl5.Comm): self.assertRaises( RuntimeError, self.COMM.irecv, None, MPI.PROC_NULL, 0, ) def testProbe(self): comm = self.COMM.Dup() try: status = MPI.Status() flag = comm.iprobe(MPI.ANY_SOURCE, MPI.ANY_TAG, status) self.assertFalse(flag) for smess in messages: request = comm.issend(smess, comm.rank, 123) self.assertIsInstance(request, self.RequestType) self.assertTrue (bool(request != MPI.REQUEST_NULL)) self.assertFalse(bool(request == MPI.REQUEST_NULL)) self.assertTrue (bool(request == self.RequestType(request))) self.assertFalse(bool(request != self.RequestType(request))) self.assertTrue (bool(request != None)) self.assertFalse(bool(request == None)) self.assertTrue (bool(request)) while not comm.iprobe(MPI.ANY_SOURCE, MPI.ANY_TAG, status): pass self.assertEqual(status.source, comm.rank) self.assertEqual(status.tag, 123) comm.probe(MPI.ANY_SOURCE, MPI.ANY_TAG, status) self.assertEqual(status.source, comm.rank) self.assertEqual(status.tag, 123) self.assertTrue(request) flag, obj = request.test() self.assertTrue(request) self.assertFalse(flag) self.assertIsNone(obj) obj = comm.recv(None, comm.rank, 123) self.assertEqual(obj, smess) self.assertTrue(request) obj = request.wait() self.assertFalse(request) self.assertIsNone(obj) finally: comm.Free() def testMProbe(self): comm = self.COMM.Dup() try: message = comm.mprobe(MPI.PROC_NULL) self.assertIsInstance(message, self.MessageType) self.assertTrue (bool(message == MPI.MESSAGE_NO_PROC)) self.assertFalse(bool(message != MPI.MESSAGE_NO_PROC)) self.assertTrue (bool(message != None)) self.assertFalse(bool(message == None)) rmess = message.recv() self.assertTrue (bool(message == MPI.MESSAGE_NULL)) self.assertFalse(bool(message != MPI.MESSAGE_NULL)) self.assertIsNone(rmess) message = comm.mprobe(MPI.PROC_NULL) self.assertIsInstance(message, self.MessageType) self.assertTrue (bool(message == MPI.MESSAGE_NO_PROC)) self.assertFalse(bool(message != MPI.MESSAGE_NO_PROC)) request = message.irecv() self.assertTrue (bool(message == MPI.MESSAGE_NULL)) self.assertFalse(bool(message != MPI.MESSAGE_NULL)) self.assertTrue (bool(request != MPI.REQUEST_NULL)) self.assertFalse(bool(request == MPI.REQUEST_NULL)) rmess = request.wait() self.assertTrue (bool(request == MPI.REQUEST_NULL)) self.assertFalse(bool(request != MPI.REQUEST_NULL)) self.assertIsNone(rmess) for smess in messages: request = comm.issend(smess, comm.rank, 123) message = comm.mprobe(comm.rank, 123) self.assertIsInstance(message, self.MessageType) self.assertTrue (bool(message == self.MessageType(message))) self.assertFalse(bool(message != self.MessageType(message))) rmess = message.recv() self.assertEqual(rmess, smess) obj = request.wait() self.assertFalse(request) 
self.assertIsNone(obj) flag, obj = request.test() self.assertTrue(flag) self.assertIsNone(obj) message.free() for smess in messages: request = comm.issend(smess, comm.rank, 123) status = MPI.Status() message = comm.mprobe(MPI.ANY_SOURCE, MPI.ANY_TAG, status) self.assertEqual(status.source, comm.rank) self.assertEqual(status.tag, 123) self.assertTrue(message) status = MPI.Status() rmess = message.recv(status) self.assertFalse(message) self.assertEqual(status.source, comm.rank) self.assertEqual(status.tag, 123) self.assertEqual(rmess, smess) self.assertTrue(request) request.wait() for smess in messages: request = comm.issend(smess, comm.rank, 123) status = MPI.Status() message = comm.mprobe(MPI.ANY_SOURCE, MPI.ANY_TAG, status) self.assertEqual(status.source, comm.rank) self.assertEqual(status.tag, 123) self.assertTrue(message) rreq = message.irecv() self.assertFalse(message) self.assertTrue(rreq) status = MPI.Status() rmess = rreq.wait(status) self.assertFalse(rreq) self.assertEqual(status.source, comm.rank) self.assertEqual(status.tag, 123) self.assertEqual(rmess, smess) flag, obj = rreq.test() self.assertTrue(flag) self.assertIsNone(obj) self.assertTrue(request) obj = request.wait() self.assertFalse(request) self.assertIsNone(obj) flag, obj = request.test() self.assertTrue(flag) self.assertIsNone(obj) for smess in messages: request = comm.issend(smess, comm.rank, 123) message = comm.mprobe(MPI.ANY_SOURCE, MPI.ANY_TAG) rreq = message.irecv() rreq.test() request.free() finally: comm.Free() def testIMProbe(self): comm = self.COMM.Dup() try: status = MPI.Status() for smess in messages: message = comm.improbe(MPI.PROC_NULL) self.assertIsInstance(message, self.MessageType) self.assertEqual(message, MPI.MESSAGE_NO_PROC) for smess in messages: message = comm.improbe(comm.rank, 123) self.assertIsNone(message) request = comm.issend(smess, comm.rank, 123) while not comm.iprobe(comm.rank, 123): pass message = comm.improbe(comm.rank, 123) self.assertIsInstance(message, self.MessageType) rmess = message.recv() self.assertEqual(rmess, smess) request.wait() for smess in messages: message = comm.improbe(comm.rank, 123) self.assertIsNone(message) request = comm.issend(smess, comm.rank, 123) while not comm.iprobe(comm.rank, 123): pass message = comm.improbe(MPI.ANY_SOURCE, MPI.ANY_TAG, status) self.assertEqual(status.source, comm.rank) self.assertEqual(status.tag, 123) self.assertTrue(message) rmess = message.recv() self.assertFalse(message) self.assertEqual(rmess, smess) self.assertTrue(request) request.wait() self.assertFalse(request) finally: comm.Free() def testMessageProbeIProbe(self): comm = self.COMM.Dup() try: status = MPI.Status() for smess in messages: request = comm.issend(smess, comm.rank, 123) message = self.MessageType.probe(comm, MPI.ANY_SOURCE, MPI.ANY_TAG, status) self.assertEqual(status.source, comm.rank) self.assertEqual(status.tag, 123) self.assertTrue(message) rmess = message.recv() self.assertFalse(message) self.assertEqual(rmess, smess) self.assertTrue(request) request.wait() self.assertFalse(request) for smess in messages: message = self.MessageType.iprobe(comm, comm.rank, 123) self.assertIsNone(message) request = comm.issend(smess, comm.rank, 123) while not comm.iprobe(comm.rank, 123): pass message = self.MessageType.iprobe(comm, MPI.ANY_SOURCE, MPI.ANY_TAG, status) self.assertEqual(status.source, comm.rank) self.assertEqual(status.tag, 123) self.assertTrue(message) rmess = message.recv() self.assertFalse(message) self.assertEqual(rmess, smess) self.assertTrue(request) request.wait() 
self.assertFalse(request) finally: comm.Free() def testSSendAndMProbe(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() if size == 1: return comm = self.COMM.Dup() try: for smess in messages: if rank == 0: comm.ssend(smess, 1) message = comm.mprobe(1) rmess = message.recv() self.assertEqual(rmess, smess) if rank == 1: message = comm.mprobe(0) rmess = message.recv() comm.ssend(rmess, 0) self.assertEqual(rmess, smess) finally: comm.Free() def testRequest(self): req = self.RequestType() self.assertFalse(req) self.assertEqual(req, self.RequestType()) req = self.RequestType(MPI.REQUEST_NULL) self.assertFalse(req) self.assertEqual(req, MPI.REQUEST_NULL) self.assertEqual(req, self.RequestType()) def testMessage(self): msg = self.MessageType() self.assertFalse(msg) self.assertEqual(msg, self.MessageType()) msg = self.MessageType(MPI.MESSAGE_NULL) self.assertFalse(msg) self.assertEqual(msg, self.MessageType()) msg = self.MessageType(MPI.MESSAGE_NO_PROC) self.assertTrue(msg) self.assertEqual(msg, MPI.MESSAGE_NO_PROC) self.assertEqual(msg, self.MessageType(MPI.MESSAGE_NO_PROC)) self.assertNotEqual(msg, MPI.MESSAGE_NULL) @staticmethod def make_intercomm(basecomm): if unittest.is_mpi('msmpi') and MPI.COMM_WORLD.Get_size() >= 3: raise unittest.SkipTest("msmpi") size = basecomm.Get_size() rank = basecomm.Get_rank() if size == 1: raise unittest.SkipTest("comm.size==1") if rank < size // 2 : COLOR = 0 local_leader = 0 remote_leader = size // 2 else: COLOR = 1 local_leader = 0 remote_leader = 0 basecomm.Barrier() intracomm = basecomm.Split(COLOR, key=0) intercomm = MPI.Intracomm.Create_intercomm( intracomm, local_leader, basecomm, remote_leader ) intracomm.Free() if isinstance(basecomm, pkl5.Intracomm): intercomm = pkl5.Intercomm(intercomm) return intercomm, COLOR def testBcastIntra(self, msglist=None, check=None): comm = self.COMM size = comm.Get_size() for smess in (msglist or messages): for root in range(size): rmess = comm.bcast(smess, root) if msglist and check: self.assertTrue(check(rmess)) else: self.assertEqual(rmess, smess) def testBcastInter(self, msglist=None, check=None): comm, COLOR = self.make_intercomm(self.COMM) rank = comm.Get_rank() size = comm.Get_size() rsize = comm.Get_remote_size() for smess in (msglist or messages)[:1]: comm.barrier() for color in [0, 1]: if COLOR == color: for root in range(size): if root == rank: rmess = comm.bcast(smess, root=MPI.ROOT) else: rmess = comm.bcast(None, root=MPI.PROC_NULL) self.assertIsNone(rmess) else: for root in range(rsize): rmess = comm.bcast(None, root=root) if msglist and check: self.assertTrue(check(rmess)) else: self.assertEqual(rmess, smess) if isinstance(comm, pkl5.Comm): bcast = comm.bcast rsize = comm.Get_remote_size() self.assertRaises(MPI.Exception, bcast, None, root=rsize) comm.Free() def testGatherIntra(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() for smess in messages: for root in range(size): rmess = comm.gather(smess, root) if rank == root: self.assertEqual(rmess, [smess]*size) else: self.assertIsNone(rmess) self.assertRaises(MPI.Exception, comm.gather, None, root=-1) self.assertRaises(MPI.Exception, comm.gather, None, root=size) def testGatherInter(self): comm, COLOR = self.make_intercomm(self.COMM) rank = comm.Get_rank() size = comm.Get_size() rsize = comm.Get_remote_size() for smess in messages: for color in [0, 1]: if color == COLOR: for root in range(size): if root == rank: rmess = comm.gather(smess, root=MPI.ROOT) self.assertEqual(rmess, [smess] * rsize) else: rmess = comm.gather(None, 
root=MPI.PROC_NULL) self.assertIsNone(rmess) else: for root in range(rsize): rmess = comm.gather(smess, root=root) self.assertIsNone(rmess) self.assertRaises(MPI.Exception, comm.gather, None, root=max(size,rsize)) self.assertRaises(MPI.Exception, comm.gather, None, root=max(size,rsize)) comm.Free() def testScatterIntra(self): comm = self.COMM size = comm.Get_size() for smess in messages: for root in range(size): rmess = comm.scatter(None, root) self.assertIsNone(rmess) rmess = comm.scatter([smess]*size, root) self.assertEqual(rmess, smess) rmess = comm.scatter(iter([smess]*size), root) self.assertEqual(rmess, smess) self.assertRaises(MPI.Exception, comm.scatter, [None]*size, root=-1) self.assertRaises(MPI.Exception, comm.scatter, [None]*size, root=size) if size == 1: self.assertRaises(ValueError, comm.scatter, [None]*(size-1), root=0) self.assertRaises(ValueError, comm.scatter, [None]*(size+1), root=0) def testScatterInter(self): comm, COLOR = self.make_intercomm(self.COMM) rank = comm.Get_rank() size = comm.Get_size() rsize = comm.Get_remote_size() for smess in messages + [messages]: for color in [0, 1]: if color == COLOR: for root in range(size): if root == rank: rmess = comm.scatter([smess] * rsize, root=MPI.ROOT) else: rmess = comm.scatter(None, root=MPI.PROC_NULL) self.assertIsNone(rmess) else: for root in range(rsize): rmess = comm.scatter(None, root=root) self.assertEqual(rmess, smess) self.assertRaises(MPI.Exception, comm.scatter, None, root=max(size, rsize)) self.assertRaises(MPI.Exception, comm.scatter, None, root=max(size, rsize)) comm.Free() def testAllgatherIntra(self): comm = self.COMM size = comm.Get_size() for smess in messages: rmess = comm.allgather(None) self.assertEqual(rmess, [None]*size) rmess = comm.allgather(smess) self.assertEqual(rmess, [smess]*size) def testAllgatherInter(self): comm, COLOR = self.make_intercomm(self.COMM) size = comm.Get_remote_size() for smess in messages: rmess = comm.allgather(None) self.assertEqual(rmess, [None]*size) rmess = comm.allgather(smess) self.assertEqual(rmess, [smess]*size) comm.Free() def testAlltoallIntra(self): comm = self.COMM size = comm.Get_size() for smess in messages: rmess = comm.alltoall(None) self.assertEqual(rmess, [None]*size) rmess = comm.alltoall([smess]*size) self.assertEqual(rmess, [smess]*size) rmess = comm.alltoall(iter([smess]*size)) self.assertEqual(rmess, [smess]*size) self.assertRaises(ValueError, comm.alltoall, [None]*(size-1)) self.assertRaises(ValueError, comm.alltoall, [None]*(size+1)) def testAlltoallInter(self): comm, COLOR = self.make_intercomm(self.COMM) size = comm.Get_remote_size() for smess in messages: rmess = comm.alltoall(None) self.assertEqual(rmess, [None]*size) rmess = comm.alltoall([smess]*size) self.assertEqual(rmess, [smess]*size) rmess = comm.alltoall(iter([smess]*size)) self.assertEqual(rmess, [smess]*size) self.assertRaises(ValueError, comm.alltoall, [None]*(size-1)) self.assertRaises(ValueError, comm.alltoall, [None]*(size+1)) comm.Free() @unittest.skipIf(numpy is None, 'numpy') def testBigMPI(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() dest = (rank + 1) % size source = (rank - 1) % size bigmpi = self.bigmpi blocksizes = ( 63, 64, 65, (1<<12)-1, (1<<12), (1<<12)+1, ) for blocksize in blocksizes: bigmpi.blocksize = blocksize a = numpy.empty(1024, dtype='i') b = numpy.empty(1024, dtype='i') c = numpy.empty(1024, dtype='i') a.fill(rank) b.fill(dest) c.fill(42) status = MPI.Status() smess = (a, b) rmess = comm.sendrecv( smess, dest, 42, None, source, 42, status, 
) self.assertTrue(numpy.all(rmess[0] == source)) self.assertTrue(numpy.all(rmess[1] == rank)) self.assertGreater(status.Get_elements(MPI.BYTE), 0) comm.barrier() status = MPI.Status() smess = (a, b) request = comm.issend(smess, dest, 123) rmess = comm.mprobe(source, 123).irecv().wait(status) self.assertTrue(numpy.all(rmess[0] == source)) self.assertTrue(numpy.all(rmess[1] == rank)) self.assertGreater(status.Get_elements(MPI.BYTE), 0) request.Free() comm.barrier() check = lambda x: numpy.all(x == 42) self.testBcastIntra([c, c], check) self.testBcastInter([c, c], check) check2 = lambda x: check(x[0]) and check(x[1]) self.testBcastIntra([(c, c.copy())], check2) self.testBcastInter([(c, c.copy())], check2) class BaseTestPKL5: CommType = pkl5.Intracomm MessageType = pkl5.Message RequestType = pkl5.Request def setUp(self): super().setUp() self.pickle_prev = pkl5.pickle self.pickle = pkl5.Pickle() self.pickle.THRESHOLD = 0 pkl5.pickle = self.pickle def tearDown(self): super().tearDown() pkl5.pickle = self.pickle_prev @unittest.skipIf(numpy is None, 'numpy') def testPickle5(self): comm = self.COMM rank = comm.Get_rank() pickle = self.pickle protocols = list(range(-2, pickle.PROTOCOL+1)) for protocol in [None] + protocols: pickle.PROTOCOL = protocol for threshold in (-1, 0, 64, 256, None): pickle.THRESHOLD = threshold threshold = pickle.THRESHOLD for slen in (0, 32, 64, 128, 256, 512): sobj = numpy.empty(slen, dtype='i') sobj.fill(rank) # robj = comm.sendrecv( sobj, rank, 42, None, rank, 42) self.assertTrue(numpy.all(sobj==robj)) # data, bufs = pickle.dumps_oob(sobj) self.assertIs(type(data), bytes) self.assertIs(type(bufs), list) robj = pickle.loads_oob(data, bufs) self.assertTrue(numpy.all(sobj==robj)) have_pickle5 = ( sys.version_info >= (3, 8) or 'pickle5' in sys.modules ) if sobj.nbytes >= threshold and have_pickle5: self.assertEqual(len(bufs), 1) self.assertIs(type(bufs[0]), MPI.buffer) else: self.assertEqual(len(bufs), 0) class TestMPISelf(BaseTest, unittest.TestCase): COMM = MPI.COMM_SELF class TestMPIWorld(BaseTest, unittest.TestCase): COMM = MPI.COMM_WORLD class TestPKL5Self(BaseTestPKL5, TestMPISelf): pass class TestPKL5World(BaseTestPKL5, TestMPIWorld): pass if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_util_pool.py000066400000000000000000000353031475341043600173510ustar00rootroot00000000000000from mpi4py import MPI import mpi4py.util.pool as pool import concurrent.futures as cf import itertools import functools import warnings import unittest import time import sys import os def sqr(x, wait=0.0): time.sleep(wait) return x*x def mul(x, y): return x*y def identity(x): return x def raising(): raise KeyError("key") TIMEOUT1 = 0.1 TIMEOUT2 = 0.2 class TimingWrapper: def __init__(self, func): self.func = func self.elapsed = None def __call__(self, *args, **kwds): t = time.monotonic() try: return self.func(*args, **kwds) finally: self.elapsed = time.monotonic() - t class BaseTestPool: PoolType = None @classmethod def Pool(cls, *args, **kwargs): if 'coverage' in sys.modules: kwargs['python_args'] = '-m coverage run'.split() Pool = cls.PoolType return Pool(*args, **kwargs) @classmethod def setUpClass(cls): super().setUpClass() cls.pool = cls.Pool(1) @classmethod def tearDownClass(cls): cls.pool.terminate() cls.pool.join() cls.pool = None super().tearDownClass() def test_apply(self): papply = self.pool.apply self.assertEqual(papply(sqr, (5,)), sqr(5)) self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3)) def test_map(self): self.assertEqual( self.pool.map(sqr, range(10)), 
list(map(sqr, list(range(10)))) ) self.assertEqual( self.pool.map(sqr, (i for i in range(10))), list(map(sqr, list(range(10)))) ) self.assertEqual( self.pool.map(sqr, list(range(10))), list(map(sqr, list(range(10)))) ) self.assertEqual( self.pool.map(sqr, range(100), chunksize=20), list(map(sqr, list(range(100)))) ) self.assertEqual( self.pool.map(sqr, (i for i in range(100)), chunksize=20), list(map(sqr, list(range(100)))) ) self.assertEqual( self.pool.map(sqr, list(range(100)), chunksize=20), list(map(sqr, list(range(100)))) ) def test_imap(self): self.assertEqual( list(self.pool.imap(sqr, range(10))), list(map(sqr, list(range(10)))) ) self.assertEqual( list(self.pool.imap(sqr, (i for i in range(10)))), list(map(sqr, list(range(10)))) ) self.assertEqual( list(self.pool.imap(sqr, list(range(10)))), list(map(sqr, list(range(10)))) ) it = self.pool.imap(sqr, range(10)) for i in range(10): self.assertEqual(next(it), i*i) self.assertRaises(StopIteration, next, it) it = self.pool.imap(sqr, list(range(10))) for i in range(10): self.assertEqual(next(it), i*i) self.assertRaises(StopIteration, next, it) it = self.pool.imap(sqr, range(100), chunksize=20) for i in range(100): self.assertEqual(next(it), i*i) self.assertRaises(StopIteration, next, it) it = self.pool.imap(sqr, list(range(100)), chunksize=20) for i in range(100): self.assertEqual(next(it), i*i) self.assertRaises(StopIteration, next, it) def test_imap_unordered(self): args = list(range(10)) result = list(map(sqr, args)) it = self.pool.imap_unordered(sqr, args) self.assertEqual(sorted(it), result) it = self.pool.imap_unordered(sqr, iter(args)) self.assertEqual(sorted(it), result) it = self.pool.imap_unordered(sqr, (a for a in args)) self.assertEqual(sorted(it), result) args = list(range(100)) result = list(map(sqr, args)) it = self.pool.imap_unordered(sqr, args, chunksize=20) self.assertEqual(sorted(it), result) it = self.pool.imap_unordered(sqr, iter(args), chunksize=20) self.assertEqual(sorted(it), result) it = self.pool.imap_unordered(sqr, (a for a in args), chunksize=20) self.assertEqual(sorted(it), result) def test_starmap(self): tuples = list(zip(range(10), range(9, -1, -1))) self.assertEqual( self.pool.starmap(mul, tuples), list(itertools.starmap(mul, tuples)) ) tuples = list(zip(range(100), range(99, -1, -1))) self.assertEqual( self.pool.starmap(mul, tuples, chunksize=20), list(itertools.starmap(mul, tuples)) ) def test_istarmap(self): tuples = list(zip(range(10), range(9, -1, -1))) result = list(itertools.starmap(mul, tuples)) it = self.pool.istarmap(mul, tuples) self.assertEqual(list(it), result) iterator = zip(range(10), range(9, -1, -1)) it = self.pool.istarmap(mul, iterator) self.assertEqual(list(it), result) tuples = list(zip(range(10), range(9, -1, -1))) it = self.pool.istarmap(mul, tuples) for i, j in tuples: self.assertEqual(next(it), i*j) self.assertRaises(StopIteration, next, it) tuples = list(zip(range(100), range(99, -1, -1))) it = self.pool.istarmap(mul, tuples, chunksize=20) for i, j in tuples: self.assertEqual(next(it), i*j) self.assertRaises(StopIteration, next, it) def test_istarmap_unordered(self): tuples = list(zip(range(10), range(9, -1, -1))) result = list(itertools.starmap(mul, tuples)) it = self.pool.istarmap_unordered(mul, tuples) self.assertEqual(sorted(it), sorted(result)) iterator = zip(range(10), range(9, -1, -1)) it = self.pool.istarmap_unordered(mul, iterator) self.assertEqual(sorted(it), sorted(result)) tuples = list(zip(range(100), range(99, -1, -1))) result = list(itertools.starmap(mul, tuples)) 
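# istarmap_unordered may yield results in any order, so the output is compared sorted.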
it = self.pool.istarmap_unordered(mul, tuples, chunksize=20) self.assertEqual(sorted(it), sorted(result)) def test_apply_async(self): res = self.pool.apply_async(sqr, (7,)) self.assertEqual(res.get(), 49) res = self.pool.apply_async(sqr, (7, TIMEOUT2,)) get = TimingWrapper(res.get) self.assertEqual(get(), 49) self.assertLess(get.elapsed, TIMEOUT2*10) self.assertGreater(get.elapsed, TIMEOUT2/10) def test_apply_async_timeout(self): res = self.pool.apply_async(sqr, (7, TIMEOUT2,)) self.assertFalse(res.ready()) self.assertRaises(ValueError, res.successful) res.wait(TIMEOUT2/100) self.assertFalse(res.ready()) self.assertRaises(ValueError, res.successful) self.assertRaises(TimeoutError, res.get, TIMEOUT2/100) res.wait() self.assertTrue(res.ready()) self.assertTrue(res.successful()) self.assertEqual(res.get(), 49) def test_map_async(self): args = list(range(10)) self.assertEqual( self.pool.map_async(sqr, args).get(), list(map(sqr, args)) ) args = list(range(100)) self.assertEqual( self.pool.map_async(sqr, args, chunksize=20).get(), list(map(sqr, args)) ) def test_map_async_callbacks(self): call_args = [] result = self.pool.map_async( int, ['1', '2'], callback=call_args.append, error_callback=call_args.append ) result.wait() self.assertTrue(result.successful()) self.assertEqual(len(call_args), 1) self.assertEqual(call_args[0], [1, 2]) result = self.pool.map_async( int, ['a'], callback=call_args.append, error_callback=call_args.append ) result.wait() self.assertFalse(result.successful()) self.assertEqual(len(call_args), 2) self.assertIsInstance(call_args[1], ValueError) def test_starmap_async(self): tuples = list(zip(range(10), range(9, -1, -1))) self.assertEqual( self.pool.starmap_async(mul, tuples).get(), list(itertools.starmap(mul, tuples)) ) tuples = list(zip(range(1000), range(999, -1, -1))) self.assertEqual( self.pool.starmap_async(mul, tuples, chunksize=100).get(), list(itertools.starmap(mul, tuples)) ) # --- def test_terminate(self): p = self.Pool(1) for _ in range(100): p.apply_async(time.sleep, (TIMEOUT1,)) result = p.apply_async(time.sleep, (TIMEOUT1,)) p.terminate() p.join() result.wait(TIMEOUT1) result.wait() self.assertFalse(result.successful()) self.assertRaises(Exception, result.get, TIMEOUT1) self.assertRaises(Exception, result.get) p = self.Pool(1) args = [TIMEOUT1] * 100 result = p.map_async(time.sleep, args, chunksize=1) p.terminate() p.join() result.wait(TIMEOUT1) result.wait() self.assertFalse(result.successful()) self.assertRaises(Exception, result.get, TIMEOUT1) self.assertRaises(Exception, result.get) def test_empty_iterable(self): p = self.Pool(1) self.assertEqual(p.map(sqr, []), []) self.assertEqual(list(p.imap(sqr, [])), []) self.assertEqual(list(p.imap_unordered(sqr, [])), []) self.assertEqual(p.starmap(sqr, []), []) self.assertEqual(list(p.istarmap(sqr, [])), []) self.assertEqual(list(p.istarmap_unordered(sqr, [])), []) self.assertEqual(p.map_async(sqr, []).get(), []) self.assertEqual(p.starmap_async(mul, []).get(), []) p.close() p.join() def test_enter_exit(self): pool = self.Pool(1) with pool: pass # with self.assertRaises(ValueError): # with pool: # pass pool.join() # --- def test_async_error_callback(self): p = self.Pool(1) scratchpad = [None] def errback(exc): scratchpad[0] = exc res = p.apply_async(raising, error_callback=errback) p.close() p.join() self.assertRaises(KeyError, res.get) self.assertTrue(scratchpad[0]) self.assertIsInstance(scratchpad[0], KeyError) def test_pool_worker_lifetime_early_close(self): p = self.Pool(1) results = [] for i in range(5): 
results.append(p.apply_async(sqr, (i, TIMEOUT1))) p.close() p.join() for (j, res) in enumerate(results): self.assertEqual(res.get(), sqr(j)) # --- def test_arg_processes(self): with self.assertRaises(ValueError): self.Pool(-1) with self.assertRaises(ValueError): self.Pool(0) def test_arg_initializer(self): p = self.Pool(1, initializer=identity, initargs=(123,)) with self.assertRaises(TypeError): self.Pool(initializer=123) def test_unsupported_args(self): with warnings.catch_warnings(): warnings.simplefilter("error") with self.assertRaises(UserWarning): with self.Pool(1, maxtasksperchild=1): pass with self.assertRaises(UserWarning): with self.Pool(1, context=object): pass # --- def broken_mpi_spawn(): darwin = (sys.platform == 'darwin') windows = (sys.platform == 'win32') azure = (os.environ.get('TF_BUILD') == 'True') github = (os.environ.get('GITHUB_ACTIONS') == 'true') skip_spawn = ( os.environ.get('MPI4PY_TEST_SPAWN') in (None, '0', 'no', 'off', 'false') ) name, version = MPI.get_vendor() if name == 'Open MPI': if version < (3,0,0): return True if version == (4,0,0): return True if version == (4,0,1) and darwin: return True if version == (4,0,2) and darwin: return True if version >= (4,1,0) and version < (4,2,0): if azure or github: return True if version >= (5,0,0) and version < (5,0,7): if skip_spawn: return True if name == 'MPICH': if version >= (3, 4) and version < (4, 0) and darwin: return True if version < (4, 1): if MPI.COMM_WORLD.Get_attr(MPI.APPNUM) is None: return True if version < (4, 3): try: port = MPI.Open_port() MPI.Close_port(port) except: return True if name == 'Intel MPI': import mpi4py if mpi4py.rc.recv_mprobe: return True if MPI.COMM_WORLD.Get_size() > 1 and windows: return True if name == 'Microsoft MPI': if version < (8,1,0): return True if skip_spawn: return True if MPI.COMM_WORLD.Get_attr(MPI.APPNUM) is None: return True if os.environ.get("PMI_APPNUM") is None: return True if name == 'MVAPICH': if MPI.COMM_WORLD.Get_attr(MPI.APPNUM) is None: return True if version < (3,0,0): return True if name == 'MPICH2': if MPI.COMM_WORLD.Get_attr(MPI.APPNUM) is None: return True if MPI.Get_version() < (2,0): return True if any(map(sys.modules.get, ('cupy', 'numba'))): return True # return False @unittest.skipIf(broken_mpi_spawn(), 'mpi-spawn') @unittest.skipIf(MPI.COMM_WORLD.Get_size() > 1, 'mpi-world-size>1') class TestProcessPool(BaseTestPool, unittest.TestCase): PoolType = pool.Pool class TestThreadPool(BaseTestPool, unittest.TestCase): PoolType = pool.ThreadPool # --- class ExtraExecutorMixing: def map( self, fn, iterable, timeout=None, chunksize=1, unordered=False, ): del unordered # ignored, unused return super().map( fn, iterable, timeout=timeout, chunksize=chunksize, ) def starmap( self, fn, iterable, timeout=None, chunksize=1, unordered=False, ): del unordered # ignored, unused fn = functools.partial(self._apply_args, fn) return super().map( fn, iterable, timeout=timeout, chunksize=chunksize, ) @staticmethod def _apply_args(fn, args): return fn(*args) class ExtraExecutor(ExtraExecutorMixing, cf.ThreadPoolExecutor): pass class ExtraPool(pool.Pool): Executor = ExtraExecutor class TestExtraPool(BaseTestPool, unittest.TestCase): PoolType = ExtraPool @classmethod def Pool(cls, *args, **kwargs): return cls.PoolType(*args, **kwargs) del TestExtraPool # --- if __name__ == '__main__': unittest.main() mpi4py-4.0.3/test/test_util_sync.py000066400000000000000000000506351475341043600173610ustar00rootroot00000000000000from mpi4py import MPI import mpi4py.util.sync as sync import 
sys, os import random import time try: import mpiunittest as unittest except ImportError: sys.path.append( os.path.dirname( os.path.abspath(__file__))) import mpiunittest as unittest # --- def random_sleep(max_sleep=0.01): time.sleep(max_sleep * random.random()) # noqa: S311 # --- class BaseTestSequential: COMM = MPI.COMM_NULL def testWith(self): comm = self.COMM for _ in range(3): counter = sync.Counter(comm=comm) with sync.Sequential(comm): value = next(counter) counter.free() self.assertEqual( comm.allgather(value), list(range(comm.size)), ) def testBeginEnd(self): comm = self.COMM seq = sync.Sequential(comm) for _ in range(3): counter = sync.Counter(comm=comm) seq.begin() value = next(counter) seq.end() counter.free() self.assertEqual( comm.allgather(value), list(range(comm.size)), ) class TestSequentialSelf(BaseTestSequential, unittest.TestCase): COMM = MPI.COMM_SELF class TestSequentialWorld(BaseTestSequential, unittest.TestCase): COMM = MPI.COMM_WORLD # --- class BaseTestCounter: COMM = MPI.COMM_NULL def testIter(self): comm = self.COMM size = comm.Get_size() counter = sync.Counter(comm=comm) for value in counter: random_sleep() if value >= size - 1: break counter.free() def testNext(self): comm = self.COMM size = comm.Get_size() counter = sync.Counter(comm=comm) while True: value = next(counter) random_sleep() if value >= size - 1: break counter.free() def execute(self, counter, its, condition=True): values = [] if condition: for _ in range(its): value = counter.next() values.append(value) return sorted(self.COMM.allreduce(values)) def testDefault(self): comm = self.COMM size = comm.Get_size() counter = sync.Counter(comm=comm) values = self.execute(counter, 5) counter.free() self.assertEqual(values, list(range(5 * size))) def testStart(self): comm = self.COMM size = comm.Get_size() counter = sync.Counter(start=7, comm=comm) values = self.execute(counter, 5) counter.free() self.assertEqual(values, list(range(7, 7 + 5 * size))) def testStep(self): comm = self.COMM size = comm.Get_size() counter = sync.Counter(step=2, comm=comm) values = self.execute(counter, 5) counter.free() self.assertEqual(values, list(range(0, 2 * 5 * size, 2))) def testTypechar(self): comm = self.COMM size = comm.Get_size() for typechar in ("hHiIlLqQ" + "fd"): counter = sync.Counter(typecode=typechar, comm=comm) values = self.execute(counter, 3) counter.free() self.assertEqual(values, list(range(3 * size))) def testRoot(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() for root in range(size): counter = sync.Counter(comm=comm, root=root) values = self.execute(counter, 5, rank != root) counter.free() self.assertEqual(values, list(range(5 * (size - 1)))) def testFree(self): comm = self.COMM counter = sync.Counter(comm=comm) for _ in range(5): counter.free() self.assertRaises(RuntimeError, counter.next) class TestCounterSelf(BaseTestCounter, unittest.TestCase): COMM = MPI.COMM_SELF class TestCounterWorld(BaseTestCounter, unittest.TestCase): COMM = MPI.COMM_WORLD # --- class BaseTestMutexBasic: COMM = MPI.COMM_NULL def setUp(self): self.mutex = sync.Mutex(comm=self.COMM) def tearDown(self): self.mutex.free() def testExclusion(self): comm = self.COMM mutex = self.mutex counter = sync.Counter(comm=comm) values = [] comm.Barrier() mutex.acquire() for _ in range(10): values.append(next(counter)) mutex.release() counter.free() first, last = values[0], values[-1] self.assertEqual(first % 10, 0) self.assertEqual(last - first + 1, 10) self.assertEqual(values, list(range(first, last+1))) def 
testFairness(self): comm = self.COMM mutex = self.mutex number = 0 counter = sync.Counter(comm=comm) while next(counter) < comm.Get_size() * 5: mutex.acquire() number += 1 mutex.release() comm.Barrier() counter.free() self.assertEqual(number, 5) def testWith(self): def test_with(): mutex = self.mutex self.assertFalse(mutex.locked()) self.assertRaises(RuntimeError, mutex.release) with mutex: self.assertTrue(mutex.locked()) self.assertRaises(RuntimeError, mutex.acquire) self.assertFalse(mutex.locked()) self.assertRaises(RuntimeError, mutex.release) for _ in range(5): self.COMM.Barrier() test_with() for _ in range(5): random_sleep() test_with() def testAcquireRelease(self): def test_acquire_release(): mutex = self.mutex self.assertFalse(mutex.locked()) self.assertRaises(RuntimeError, mutex.release) locked = mutex.acquire() self.assertTrue(locked) self.assertTrue(mutex.locked()) self.assertRaises(RuntimeError, mutex.acquire) mutex.release() self.assertFalse(mutex.locked()) self.assertRaises(RuntimeError, mutex.release) for _ in range(5): self.COMM.Barrier() test_acquire_release() for _ in range(5): random_sleep() test_acquire_release() def testAcquireNonblocking(self): def test_acquire_nonblocking(): comm = self.COMM mutex = self.mutex self.assertFalse(mutex.locked()) comm.Barrier() locked = mutex.acquire(blocking=False) comm.Barrier() self.assertEqual(mutex.locked(), locked) if locked: mutex.release() self.assertFalse(mutex.locked()) states = comm.allgather(locked) self.assertEqual(states.count(True), 1) comm.Barrier() while not mutex.acquire(blocking=False): pass mutex.release() comm.Barrier() for _ in range(5): self.COMM.Barrier() test_acquire_nonblocking() for _ in range(5): random_sleep() test_acquire_nonblocking() def testAcquireFree(self): mutex = self.mutex mutex.acquire() for _ in range(5): mutex.free() self.assertRaises(RuntimeError, mutex.acquire) self.assertRaises(RuntimeError, mutex.release) self.assertRaises(RuntimeError, mutex.locked) def testFree(self): mutex = self.mutex for _ in range(5): mutex.free() self.assertRaises(RuntimeError, mutex.acquire) self.assertRaises(RuntimeError, mutex.release) self.assertRaises(RuntimeError, mutex.locked) class TestMutexBasicSelf(BaseTestMutexBasic, unittest.TestCase): COMM = MPI.COMM_SELF @unittest.skipMPI('msmpi') class TestMutexBasicWorld(BaseTestMutexBasic, unittest.TestCase): COMM = MPI.COMM_WORLD @unittest.skipMPI('msmpi') def testExclusion(self): super().testExclusion() # --- class BaseTestMutexRecursive: COMM = MPI.COMM_NULL def setUp(self): self.mutex = sync.Mutex(recursive=True, comm=self.COMM) def tearDown(self): self.mutex.free() def testWith(self): def test_with(): mutex = self.mutex self.assertEqual(mutex.count(), 0) self.assertRaises(RuntimeError, mutex.release) with mutex: self.assertEqual(mutex.count(), 1) with mutex: self.assertEqual(mutex.count(), 2) with mutex: self.assertEqual(mutex.count(), 3) self.assertEqual(mutex.count(), 2) self.assertEqual(mutex.count(), 1) self.assertEqual(mutex.count(), 0) self.assertRaises(RuntimeError, mutex.release) for _ in range(5): self.COMM.Barrier() test_with() for _ in range(5): random_sleep() test_with() def testAcquireRelease(self): def test_acquire_release(): mutex = self.mutex self.assertFalse(mutex.locked()) self.assertEqual(mutex.count(), 0) self.assertRaises(RuntimeError, mutex.release) mutex.acquire() self.assertTrue(mutex.locked()) self.assertEqual(mutex.count(), 1) mutex.acquire() self.assertTrue(mutex.locked()) self.assertEqual(mutex.count(), 2) mutex.acquire() 
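# recursive mutex: each nested acquire increments the ownership count (reaching 3 below),
# and the matching releases unwind it back to 0.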
self.assertTrue(mutex.locked()) self.assertEqual(mutex.count(), 3) mutex.release() self.assertTrue(mutex.locked()) self.assertEqual(mutex.count(), 2) mutex.release() self.assertTrue(mutex.locked()) self.assertEqual(mutex.count(), 1) mutex.release() self.assertFalse(mutex.locked()) self.assertEqual(mutex.count(), 0) self.assertRaises(RuntimeError, mutex.release) for _ in range(5): self.COMM.Barrier() test_acquire_release() for _ in range(5): random_sleep() test_acquire_release() def testAcquireNonblocking(self): def test_acquire_nonblocking(): comm = self.COMM mutex = self.mutex self.assertEqual(mutex.count(), 0) comm.Barrier() locked = mutex.acquire(blocking=False) self.assertEqual(mutex.locked(), locked) comm.Barrier() self.assertEqual(mutex.count(), int(locked)) if locked: self.assertEqual(mutex.count(), 1) flag = mutex.acquire(blocking=False) self.assertTrue(flag) self.assertEqual(mutex.count(), 2) flag = mutex.acquire(blocking=True) self.assertTrue(flag) self.assertEqual(mutex.count(), 3) mutex.release() self.assertEqual(mutex.count(), 2) mutex.release() self.assertEqual(mutex.count(), 1) mutex.release() comm.Barrier() self.assertFalse(mutex.locked()) self.assertEqual(mutex.count(), 0) states = comm.allgather(locked) self.assertEqual(states.count(True), 1) comm.Barrier() while not mutex.acquire(blocking=False): pass mutex.release() comm.Barrier() for _ in range(5): self.COMM.Barrier() test_acquire_nonblocking() for _ in range(5): random_sleep() test_acquire_nonblocking() def testAcquireFree(self): mutex = self.mutex mutex.acquire() mutex.acquire() mutex.acquire() for _ in range(5): mutex.free() self.assertRaises(RuntimeError, mutex.acquire) self.assertRaises(RuntimeError, mutex.release) self.assertRaises(RuntimeError, mutex.count) def testFree(self): mutex = self.mutex for _ in range(5): mutex.free() self.assertRaises(RuntimeError, mutex.acquire) self.assertRaises(RuntimeError, mutex.release) self.assertRaises(RuntimeError, mutex.count) class TestMutexRecursiveSelf(BaseTestMutexRecursive, unittest.TestCase): COMM = MPI.COMM_SELF @unittest.skipMPI('msmpi') class TestMutexRecursiveWorld(BaseTestMutexRecursive, unittest.TestCase): COMM = MPI.COMM_WORLD # --- class BaseTestCondition: COMM = MPI.COMM_NULL def setUp(self): self.mutex = None self.condition = sync.Condition(comm=self.COMM) def tearDown(self): self.condition.free() def testWaitNotify(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() cv = self.condition if rank == 0: with cv: num = cv.notify() self.assertEqual(num, 0) comm.Barrier() while num < size - 1: with cv: num += cv.notify() random_sleep() with cv: num = cv.notify() self.assertEqual(num, 0) else: comm.Barrier() with cv: random_sleep() cv.wait() self.assertRaises(RuntimeError, cv.wait) self.assertRaises(RuntimeError, cv.notify) def testWaitForNotify(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() cv = self.condition if rank == 0: with cv: num = cv.notify() self.assertEqual(num, 0) comm.Barrier() reqs1 = [comm.isend(00, dest) for dest in range(1, size)] reqs2 = [comm.isend(42, dest) for dest in range(1, size)] while num < size - 1: MPI.Request.Testall(reqs1) with cv: num += cv.notify() random_sleep() self.assertEqual(num, size - 1) MPI.Request.Waitall(reqs2) with cv: num = cv.notify() self.assertEqual(num, 0) else: comm.Barrier() with cv: random_sleep() result = cv.wait_for(lambda: comm.recv()) self.assertEqual(result, 42) self.assertRaises(RuntimeError, cv.wait_for, lambda: False) self.assertRaises(RuntimeError, cv.notify) def 
testWaitNotifyAll(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() cv = self.condition if rank == 0: with cv: num = cv.notify_all() self.assertEqual(num, 0) comm.Barrier() while num < size - 2: with cv: num += cv.notify_all() random_sleep() self.assertEqual(num, max(0, size - 2)) with cv: num = cv.notify() self.assertEqual(num, 0) elif rank == 1: comm.Barrier() else: comm.Barrier() with cv: random_sleep() cv.wait() self.assertRaises(RuntimeError, cv.wait) self.assertRaises(RuntimeError, cv.notify_all) def testAcquireFree(self): cv = self.condition cv.acquire() for _ in range(5): cv.free() def testFree(self): cv = self.condition for _ in range(5): cv.free() self.assertRaises(RuntimeError, cv.acquire) self.assertRaises(RuntimeError, cv.release) self.assertRaises(RuntimeError, cv.wait) self.assertRaises(RuntimeError, cv.wait_for, lambda: False) self.assertRaises(RuntimeError, cv.notify) self.assertRaises(RuntimeError, cv.notify_all) class TestConditionSelf(BaseTestCondition, unittest.TestCase): COMM = MPI.COMM_SELF @unittest.skipMPI('msmpi') class TestConditionWorld(BaseTestCondition, unittest.TestCase): COMM = MPI.COMM_WORLD # --- class BaseTestConditionMutex(BaseTestCondition): COMM = MPI.COMM_NULL def setUp(self): comm = self.COMM self.mutex = sync.Mutex(comm=comm) self.condition = sync.Condition(self.mutex) def tearDown(self): self.mutex.free() self.condition.free() class TestConditionMutexSelf(BaseTestConditionMutex, unittest.TestCase): COMM = MPI.COMM_SELF @unittest.skipMPI('msmpi') class TestConditionMutexWorld(BaseTestConditionMutex, unittest.TestCase): COMM = MPI.COMM_WORLD # --- class BaseTestSemaphore: COMM = MPI.COMM_NULL def setUp(self): comm = self.COMM value = max(1, comm.Get_size() - 1) self.semaphore = sync.Semaphore(value, bounded=False, comm=comm) def tearDown(self): self.semaphore.free() def testValue(self): sem = self.semaphore self.assertRaises(ValueError, sem.release, 0) self.assertRaises(ValueError, sem.release, -1) self.assertRaises(ValueError, sync.Semaphore, -1) def testBounded(self): sem = self.semaphore comm = self.COMM count = max(1, comm.size - 1) sem._bounded = False if comm.size > 1: if comm.rank == 0: sem.release() comm.Barrier() self.assertEqual(sem._counter.next(0), comm.size) comm.Barrier() if comm.size > 1: if comm.rank == 0: sem.acquire() comm.Barrier() self.assertEqual(sem._counter.next(0), count) sem._bounded = True self.assertRaises(ValueError, sem.release) comm.Barrier() sem.acquire() sem.release() comm.Barrier() self.assertEqual(sem._counter.next(0), count) def testWith(self): def test_with(): sem = self.semaphore with sem: pass for _ in range(5): self.COMM.Barrier() test_with() for _ in range(5): random_sleep() test_with() def testAcquireRelease(self): def test_acquire_release(): sem = self.semaphore locked = sem.acquire() sem.release() self.assertTrue(locked) for _ in range(5): self.COMM.Barrier() test_acquire_release() for _ in range(5): random_sleep() test_acquire_release() def testAcquireNonblocking(self): def test_acquire_nonblocking(): sem = self.semaphore comm = self.COMM count = max(1, comm.size - 1) comm.Barrier() locked = sem.acquire(blocking=False) comm.Barrier() self.assertEqual(sem._counter.next(0), 0) comm.Barrier() if locked: sem.release() comm.Barrier() states = comm.allgather(locked) self.assertEqual(states.count(True), count) self.assertEqual(sem._counter.next(0), count) comm.Barrier() while not sem.acquire(blocking=False): random_sleep() sem.release() comm.Barrier() self.assertEqual(sem._counter.next(0), 
count) for _ in range(5): self.COMM.Barrier() test_acquire_nonblocking() for _ in range(5): random_sleep() test_acquire_nonblocking() def testAcquireFree(self): comm = self.COMM sem = self.semaphore if comm.rank > 0: sem.acquire() for _ in range(5): sem.free() def testFree(self): sem = self.semaphore for _ in range(5): sem.free() self.assertRaises(RuntimeError, sem.acquire) self.assertRaises(RuntimeError, sem.release) class TestSemaphoreSelf(BaseTestSemaphore, unittest.TestCase): COMM = MPI.COMM_SELF @unittest.skipMPI('msmpi') class TestSemaphoreWorld(BaseTestSemaphore, unittest.TestCase): COMM = MPI.COMM_WORLD # --- try: MPI.Win.Allocate(1, 1, MPI.INFO_NULL, MPI.COMM_SELF).Free() except (NotImplementedError, MPI.Exception): unittest.skip('mpi-win-allocate')(BaseTestCounter) unittest.skip('mpi-win-allocate')(BaseTestMutexBasic) unittest.skip('mpi-win-allocate')(BaseTestMutexRecursive) unittest.skip('mpi-win-allocate')(BaseTestCondition) unittest.skip('mpi-win-allocate')(BaseTestSemaphore) # --- if __name__ == "__main__": unittest.main() mpi4py-4.0.3/test/test_win.py000066400000000000000000000234021475341043600161350ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import sys, os try: sys.getrefcount except AttributeError: class getrefcount: def __init__(self, arg): pass def __eq__(self, other): return True def __add__(self, other): return self def __sub__(self, other): return self def memzero(m): try: m[:] = 0 except IndexError: # cffi buffer m[0:len(m)] = b'\0'*len(m) def ch3_sock(): return 'ch3:sock' in MPI.Get_library_version() def github(): return os.environ.get('GITHUB_ACTIONS') == 'true' class TestWinNull(unittest.TestCase): def testConstructor(self): win = MPI.Win() self.assertEqual(win, MPI.WIN_NULL) self.assertIsNot(win, MPI.WIN_NULL) def construct(): MPI.Win((1,2,3)) self.assertRaises(TypeError, construct) def testGetName(self): name = MPI.WIN_NULL.Get_name() self.assertEqual(name, "MPI_WIN_NULL") class BaseTestWin: COMM = MPI.COMM_NULL INFO = MPI.INFO_NULL CREATE_FLAVOR = MPI.UNDEFINED def testGetAttr(self): base = MPI.Get_address(self.memory) size = len(self.memory) unit = 1 self.assertEqual(size, self.WIN.Get_attr(MPI.WIN_SIZE)) self.assertEqual(unit, self.WIN.Get_attr(MPI.WIN_DISP_UNIT)) self.assertEqual(base, self.WIN.Get_attr(MPI.WIN_BASE)) def testMemory(self): memory = self.WIN.tomemory() self.assertEqual(memory.format, 'B') pointer = MPI.Get_address(memory) length = len(memory) base, size, dunit = self.WIN.attrs self.assertEqual(size, length) self.assertEqual(dunit, 1) self.assertEqual(base, pointer) def testAttributes(self): base, size, unit = self.WIN.attrs self.assertEqual(base, MPI.Get_address(self.memory)) self.assertEqual(size, len(self.memory)) self.assertEqual(unit, 1) def testGetGroup(self): cgroup = self.COMM.Get_group() wgroup = self.WIN.Get_group() grpcmp = MPI.Group.Compare(cgroup, wgroup) cgroup.Free() wgroup.Free() self.assertEqual(grpcmp, MPI.IDENT) def testGetSetInfo(self): #info = MPI.INFO_NULL #self.WIN.Set_info(info) info = MPI.Info.Create() self.WIN.Set_info(info) info.Free() info = self.WIN.Get_info() self.WIN.Set_info(info) info.Free() def testGetSetErrhandler(self): for ERRHANDLER in [MPI.ERRORS_ARE_FATAL, MPI.ERRORS_RETURN, MPI.ERRORS_ARE_FATAL, MPI.ERRORS_RETURN,]: errhdl_1 = self.WIN.Get_errhandler() self.assertNotEqual(errhdl_1, MPI.ERRHANDLER_NULL) self.WIN.Set_errhandler(ERRHANDLER) errhdl_2 = self.WIN.Get_errhandler() self.assertEqual(errhdl_2, ERRHANDLER) errhdl_2.Free() self.assertEqual(errhdl_2, 
MPI.ERRHANDLER_NULL) self.WIN.Set_errhandler(errhdl_1) errhdl_1.Free() self.assertEqual(errhdl_1, MPI.ERRHANDLER_NULL) def testGetSetName(self): try: name = self.WIN.Get_name() self.WIN.Set_name("mywin") self.assertEqual(self.WIN.Get_name(), "mywin") self.WIN.Set_name(name) self.assertEqual(self.WIN.Get_name(), name) self.WIN.name = self.WIN.name except NotImplementedError: self.skipTest('mpi-win-name') @unittest.skipIf(MPI.WIN_CREATE_FLAVOR == MPI.KEYVAL_INVALID, 'mpi-win-flavor') def testCreateFlavor(self): flavors = (MPI.WIN_FLAVOR_CREATE, MPI.WIN_FLAVOR_ALLOCATE, MPI.WIN_FLAVOR_DYNAMIC, MPI.WIN_FLAVOR_SHARED,) flavor = self.WIN.Get_attr(MPI.WIN_CREATE_FLAVOR) self.assertIn(flavor, flavors) self.assertEqual(flavor, self.WIN.flavor) self.assertEqual(flavor, self.CREATE_FLAVOR) @unittest.skipIf(MPI.WIN_MODEL == MPI.KEYVAL_INVALID, 'mpi-win-model') def testMemoryModel(self): models = (MPI.WIN_SEPARATE, MPI.WIN_UNIFIED) model = self.WIN.Get_attr(MPI.WIN_MODEL) self.assertIn(model, models) self.assertEqual(model, self.WIN.model) def testPyProps(self): win = self.WIN # group = win.group self.assertEqual(type(group), MPI.Group) self.assertEqual(win.group_size, group.Get_size()) self.assertEqual(win.group_rank, group.Get_rank()) group.Free() # info = win.info self.assertIs(type(info), MPI.Info) win.info = info info.Free() # self.assertEqual(type(win.attrs), tuple) self.assertEqual(type(win.flavor), int) self.assertEqual(type(win.model), int) self.assertEqual(type(win.name), str) win.name = "mywin" self.assertEqual(win.name, "mywin") def testPickle(self): from pickle import dumps, loads with self.assertRaises(ValueError): loads(dumps(self.WIN)) class BaseTestWinCreate(BaseTestWin): CREATE_FLAVOR = MPI.WIN_FLAVOR_CREATE def setUp(self): self.memory = MPI.Alloc_mem(10) memzero(self.memory) self.WIN = MPI.Win.Create(self.memory, 1, self.INFO, self.COMM) def tearDown(self): self.WIN.Free() MPI.Free_mem(self.memory) class BaseTestWinAllocate(BaseTestWin): CREATE_FLAVOR = MPI.WIN_FLAVOR_ALLOCATE def setUp(self): self.WIN = MPI.Win.Allocate(10, 1, self.INFO, self.COMM) self.memory = self.WIN.tomemory() memzero(self.memory) def tearDown(self): self.WIN.Free() class BaseTestWinAllocateShared(BaseTestWin): CREATE_FLAVOR = MPI.WIN_FLAVOR_SHARED def setUp(self): self.WIN = MPI.Win.Allocate_shared(10, 1, self.INFO, self.COMM) self.memory = self.WIN.tomemory() memzero(self.memory) def tearDown(self): self.WIN.Free() def testSharedQuery(self): memory = self.WIN.tomemory() address = MPI.Get_address(memory) length = len(memory) memories = self.COMM.allgather((address, length)) rank = self.COMM.Get_rank() size = self.COMM.Get_size() for i in range(size): mem, disp = self.WIN.Shared_query(rank) base = MPI.Get_address(mem) size = len(mem) if i == rank: self.assertEqual(base, memories[i][0]) self.assertEqual(size, memories[i][1]) self.assertEqual(disp, 1) @unittest.skipMPI('impi(==2021.14.0)', github()) @unittest.skipMPI('impi(==2021.14.1)', github()) class BaseTestWinCreateDynamic(BaseTestWin): CREATE_FLAVOR = MPI.WIN_FLAVOR_DYNAMIC def setUp(self): self.WIN = MPI.Win.Create_dynamic(self.INFO, self.COMM) def tearDown(self): self.WIN.Free() def testGetAttr(self): base = self.WIN.Get_attr(MPI.WIN_BASE) size = self.WIN.Get_attr(MPI.WIN_SIZE) self.assertEqual(base, 0) self.assertEqual(size, 0) def testMemory(self): memory = self.WIN.tomemory() self.assertEqual(memory.format, 'B') base = MPI.Get_address(memory) size = len(memory) self.assertEqual(base, 0) self.assertEqual(size, 0) def testAttributes(self): base, size, _ 
= self.WIN.attrs self.assertEqual(base, 0) self.assertEqual(size, 0) @unittest.skipMPI('msmpi(<9.1.0)') def testAttachDetach(self): mem1 = MPI.Alloc_mem(8) mem2 = MPI.Alloc_mem(16) mem3 = MPI.Alloc_mem(32) for mem in (mem1, mem2, mem3): self.WIN.Attach(mem) self.testMemory() self.WIN.Detach(mem) for mem in (mem1, mem2, mem3): self.WIN.Attach(mem) self.testMemory() for mem in (mem1, mem2, mem3): self.WIN.Detach(mem) for mem in (mem1, mem2, mem3): self.WIN.Attach(mem) self.testMemory() for mem in (mem3, mem2, mem1): self.WIN.Detach(mem) MPI.Free_mem(mem1) MPI.Free_mem(mem2) MPI.Free_mem(mem3) class TestWinCreateSelf(BaseTestWinCreate, unittest.TestCase): COMM = MPI.COMM_SELF @unittest.skipMPI('openmpi(<1.4.0)') class TestWinCreateWorld(BaseTestWinCreate, unittest.TestCase): COMM = MPI.COMM_WORLD class TestWinAllocateSelf(BaseTestWinAllocate, unittest.TestCase): COMM = MPI.COMM_SELF @unittest.skipMPI('openmpi(<1.4.0)') class TestWinAllocateWorld(BaseTestWinAllocate, unittest.TestCase): COMM = MPI.COMM_WORLD class TestWinAllocateSharedSelf(BaseTestWinAllocateShared, unittest.TestCase): COMM = MPI.COMM_SELF @unittest.skipMPI('mpich', ch3_sock() and MPI.COMM_WORLD.Get_size() > 1) class TestWinAllocateSharedWorld(BaseTestWinAllocateShared, unittest.TestCase): COMM = MPI.COMM_WORLD class TestWinCreateDynamicSelf(BaseTestWinCreateDynamic, unittest.TestCase): COMM = MPI.COMM_SELF class TestWinCreateDynamicWorld(BaseTestWinCreateDynamic, unittest.TestCase): COMM = MPI.COMM_WORLD try: MPI.Win.Create(MPI.BOTTOM, 1, MPI.INFO_NULL, MPI.COMM_SELF).Free() except (NotImplementedError, MPI.Exception): unittest.disable(BaseTestWinCreate, 'mpi-win-create') try: MPI.Win.Allocate(1, 1, MPI.INFO_NULL, MPI.COMM_SELF).Free() except (NotImplementedError, MPI.Exception): unittest.disable(BaseTestWinAllocate, 'mpi-win-allocate') try: MPI.Win.Allocate_shared(1, 1, MPI.INFO_NULL, MPI.COMM_SELF).Free() except (NotImplementedError, MPI.Exception): unittest.disable(BaseTestWinAllocateShared, 'mpi-win-shared') try: MPI.Win.Create_dynamic(MPI.INFO_NULL, MPI.COMM_SELF).Free() except (NotImplementedError, MPI.Exception): unittest.disable(BaseTestWinCreateDynamic, 'mpi-win-dynamic') if __name__ == '__main__': unittest.main() mpi4py-4.0.3/tox.ini000066400000000000000000000123511475341043600142640ustar00rootroot00000000000000# Tox (https://tox.readthedocs.io) is a tool for running tests # in multiple virtualenvs. This configuration file will run the # test suite on all supported python versions. To use it, # "pip install tox" and then run "tox" from this directory. 
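# Example invocations (illustrative only; the environment names are defined below):
#   tox -e py312        # run the MPI test suite under CPython 3.12
#   tox -e lint,type    # run the linters and static type checkers
#   tox -e docs         # build the HTML, man, info, and PDF documentation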
[tox]
minversion = 3.18.0
skip_missing_interpreters=True
envlist =
    py36
    py37
    py38
    py39
    py310
    py311
    py312
    py313
    pypy3.7
    pypy3.8
    pypy3.9

[env]
passenv =
    CPPFLAGS
    CFLAGS
    CXXFLAGS
    LDFLAGS
    MPI4PY_*
    MPICFG
    MPICC
    MPICXX
    MPILD
    MPIEXEC
    MPIEXEC_*
    HYDRA_*
    MPICH_*
    OMPI_*
    I_MPI_*
    MSMPI_*
    MV2_*

[mpi]
mpicc = {env:MPICC:mpicc}
mpicxx = {env:MPICXX:mpicxx}
mpiexec = {env:MPIEXEC:mpiexec}

[testenv:.pkg]
passenv = {[env]passenv}

[testenv]
labels = test
deps = -r{toxinidir}/conf/requirements-test.txt
allowlist_externals = {[mpi]mpiexec}
passenv = {[env]passenv}
commands =
    {[mpi]mpiexec} -n 1 {envpython} -m mpi4py --version
    {[mpi]mpiexec} -n 5 {envpython} -m mpi4py -m mpi4py.bench helloworld
    {[mpi]mpiexec} -n 5 {envpython} -m mpi4py -m mpi4py.bench ringtest
    {[mpi]mpiexec} -n 5 {envpython} -m mpi4py -m mpi4py.bench pingpong -q -n 4096
    {[mpi]mpiexec} -n 5 {envpython} -m mpi4py -m mpi4py.bench futures -q -n 4096
    {[mpi]mpiexec} -n 1 {envpython} {toxinidir}/test/main.py --no-builddir -q -e spawn []
    {[mpi]mpiexec} -n 5 {envpython} {toxinidir}/test/main.py --no-builddir -q -e spawn []
    {[mpi]mpiexec} -n 1 {envpython} {toxinidir}/demo/futures/test_futures.py -q []
    {[mpi]mpiexec} -n 5 {envpython} {toxinidir}/demo/futures/test_futures.py -q []
    {[mpi]mpiexec} -n 1 {envpython} -m mpi4py.futures {toxinidir}/demo/futures/test_futures.py -q []
    {[mpi]mpiexec} -n 5 {envpython} -m mpi4py.futures {toxinidir}/demo/futures/test_futures.py -q []
    {envpython} {toxinidir}/demo/test-run/test_run.py -q []

[testenv:build-cmake]
labels = build,cmake
deps = build
skip_install = true
allowlist_externals = {[mpi]mpiexec},rm
passenv = {[env]passenv}
setenv =
    CFLAGS=-O0 -Wp,-U_FORTIFY_SOURCE
    MPI4PY_BUILD_BACKEND=cmake
commands_pre =
    rm -rf build
commands =
    {envpython} -m build --outdir {envtmpdir}/dist
    {envpython} -m pip install --verbose .
    mpiexec -n 1 {envpython} test/main.py test_package

[testenv:build-meson]
labels = build,meson
deps = build
skip_install = true
allowlist_externals = {[mpi]mpiexec},rm
passenv = {[env]passenv}
setenv =
    CFLAGS=-O0 -Wp,-U_FORTIFY_SOURCE
    MPI4PY_BUILD_BACKEND=meson
commands_pre =
    rm -rf .mesonpy-*
commands =
    {envpython} -m build --outdir {envtmpdir}/dist
    {envpython} -m pip install --verbose .
    mpiexec -n 1 {envpython} test/main.py test_package

[testenv:code]
labels = code
deps =
    setuptools
    -r{toxinidir}/conf/requirements-build-cython.txt
skip_install = true
allowlist_externals = mv
setenv =
    MPICFG=nompi
    CFLAGS=-O0 -Wp,-U_FORTIFY_SOURCE
commands =
    {envpython} conf/mpiapigen.py
    {envpython} setup.py --quiet build -b build/code --force
    mv src/lib-mpi/pympiconf.h conf/nompi/pympiconf.h
    {envpython} setup.py --quiet build -b build/code install --force
    {envpython} conf/mpistubgen.py

[testenv:conf]
labels = conf,check
deps =
    setuptools
    -r{toxinidir}/conf/requirements-build-cython.txt
skip_install = true
allowlist_externals = diff
setenv =
    MPICFG=nompi
    CFLAGS=-O0 -Wp,-U_FORTIFY_SOURCE
commands =
    {envpython} setup.py --quiet build -b build/conf --force
    diff -u conf/nompi/pympiconf.h src/lib-mpi/pympiconf.h

[testenv:lint]
labels = lint,check
deps = -r{toxinidir}/conf/requirements-lint.txt
allowlist_externals = sh
setenv =
    MPICFG=nompi-fast
    CFLAGS=-O0
commands =
    ruff check
    flake8 docs src
    flake8 --select=A test
    pylint mpi4py
    codespell
    sh conf/cythonize.sh -Wextra -Werror
    cython-lint .
    yamllint .
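# The check environments above are usually run individually, for example:
#
#   tox -e conf              # rebuild and diff the no-MPI pympiconf.h header
#   tox -e lint              # ruff, flake8, pylint, codespell, cython-lint, yamllint
#
# Both set MPICFG to a no-MPI variant, so they are expected to work without a
# system MPI installation.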
[testenv:type]
labels = type,check
deps = -r{toxinidir}/conf/requirements-type.txt
setenv =
    MPICFG=nompi-fast
    CFLAGS=-O0
commands =
    stubtest mpi4py
    mypy --python-version 3.13 -p mpi4py
    mypy --python-version 3.12 -p mpi4py
    mypy --python-version 3.11 -p mpi4py
    mypy --python-version 3.10 -p mpi4py
    mypy --python-version 3.9 -p mpi4py
    mypy --python-version 3.8 -p mpi4py

[testenv:docs]
labels = docs
deps = -r{toxinidir}/conf/requirements-docs.txt
allowlist_externals = rm,mv
passenv = SOURCE_DATE_EPOCH
setenv =
    MPICFG=nompi-fast
    CFLAGS=-O0
    TOPDIR=
    DOCDIR=docs/
    SRCDIR=docs/source/
    BLDDIR=build/
    OUTDIR=docs/
    LATEXMKOPTS=-quiet
commands_pre =
    {envpython} -m pip uninstall --yes sphinx-rtd-theme
    -mv {envbindir}/rst2html5.py {envbindir}/rst2html5
    rm -rf {env:OUTDIR}html
commands =
    rst2html5 {env:DOCDIR}index.rst {env:OUTDIR}index.html
    sphinx-build -M html {env:SRCDIR} {env:BLDDIR} -q -E -W -j auto
    sphinx-build -M man {env:SRCDIR} {env:BLDDIR} -q -E -W -j auto
    sphinx-build -M info {env:SRCDIR} {env:BLDDIR} -q -E -W -j auto
    sphinx-build -M latexpdf {env:SRCDIR} {env:BLDDIR} -q -E -W -j auto
    rm -r {env:SRCDIR}reference
    rm {env:BLDDIR}html/.buildinfo
    mv {env:BLDDIR}html {env:OUTDIR}
    mv {env:BLDDIR}man/mpi4py.3 {env:OUTDIR}
    mv {env:BLDDIR}texinfo/mpi4py.info {env:OUTDIR}
    mv {env:BLDDIR}latex/mpi4py.pdf {env:OUTDIR}
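# A documentation build, for example, is a single environment:
#
#   tox -e docs
#
# The commands above place the rendered HTML tree, the mpi4py.3 man page,
# mpi4py.info, and mpi4py.pdf under {env:OUTDIR} (docs/).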