==> python-pot-0.9.3+dfsg/.circleci/artifact_path <==
0/docs/build/html/index.html

==> python-pot-0.9.3+dfsg/.circleci/config.yml <==
# Tagging a commit with [circle front] will build the front page and perform test-doc.
# Tagging a commit with [circle full] will build everything.
version: 2
jobs:
  build_docs:
    docker:
      - image: cimg/python:3.10
    resource_class: medium
    steps:
      - checkout
      - run:
          name: Set BASH_ENV
          command: |
            echo "set -e" >> $BASH_ENV
            echo "export DISPLAY=:99" >> $BASH_ENV
            echo "export OPENBLAS_NUM_THREADS=4" >> $BASH_ENV
            echo "BASH_ENV:"
            cat $BASH_ENV
      - run:
          name: Merge with upstream
          command: |
            echo $(git log -1 --pretty=%B) | tee gitlog.txt
            echo ${CI_PULL_REQUEST//*pull\//} | tee merge.txt
            if [[ $(cat merge.txt) != "" ]]; then
              echo "Merging $(cat merge.txt)";
              git remote add upstream https://github.com/PythonOT/POT.git;
              git pull --ff-only upstream "refs/pull/$(cat merge.txt)/merge";
              git fetch upstream master;
            fi
      # Load our data
      - restore_cache:
          keys:
            - data-cache-0
            - pip-cache
      - run:
          name: Install ffmpeg
          command: |
            sudo apt update
            sudo apt install ffmpeg
      - run:
          name: Get Python running
          command: |
            python -m pip install --user --upgrade --progress-bar off pip
            python -m pip install --user -e .
            python -m pip install --user --upgrade --no-cache-dir --progress-bar off -r requirements.txt
            python -m pip install --user --upgrade --progress-bar off -r docs/requirements.txt
            python -m pip install --user --upgrade --progress-bar off ipython sphinx-gallery memory_profiler
            # python -m pip install --user --upgrade --progress-bar off ipython "https://api.github.com/repos/sphinx-gallery/sphinx-gallery/zipball/master" memory_profiler
      - save_cache:
          key: pip-cache
          paths:
            - ~/.cache/pip
      # Look at what we have and fail early if there is some library conflict
      - run:
          name: Check installation
          command: |
            which python
            python -c "import ot"
      - run:
          name: Correct link in release file
          command: |
            sed -i -r 's/PR #([[:digit:]]*)/\[PR #\1\]\(https:\/\/github.com\/PythonOT\/POT\/pull\/\1\)/' RELEASES.md
            sed -i -r 's/Issue #([[:digit:]]*)/\[Issue #\1\]\(https:\/\/github.com\/PythonOT\/POT\/issues\/\1\)/' RELEASES.md
      # Build docs
      - run:
          name: make html
          command: |
            cd docs;
            make html;
          no_output_timeout: 30m
      # Save the outputs
      - store_artifacts:
          path: docs/build/html/
          destination: dev
      - persist_to_workspace:
          root: docs/build
          paths:
            - html

  deploy_master:
    docker:
      - image: circleci/python:3.6-jessie
    steps:
      - attach_workspace:
          at: /tmp/build
      - run:
          name: Fetch docs
          command: |
            set -e
            mkdir -p ~/.ssh
            echo -e "Host *\nStrictHostKeyChecking no" > ~/.ssh/config
            chmod og= ~/.ssh/config
            if [ ! -d ~/PythonOT.github.io ]; then
              git clone git@github.com:/PythonOT/PythonOT.github.io.git ~/PythonOT.github.io --depth=1
            fi
      - run:
          name: Deploy docs
          command: |
            set -e;
            if [ "${CIRCLE_BRANCH}" == "master" ]; then
              git config --global user.email "circle@PythonOT.com";
              git config --global user.name "Circle CI";
              cd ~/PythonOT.github.io;
              git checkout master
              git remote -v
              git fetch origin
              git reset --hard origin/master
              git clean -xdf
              echo "Deploying dev docs for ${CIRCLE_BRANCH}.";
              cd master
              cp -a /tmp/build/html/* .;
              touch .nojekyll;
              git add -A;
              git commit -m "CircleCI update of dev docs (${CIRCLE_BUILD_NUM}).";
              git push origin master;
            else
              echo "No deployment (build: ${CIRCLE_BRANCH}).";
            fi

  deploy_tag:
    docker:
      - image: circleci/python:3.6-jessie
    steps:
      - attach_workspace:
          at: /tmp/build
      - run:
          name: Fetch docs
          command: |
            set -e
            mkdir -p ~/.ssh
            echo -e "Host *\nStrictHostKeyChecking no" > ~/.ssh/config
            chmod og= ~/.ssh/config
            if [ ! -d ~/PythonOT.github.io ]; then
              git clone git@github.com:/PythonOT/PythonOT.github.io.git ~/PythonOT.github.io --depth=1
            fi
      - run:
          name: Deploy docs
          command: |
            set -e;
            git config --global user.email "circle@PythonOT.com";
            git config --global user.name "Circle CI";
            cd ~/PythonOT.github.io;
            git checkout master
            git remote -v
            git fetch origin
            git reset --hard origin/master
            git clean -xdf
            echo "Deploying dev docs for ${CIRCLE_BRANCH}.";
            cp -a /tmp/build/html/* .;
            touch .nojekyll;
            git add -A;
            git commit -m "CircleCI update of dev docs (${CIRCLE_BUILD_NUM}).";
            git push origin master;

workflows:
  version: 2
  default:
    jobs:
      - build_docs:
          filters:
            tags:
              only: /[0-9]+(\.[0-9]+)*$/
      - deploy_master:
          requires:
            - build_docs
          filters:
            branches:
              only:
                - master
      - deploy_tag:
          requires:
            - build_docs
          filters:
            branches:
              ignore: /.*/
            tags:
              only: /[0-9]+(\.[0-9]+)*$/

==> python-pot-0.9.3+dfsg/.gitattributes <==
ot/lp/*.cpp linguist-vendored
ot/lp/*.h linguist-vendored
*.ipynb linguist-documentation

==> python-pot-0.9.3+dfsg/.github/CODE_OF_CONDUCT.md <==
# Contributor Covenant Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
## Our Standards

Examples of behavior that contributes to creating a positive environment include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.

## Scope

This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]

[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/

==> python-pot-0.9.3+dfsg/.github/CONTRIBUTING.md <==
Contributing to POT
===================

First off, thank you for considering contributing to POT.

How to contribute
-----------------

The preferred workflow for contributing to POT is to fork the
[main repository](https://github.com/rflamary/POT) on GitHub, clone, and develop on a branch. Steps:

1. Fork the [project repository](https://github.com/rflamary/POT) by clicking
   on the 'Fork' button near the top right of the page. This creates a copy of
   the code under your GitHub user account. For more details on how to fork a
   repository see [this guide](https://help.github.com/articles/fork-a-repo/).

2.
   Clone your fork of the POT repo from your GitHub account to your local disk:

   ```bash
   $ git clone git@github.com:YourLogin/POT.git
   $ cd POT
   ```

3. Create a ``feature`` branch to hold your development changes:

   ```bash
   $ git checkout -b my-feature
   ```

   Always use a ``feature`` branch. It's good practice to never work on the ``master`` branch!

4. Develop the feature on your feature branch. Add changed files using ``git add`` and commit them with ``git commit``:

   ```bash
   $ git add modified_files
   $ git commit
   ```

   to record your changes in Git, then push the changes to your GitHub account with:

   ```bash
   $ git push -u origin my-feature
   ```

5. Follow [these instructions](https://help.github.com/articles/creating-a-pull-request-from-a-fork)
   to create a pull request from your fork. This will send an email to the committers.

(If any of the above seems like magic to you, please look up the
[Git documentation](https://git-scm.com/documentation) on the web, or ask a friend or another contributor for help.)

Pull Request Checklist
----------------------

We recommend that your contribution comply with the following rules before you submit a pull request:

- Follow the PEP8 Guidelines.

- If your pull request addresses an issue, please use the pull request title to describe the issue and mention the issue number in the pull request description. This will make sure a link back to the original issue is created.

- All public methods should have informative docstrings with sample usage presented as doctests when appropriate (see the sketch after this list).

- Please prefix the title of your pull request with `[MRG]` (Ready for Merge), if the contribution is complete and ready for a detailed review. Two core developers will review your code and change the prefix of the pull request to `[MRG + 1]` and `[MRG + 2]` on approval, making it eligible for merging. An incomplete contribution -- where you expect to do more work before receiving a full review -- should be prefixed `[WIP]` (to indicate a work in progress) and changed to `[MRG]` when it matures. WIPs may be useful to: indicate you are working on something to avoid duplicated work, request broad review of functionality or API, or seek collaborators. WIPs often benefit from the inclusion of a [task list](https://github.com/blog/1375-task-lists-in-gfm-issues-pulls-comments) in the PR description.

- When adding additional functionality, provide at least one example script in the ``examples/`` folder. Have a look at other examples for reference. Examples should demonstrate why the new functionality is useful in practice and, if possible, compare it to other methods available in POT.

- Documentation and high-coverage tests are necessary for enhancements to be accepted. Bug-fixes or new features should be provided with [non-regression tests](https://en.wikipedia.org/wiki/Non-regression_testing). These tests verify the correct behavior of the fix or feature. In this manner, further modifications on the code base are guaranteed to be consistent with the desired behavior. For bug fixes, at the time of the PR, these tests should fail on the code base in master and pass with the PR code.

- At least one paragraph of narrative documentation with links to references in the literature (with PDF links when possible) and the example.
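As a concrete illustration of the docstring rule above, a new public function could be documented as follows. This is only a sketch: the function `pairwise_sqeuclidean` and its placement are hypothetical; the point is the NumPy-style sections and the runnable doctest:

```python
import numpy as np


def pairwise_sqeuclidean(x, y):
    r"""Compute the pairwise squared Euclidean cost matrix.

    (Hypothetical example used to illustrate the docstring format.)

    Parameters
    ----------
    x : ndarray, shape (n, d)
        Source samples.
    y : ndarray, shape (m, d)
        Target samples.

    Returns
    -------
    M : ndarray, shape (n, m)
        Cost matrix with ``M[i, j] = ||x[i] - y[j]||^2``.

    Examples
    --------
    >>> x = np.array([[0.0], [1.0]])
    >>> y = np.array([[0.0]])
    >>> pairwise_sqeuclidean(x, y)
    array([[0.],
           [1.]])
    """
    return ((x[:, None, :] - y[None, :, :]) ** 2).sum(axis=-1)
```

Doctests like this one are run by the test suite (the CI invokes pytest with ``--doctest-modules``), so they double as small non-regression tests.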
You can also check for common programming errors with the following tools:

- No pyflakes warnings, check with:

  ```bash
  $ pip install pyflakes
  $ pyflakes path/to/module.py
  ```

- No PEP8 warnings, check with:

  ```bash
  $ pip install pep8
  $ pep8 path/to/module.py
  ```

- AutoPEP8 can help you fix some of the easy redundant errors:

  ```bash
  $ pip install autopep8
  $ autopep8 path/to/pep8.py
  ```

Bonus points for contributions that include a performance analysis with a benchmark script and profiling output (please report on the mailing list or on the GitHub issue).

Filing bugs
-----------

We use GitHub issues to track all bugs and feature requests; feel free to open an issue if you have found a bug or wish to see a feature implemented.

It is recommended to check that your issue complies with the following rules before submitting:

- Verify that your issue is not being currently addressed by other [issues](https://github.com/rflamary/POT/issues?q=) or [pull requests](https://github.com/rflamary/POT/pulls?q=).

- Please ensure all code snippets and error messages are formatted in appropriate code blocks. See [Creating and highlighting code blocks](https://help.github.com/articles/creating-and-highlighting-code-blocks).

- Please include your operating system type and version number, as well as your Python, POT, numpy, and scipy versions. This information can be found by running the following code snippet:

  ```python
  import platform; print(platform.platform())
  import sys; print("Python", sys.version)
  import numpy; print("NumPy", numpy.__version__)
  import scipy; print("SciPy", scipy.__version__)
  import ot; print("POT", ot.__version__)
  ```

- Please be specific about what estimators and/or functions are involved and the shape of the data, as appropriate; please include a [reproducible](http://stackoverflow.com/help/mcve) code snippet or link to a [gist](https://gist.github.com). If an exception is raised, please provide the traceback.

New contributor tips
--------------------

A great way to start contributing to POT is to pick an item from the list of [Easy issues](https://github.com/rflamary/POT/issues?labels=Easy) in the issue tracker. Resolving these issues allows you to start contributing to the project without much prior knowledge. Your assistance in this area will be greatly appreciated by the more experienced developers as it helps free up their time to concentrate on other issues.

Documentation
-------------

We are glad to accept any sort of documentation: function docstrings, reStructuredText documents (like this one), tutorials, etc. reStructuredText documents live in the source code repository under the docs/ directory.

You can edit the documentation using any text editor and then generate the HTML output by typing ``make html`` from the ``docs/`` directory. Alternatively, ``make`` can be used to quickly generate the documentation without the example gallery with `make html-noplot`. The resulting HTML files will be placed in `docs/build/html/` and are viewable in a web browser.

For building the documentation, you will need [sphinx](http://sphinx.pocoo.org/), [matplotlib](http://matplotlib.org/), and [pillow](http://pillow.readthedocs.io/en/latest/).

When you are writing documentation, it is important to keep a good compromise between mathematical and algorithmic details, and give intuition to the reader on what the algorithm does. It is best to always start with a small paragraph with a hand-waving explanation of what the method does to the data and a figure (coming from an example) illustrating it.
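For example, a typical local documentation build might look like the following (a sketch assuming the dependencies from `docs/requirements.txt` install cleanly on your machine):

```bash
# from the repository root
pip install -r requirements.txt -r docs/requirements.txt
cd docs
make html-noplot   # quick build that skips running the example gallery
# make html        # full build including the examples (much slower)
# then open build/html/index.html in a browser to inspect the result
```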
This contribution guide is strongly inspired by that of the [scikit-learn](https://github.com/scikit-learn/scikit-learn) team.

==> python-pot-0.9.3+dfsg/.github/ISSUE_TEMPLATE/bug_report.md <==
---
name: Bug report
about: Create a report to help us improve POT
title: ''
labels: bug, help wanted
assignees: ''

---

## Describe the bug

### To Reproduce

Steps to reproduce the behavior:
1. ...
2.

#### Screenshots

#### Code sample

### Expected behavior

### Environment (please complete the following information):

- OS (e.g. MacOS, Windows, Linux):
- Python version:
- How was POT installed (source, `pip`, `conda`):
- Build command you used (if compiling from source):
- Only for GPU related bugs:
  - CUDA version:
  - GPU models and configuration:
  - Any other relevant information:

Output of the following code snippet:

```python
import platform; print(platform.platform())
import sys; print("Python", sys.version)
import numpy; print("NumPy", numpy.__version__)
import scipy; print("SciPy", scipy.__version__)
import ot; print("POT", ot.__version__)
```

### Additional context

==> python-pot-0.9.3+dfsg/.github/ISSUE_TEMPLATE/feature_request.md <==
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: enhancement, feature request
assignees: ''

---

## 🚀 Feature

### Motivation

### Pitch

### Alternatives

### Additional context

==> python-pot-0.9.3+dfsg/.github/PULL_REQUEST_TEMPLATE.md <==
## Types of changes

## Motivation and context / Related issue

## How has this been tested (if it applies)

## PR checklist

- [ ] I have read the [**CONTRIBUTING**](CONTRIBUTING.md) document.
- [ ] The documentation is up-to-date with the changes I made (check build artifacts).
- [ ] All tests passed, and additional code has been **covered with new tests**.
- [ ] I have added the PR and Issue fix to the [**RELEASES.md**](RELEASES.md) file.

==> python-pot-0.9.3+dfsg/.github/requirements_strict.txt <==
numpy
scipy>=1.3
cython
pytest

==> python-pot-0.9.3+dfsg/.github/requirements_test_windows.txt <==
numpy
scipy>=1.3
cython
matplotlib
autograd
pymanopt
cvxopt
scikit-learn
pytest
==> python-pot-0.9.3+dfsg/.github/workflows/build_doc.yml <==
name: Build doc

on:
  workflow_dispatch:
  pull_request:
  push:
    branches:
      - 'master'

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v1
      # Standard drop-in approach that should work for most people.
      - name: Set up Python 3.8
        uses: actions/setup-python@v1
        with:
          python-version: 3.8
      - name: Get Python running
        run: |
          python -m pip install --user --upgrade --progress-bar off pip
          python -m pip install --user --upgrade --progress-bar off -r requirements.txt
          python -m pip install --user --upgrade --progress-bar off -r docs/requirements.txt
          python -m pip install --user --upgrade --progress-bar off ipython "https://api.github.com/repos/sphinx-gallery/sphinx-gallery/zipball/master" memory_profiler
          python -m pip install --user -e .
      # Look at what we have and fail early if there is some library conflict
      - name: Check installation
        run: |
          which python
          python -c "import ot"
      # Build docs
      - name: Generate HTML docs
        uses: rickstaa/sphinx-action@master
        with:
          docs-folder: "docs/"
      - uses: actions/upload-artifact@v1
        with:
          name: Documentation
          path: docs/build/html/

==> python-pot-0.9.3+dfsg/.github/workflows/build_tests.yml <==
name: Tests

on:
  workflow_dispatch:
  pull_request:
    branches:
      - 'master'
  push:
    branches:
      - 'master'
  create:
    branches:
      - 'master'
    tags:
      - '**'

jobs:
  linux:
    runs-on: ubuntu-latest
    if: "!contains(github.event.head_commit.message, 'no ci')"
    strategy:
      max-parallel: 4
      matrix:
        python-version: ["3.8", "3.9", "3.10", "3.11"]

    steps:
      - uses: actions/checkout@v1
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v1
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install POT
        run: |
          pip install -e .
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
          pip install pytest pytest-cov
      - name: Run tests
        run: |
          python -m pytest --durations=20 -v test/ ot/ --doctest-modules --color=yes --cov=./ --cov-report=xml
      - name: Upload coverage reports to Codecov with GitHub Action
        uses: codecov/codecov-action@v3

  pep8:
    runs-on: ubuntu-latest
    if: "!contains(github.event.head_commit.message, 'no pep8')"
    steps:
      - uses: actions/checkout@v1
      - name: Set up Python
        uses: actions/setup-python@v1
        with:
          python-version: "3.10"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install flake8
      - name: Lint with flake8
        run: |
          # stop the build if there are Python syntax errors or undefined names
          flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
          # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
          flake8 examples/ ot/ test/ --count --max-line-length=127 --statistics

  linux-minimal-deps:
    runs-on: ubuntu-latest
    if: "!contains(github.event.head_commit.message, 'no ci')"
    steps:
      - uses: actions/checkout@v1
      - name: Set up Python
        uses: actions/setup-python@v1
        with:
          python-version: "3.10"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pytest pytest-cov
      - name: Install POT
        run: |
          pip install -e .
      - name: Run tests
        run: |
          python -m pytest --durations=20 -v test/ ot/ --color=yes --cov=./ --cov-report=xml
      - name: Upload coverage reports to Codecov with GitHub Action
        uses: codecov/codecov-action@v3

  macos:
    runs-on: macos-latest
    if: "!contains(github.event.head_commit.message, 'no ci')"
    strategy:
      max-parallel: 4
      matrix:
        python-version: ["3.10"]

    steps:
      - uses: actions/checkout@v1
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v1
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install POT
        run: |
          pip install -e .
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
          pip install pytest
      - name: Run tests
        run: |
          python -m pytest --durations=20 -v test/ ot/ --color=yes

  windows:
    runs-on: windows-latest
    if: "!contains(github.event.head_commit.message, 'no ci')"
    strategy:
      max-parallel: 4
      matrix:
        python-version: ["3.10"]

    steps:
      - uses: actions/checkout@v1
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v1
        with:
          python-version: ${{ matrix.python-version }}
      - name: RC.exe
        run: |
          function Invoke-VSDevEnvironment {
              $vswhere = "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe"
              $installationPath = & $vswhere -prerelease -legacy -latest -property installationPath
              $Command = Join-Path $installationPath "Common7\Tools\vsdevcmd.bat"
              & "${env:COMSPEC}" /s /c "`"$Command`" -no_logo && set" | Foreach-Object {
                  if ($_ -match '^([^=]+)=(.*)') {
                      [System.Environment]::SetEnvironmentVariable($matches[1], $matches[2])
                  }
              }
          }
          Invoke-VSDevEnvironment
          Get-Command rc.exe | Format-Table -AutoSize
      - name: Update pip
        run: |
          python -m pip install --upgrade pip setuptools
          python -m pip install cython
      - name: Install POT
        run: |
          python -m pip install -e .
      - name: Install dependencies
        run: |
          python -m pip install -r .github/requirements_test_windows.txt
          python -m pip install torch torchvision torchaudio
          python -m pip install pytest
      - name: Run tests
        run: |
          python -m pytest --durations=20 -v test/ ot/ --color=yes
==> python-pot-0.9.3+dfsg/.github/workflows/build_tests_cuda.yml <==
name: Tests CUDA

on:
  workflow_dispatch:
  pull_request_review:
    types: [submitted]

jobs:
  linux-cuda:
    runs-on: pc-cuda
    if: github.event.review.state == 'approved' || github.event_name == 'workflow_dispatch' || (github.event_name == 'push' && github.ref == 'refs/heads/master')
    steps:
      - uses: actions/checkout@v1
      - name: Install POT
        run: |
          python3.10 -m pip install --ignore-installed -e .
      - name: Run tests
        run: |
          python3.10 -m pytest --durations=20 -v test/ ot/ --doctest-modules --color=yes --ignore=test/test_dr.py --ignore=ot.dr --ignore=ot.plot

==> python-pot-0.9.3+dfsg/.github/workflows/build_wheels.yml <==
name: Build wheels

on:
  workflow_dispatch:
  release:
  pull_request:
  push:
    branches:
      - "*"

jobs:
  build_wheels:
    name: ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    if: "contains(github.event.head_commit.message, 'build wheels')"
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest, windows-latest]

    steps:
      - uses: actions/checkout@v1
      - name: Set up Python 3.10
        uses: actions/setup-python@v1
        with:
          python-version: "3.10"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
      - name: Install cibuildwheel
        run: |
          python -m pip install cibuildwheel==2.16.2
      - name: Build wheels
        env:
          CIBW_SKIP: "pp*-win* pp*-macosx* cp2* pp* cp36* cp*musl* *i686"  # remove pypy on mac and win (wrong version)
        run: |
          python -m cibuildwheel --output-dir wheelhouse
      - uses: actions/upload-artifact@v1
        with:
          name: wheels
          path: ./wheelhouse

  build_all_wheels:
    name: ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    if: "contains(github.event.head_commit.message, 'build all wheels')"
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest, windows-latest]

    steps:
      - uses: actions/checkout@v1
      - name: Set up Python 3.10
        uses: actions/setup-python@v1
        with:
          python-version: "3.10"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
      - name: Install cibuildwheel
        run: |
          python -m pip install cibuildwheel==2.16.2
      - name: Set up QEMU
        if: runner.os == 'Linux'
        uses: docker/setup-qemu-action@v1
        with:
          platforms: all
      - name: Build wheels
        env:
          CIBW_SKIP: "pp*-win* pp*-macosx* cp2* pp* cp*musl* *i686"  # remove pypy on mac and win (wrong version)
          CIBW_ARCHS_LINUX: auto aarch64  # force aarch64 with QEMU
          CIBW_ARCHS_MACOS: x86_64 universal2 arm64
        run: |
          python -m cibuildwheel --output-dir wheelhouse
      - uses: actions/upload-artifact@v1
        with:
          name: wheels
          path: ./wheelhouse

==> python-pot-0.9.3+dfsg/.github/workflows/build_wheels_weekly.yml <==
name: Build all wheels

on:
  workflow_dispatch:
  schedule:
    - cron: '30 0 * * 1'
  push:
    branches:
      - "master"

jobs:
  build_wheels:
    name: ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest, windows-latest]

    steps:
      - uses: actions/checkout@v1
      - name: Set up Python 3.10
        uses: actions/setup-python@v1
        with:
          python-version: "3.10"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
      - name: Install cibuildwheel
        run: |
          python -m pip install cibuildwheel==2.16.2
      - name: Set up QEMU
        if: runner.os == 'Linux'
        uses: docker/setup-qemu-action@v1
        with:
          platforms: all
      - name: Build wheels
        env:
          CIBW_SKIP: "pp*-win* pp*-macosx* cp2* pp* cp*musl* cp36* *i686"  # remove pypy on mac and win (wrong version)
          CIBW_BEFORE_BUILD: "pip install numpy cython"
          CIBW_ARCHS_LINUX: auto aarch64  # force aarch64 with QEMU
          CIBW_ARCHS_MACOS: x86_64 universal2 arm64
        run: |
          python -m cibuildwheel --output-dir wheelhouse
      - uses: actions/upload-artifact@v1
        with:
          name: wheels
          path: ./wheelhouse

==> python-pot-0.9.3+dfsg/.github/workflows/circleci-redirector.yml <==
name: circleci-redirector
on: [status]
jobs:
  circleci_artifacts_redirector_job:
    runs-on: ubuntu-latest
    if: "${{ github.event.context == 'ci/circleci: build_docs' }}"
    name: Run CircleCI artifacts redirector
    steps:
      - name: GitHub Action step
        uses: larsoner/circleci-artifacts-redirector-action@master
        with:
          api-token: ${{ secrets.CIRCLE_TOKEN }}
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          artifact-path: 0/dev/index.html
          circleci-jobs: build_docs
==> python-pot-0.9.3+dfsg/.gitignore <==
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

.spyproject

# sphinx-gallery temp files
docs/source/auto_examples/*.pickle
docs/source/auto_examples/*.md5
docs/auto_examples/
docs/modules/

# C extensions
*.so

# Cython output
ot/lp/emd_wrap.cpp

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# env
pythonenv3.8/

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*,cover
.hypothesis/

# Translations
*.mo
*.pot

# xml
*.xml

# Django stuff:
*.log
local_settings.py

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# IPython Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# dotenv
.env

# virtualenv
venv/
ENV/
.venv/

# Spyder project settings
.spyderproject

# Rope project settings
.ropeproject

# Mac stuff
.DS_Store

# coverage output folder
cov_html/

docs/source/modules/generated/*
docs/source/_build/*

# local debug folder
debug

# vscode parameters
.vscode

# pytest cache
.pytest_cache

==> python-pot-0.9.3+dfsg/.mailmap <==
Nicolas Courty Nicolas Courty
Nicolas Courty ncourty
Nicolas Courty Nicolas Courty
Léo Gautheron Leo gautheron
==> python-pot-0.9.3+dfsg/CONTRIBUTORS.md <==

## Creators and Maintainers

This toolbox has been created by

* [Rémi Flamary](https://remi.flamary.com/)
* [Nicolas Courty](http://people.irisa.fr/Nicolas.Courty/)

It is currently maintained by

* [Rémi Flamary](https://remi.flamary.com/)
* [Cédric Vincent-Cuaz](https://cedricvincentcuaz.github.io/)

## Contributors

The contributors to this library are:

* [Rémi Flamary](http://remi.flamary.com/) (EMD wrapper, Pytorch backend, DA classes, conditional gradients, WDA, weak OT, linear OT mapping, documentation)
* [Nicolas Courty](http://people.irisa.fr/Nicolas.Courty/) (Original sinkhorn, Wasserstein barycenters and convolutional barycenters, 1D wasserstein)
* [Alexandre Gramfort](http://alexandre.gramfort.net/) (CI, documentation)
* [Laetitia Chapel](http://people.irisa.fr/Laetitia.Chapel/) (Partial OT, Unbalanced OT non-regularized)
* [Michael Perrot](http://perso.univ-st-etienne.fr/pem82055/) (Mapping estimation)
* [Léo Gautheron](https://github.com/aje) (Initial GPU implementation)
* [Nathalie Gayraud](https://www.linkedin.com/in/nathalie-t-h-gayraud/?ppe=1) (DA classes)
* [Stanislas Chambon](https://slasnista.github.io/) (DA classes)
* [Antoine Rolet](https://arolet.github.io/) (EMD solver debug)
* Erwan Vautier (Gromov-Wasserstein)
* [Kilian Fatras](https://kilianfatras.github.io/) (Stochastic solvers, empirical sinkhorn)
* [Alain Rakotomamonjy](https://sites.google.com/site/alainrakotomamonjy/home) (Greenkhorn)
* [Vayer Titouan](https://tvayer.github.io/) (Gromov-Wasserstein, Fused-Gromov-Wasserstein)
* [Hicham Janati](https://hichamjanati.github.io/) (Unbalanced OT, Debiased barycenters)
* [Romain Tavenard](https://rtavenar.github.io/) (1D Wasserstein)
* [Mokhtar Z. Alaya](http://mzalaya.github.io/) (Screenkhorn)
* [Ievgen Redko](https://ievred.github.io/) (Laplacian DA, JCPOT)
* [Adrien Corenflos](https://adriencorenflos.github.io/) (Sliced Wasserstein Distance)
* [Tanguy Kerdoncuff](https://hv0nnus.github.io/) (Sampled Gromov Wasserstein)
* [Minhui Huang](https://mhhuang95.github.io) (Projection Robust Wasserstein Distance)
* [Nathan Cassereau](https://github.com/ncassereau-idris) (Backends)
* [Cédric Vincent-Cuaz](https://github.com/cedricvincentcuaz) (Graph Dictionary Learning, FGW, semi-relaxed FGW)
* [Eloi Tanguy](https://github.com/eloitanguy) (Generalized Wasserstein Barycenters)
* [Camille Le Coz](https://www.linkedin.com/in/camille-le-coz-8593b91a1/) (EMD2 debug)
* [Eduardo Fernandes Montesuma](https://eddardd.github.io/my-personal-blog/) (Free support sinkhorn barycenter)
* [Theo Gnassounou](https://github.com/tgnassou) (OT between Gaussian distributions)
* [Clément Bonet](https://clbonet.github.io) (Wasserstein on circle, Spherical Sliced-Wasserstein)
* [Ronak Mehta](https://ronakrm.github.io) (Efficient Discrete Multi Marginal Optimal Transport Regularization)
* [Xizheng Yu](https://github.com/x12hengyu) (Efficient Discrete Multi Marginal Optimal Transport Regularization)
* [Sonia Mazelet](https://github.com/SoniaMaz8) (Template based GNN layers)
* [Laurène David](https://github.com/laudavid) (Low rank sinkhorn)

## Acknowledgments

This toolbox benefits a lot from open source research and we would like to thank the following persons for providing some code (in various languages):

* [Gabriel Peyré](http://gpeyre.github.io/) (Wasserstein Barycenters in Matlab)
* [Mathieu Blondel](https://mblondel.org/) (original implementation smooth OT)
* [Nicolas Bonneel](http://liris.cnrs.fr/~nbonneel/) (C++ code for EMD)
* [Marco Cuturi](http://marcocuturi.net/) (Sinkhorn Knopp in Matlab/Cuda)

POT has benefited from the financing or manpower from the following partners:

(Partner logos: ANR, CNRS, 3IA, Hi! PARIS)

==> python-pot-0.9.3+dfsg/LICENSE <==
MIT License

Copyright (c) 2016-2023 POT contributors

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
==> python-pot-0.9.3+dfsg/MANIFEST.in <==
include README.md
include RELEASES.md
include LICENSE
include ot/lp/core.h
include ot/lp/EMD.h
include ot/lp/EMD_wrapper.cpp
include ot/lp/emd_wrap.pyx
include ot/lp/full_bipartitegraph.h
include ot/lp/full_bipartitegraph_omp.h
include ot/lp/network_simplex_simple.h
include ot/lp/network_simplex_simple_omp.h

==> python-pot-0.9.3+dfsg/Makefile <==
PYTHON=python3
branch := $(shell git symbolic-ref --short -q HEAD)

help :
	@echo "The following make targets are available:"
	@echo "  help     - print this message"
	@echo "  build    - build python package"
	@echo "  install  - install python package (local user)"
	@echo "  sinstall - install python package (system with sudo)"
	@echo "  remove   - remove the package (local user)"
	@echo "  sremove  - remove the package (system with sudo)"
	@echo "  clean    - remove any temporary files"
	@echo "  notebook - launch ipython notebook"

build :
	$(PYTHON) setup.py build

buildext :
	$(PYTHON) setup.py build_ext --inplace

install :
	$(PYTHON) setup.py install --user

sinstall :
	sudo $(PYTHON) setup.py install

remove :
	$(PYTHON) setup.py install --user --record files.txt
	tr '\n' '\0' < files.txt | xargs -0 rm -f --
	rm files.txt

sremove :
	$(PYTHON) setup.py install --record files.txt
	tr '\n' '\0' < files.txt | sudo xargs -0 rm -f --
	rm files.txt

clean : FORCE
	$(PYTHON) setup.py clean

pep8 :
	flake8 examples/ ot/ test/ --count --max-line-length=127 --statistics --show-source

test : FORCE pep8
	$(PYTHON) -m pytest --durations=20 -v test/ --doctest-modules --ignore ot/gpu/

pytest : FORCE
	$(PYTHON) -m pytest --durations=20 -v test/ --doctest-modules --ignore ot/gpu/

release :
	twine upload dist/*

release_test :
	twine upload --repository-url https://test.pypi.org/legacy/ dist/*

rdoc :
	pandoc --from=markdown --to=rst --output=docs/source/readme.rst README.md
	sed -i 's,https://pythonot.github.io/auto_examples/,auto_examples/,g' docs/source/readme.rst
	pandoc --from=markdown --to=rst --output=docs/source/releases.rst RELEASES.md
	sed -i 's,https://pot.readthedocs.io/en/latest/,,g' docs/source/releases.rst
	sed -i 's,https://github.com/rflamary/POT/blob/master/notebooks/,auto_examples/,g' docs/source/releases.rst
	sed -i 's,.ipynb,.html,g' docs/source/releases.rst
	sed -i 's,https://pythonot.github.io/auto_examples/,auto_examples/,g' docs/source/releases.rst

notebook :
	ipython notebook --matplotlib=inline --notebook-dir=notebooks/

bench :
	@git stash >/dev/null 2>&1
	@echo 'Branch master'
	@git checkout master >/dev/null 2>&1
	python3 $(script)
	@echo 'Branch $(branch)'
	@git checkout $(branch) >/dev/null 2>&1
	python3 $(script)
	@git stash apply >/dev/null 2>&1

autopep8 :
	autopep8 -ir test ot examples --jobs -1

aautopep8 :
	autopep8 -air test ot examples --jobs -1

wheels :
	CIBW_BEFORE_BUILD="pip install numpy cython" cibuildwheel --platform linux --output-dir dist

dist : wheels
	$(PYTHON) setup.py sdist

pydocstyle :
	pydocstyle ot

FORCE :
==> python-pot-0.9.3+dfsg/README.md <==
# POT: Python Optimal Transport

[![PyPI version](https://badge.fury.io/py/POT.svg)](https://badge.fury.io/py/POT)
[![Anaconda Cloud](https://anaconda.org/conda-forge/pot/badges/version.svg)](https://anaconda.org/conda-forge/pot)
[![Build Status](https://github.com/PythonOT/POT/actions/workflows/build_tests.yml/badge.svg)](https://github.com/PythonOT/POT/actions)
[![Codecov Status](https://codecov.io/gh/PythonOT/POT/branch/master/graph/badge.svg)](https://codecov.io/gh/PythonOT/POT)
[![Downloads](https://static.pepy.tech/badge/pot)](https://pepy.tech/project/pot)
[![Anaconda downloads](https://anaconda.org/conda-forge/pot/badges/downloads.svg)](https://anaconda.org/conda-forge/pot)
[![License](https://anaconda.org/conda-forge/pot/badges/license.svg)](https://github.com/PythonOT/POT/blob/master/LICENSE)

This open source Python library provides several solvers for optimization problems related to Optimal Transport for signal and image processing and machine learning.

Website and documentation: [https://PythonOT.github.io/](https://PythonOT.github.io/)

Source Code (MIT): [https://github.com/PythonOT/POT](https://github.com/PythonOT/POT)

POT provides the following generic OT solvers (links to examples):

* [OT Network Simplex solver](https://pythonot.github.io/auto_examples/plot_OT_1D.html) for the linear program / Earth Mover's Distance [1].
* [Conditional gradient](https://pythonot.github.io/auto_examples/plot_optim_OTreg.html) [6] and [Generalized conditional gradient](https://pythonot.github.io/auto_examples/plot_optim_OTreg.html) for regularized OT [7].
* Entropic regularization OT solver with [Sinkhorn Knopp Algorithm](https://pythonot.github.io/auto_examples/plot_OT_1D.html) [2], stabilized version [9] [10] [34], lazy CPU/GPU solver from geomloss [60] [61], greedy Sinkhorn [22] and [Screening Sinkhorn [26]](https://pythonot.github.io/auto_examples/plot_screenkhorn_1D.html).
* Bregman projections for [Wasserstein barycenter](https://pythonot.github.io/auto_examples/barycenters/plot_barycenter_lp_vs_entropic.html) [3], [convolutional barycenter](https://pythonot.github.io/auto_examples/barycenters/plot_convolutional_barycenter.html) [21] and unmixing [4].
* Sinkhorn divergence [23] and entropic regularization OT from empirical data.
* Debiased Sinkhorn barycenters [Sinkhorn divergence barycenter](https://pythonot.github.io/auto_examples/barycenters/plot_debiased_barycenter.html) [37]
* [Smooth optimal transport solvers](https://pythonot.github.io/auto_examples/plot_OT_1D_smooth.html) (dual and semi-dual) for KL and squared L2 regularizations [17].
* Weak OT solver between empirical distributions [39]
* Non regularized [Wasserstein barycenters [16]](https://pythonot.github.io/auto_examples/barycenters/plot_barycenter_lp_vs_entropic.html) with LP solver (only small scale).
* [Gromov-Wasserstein distances](https://pythonot.github.io/auto_examples/gromov/plot_gromov.html) and [GW barycenters](https://pythonot.github.io/auto_examples/gromov/plot_gromov_barycenter.html) (exact [13] and regularized [12,51]), differentiable using gradients from Graph Dictionary Learning [38]
* [Fused-Gromov-Wasserstein distances solver](https://pythonot.github.io/auto_examples/gromov/plot_fgw.html#sphx-glr-auto-examples-plot-fgw-py) and [FGW barycenters](https://pythonot.github.io/auto_examples/gromov/plot_barycenter_fgw.html) (exact [24] and regularized [12,51]).
* [Stochastic solver](https://pythonot.github.io/auto_examples/others/plot_stochastic.html) and [differentiable losses](https://pythonot.github.io/auto_examples/backends/plot_stoch_continuous_ot_pytorch.html) for Large-scale Optimal Transport (semi-dual problem [18] and dual problem [19])
* [Sampled solver of Gromov Wasserstein](https://pythonot.github.io/auto_examples/gromov/plot_gromov.html) for large-scale problems with any loss function [33]
* Non regularized [free support Wasserstein barycenters](https://pythonot.github.io/auto_examples/barycenters/plot_free_support_barycenter.html) [20].
* [One dimensional Unbalanced OT](https://pythonot.github.io/auto_examples/unbalanced-partial/plot_UOT_1D.html) with KL relaxation and [barycenter](https://pythonot.github.io/auto_examples/unbalanced-partial/plot_UOT_barycenter_1D.html) [10, 25]. Also [exact unbalanced OT](https://pythonot.github.io/auto_examples/unbalanced-partial/plot_unbalanced_ot.html) with KL and quadratic regularization and the [regularization path of UOT](https://pythonot.github.io/auto_examples/unbalanced-partial/plot_regpath.html) [41]
* [Partial Wasserstein and Gromov-Wasserstein](https://pythonot.github.io/auto_examples/unbalanced-partial/plot_partial_wass_and_gromov.html) (exact [29] and entropic [3] formulations).
* [Sliced Wasserstein](https://pythonot.github.io/auto_examples/sliced-wasserstein/plot_variance.html) [31, 32] and Max-sliced Wasserstein [35] that can be used for gradient flows [36] (see the sketch after this list).
* [Wasserstein distance on the circle](https://pythonot.github.io/auto_examples/plot_compute_wasserstein_circle.html) [44, 45]
* [Spherical Sliced Wasserstein](https://pythonot.github.io/auto_examples/sliced-wasserstein/plot_variance_ssw.html) [46]
* [Graph Dictionary Learning solvers](https://pythonot.github.io/auto_examples/gromov/plot_gromov_wasserstein_dictionary_learning.html) [38].
* [Semi-relaxed (Fused) Gromov-Wasserstein divergences](https://pythonot.github.io/auto_examples/gromov/plot_semirelaxed_fgw.html) (exact and regularized [48]).
* [Efficient Discrete Multi Marginal Optimal Transport Regularization](https://pythonot.github.io/auto_examples/others/plot_demd_gradient_minimize.html) [50].
* [Several backends](https://pythonot.github.io/quickstart.html#solving-ot-with-multiple-backends) for easy use of POT with [Pytorch](https://pytorch.org/)/[jax](https://github.com/google/jax)/[Numpy](https://numpy.org/)/[Cupy](https://cupy.dev/)/[Tensorflow](https://www.tensorflow.org/) arrays.
* Smooth Strongly Convex Nearest Brenier Potentials [58], with an extension to bounding potentials using [59].
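As a quick taste of the API, the sliced Wasserstein solver mentioned in the list above can be called directly on raw samples. This is a minimal sketch on random data (the sample sizes, shift, and seed are illustrative):

```python
import numpy as np
import ot

rng = np.random.RandomState(42)
X_s = rng.randn(100, 3)          # source samples in R^3
X_t = rng.randn(100, 3) + 1.0    # shifted target samples

# Monte-Carlo estimate of the sliced Wasserstein distance with 50 random projections
sw = ot.sliced_wasserstein_distance(X_s, X_t, n_projections=50, seed=42)
print(sw)
```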
POT provides the following Machine Learning related solvers:

* [Optimal transport for domain adaptation](https://pythonot.github.io/auto_examples/domain-adaptation/plot_otda_classes.html) with [group lasso regularization](https://pythonot.github.io/auto_examples/domain-adaptation/plot_otda_classes.html), [Laplacian regularization](https://pythonot.github.io/auto_examples/domain-adaptation/plot_otda_laplacian.html) [5] [30] and [semi supervised setting](https://pythonot.github.io/auto_examples/domain-adaptation/plot_otda_semi_supervised.html) (see the sketch after this list).
* [Linear OT mapping](https://pythonot.github.io/auto_examples/domain-adaptation/plot_otda_linear_mapping.html) [14] and [Joint OT mapping estimation](https://pythonot.github.io/auto_examples/domain-adaptation/plot_otda_mapping.html) [8].
* [Wasserstein Discriminant Analysis](https://pythonot.github.io/auto_examples/others/plot_WDA.html) [11] (requires autograd + pymanopt).
* [JCPOT algorithm for multi-source domain adaptation with target shift](https://pythonot.github.io/auto_examples/domain-adaptation/plot_otda_jcpot.html) [27].
* [Graph Neural Network OT layers TFGW](https://pythonot.github.io/auto_examples/gromov/plot_gnn_TFGW.html) [52] and TW (OT-GNN) [53]
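As an illustration, the domain adaptation classes above follow a scikit-learn-like fit/transform pattern. Below is a minimal sketch on synthetic data (the data and the regularization value `reg_e` are illustrative):

```python
import numpy as np
import ot

rng = np.random.RandomState(0)
Xs = rng.randn(40, 2)          # source samples
Xt = rng.randn(40, 2) + 3.0    # shifted target samples

# entropic OT-based adaptation: transport the source samples onto the target
mapping = ot.da.SinkhornTransport(reg_e=1.0)
mapping.fit(Xs=Xs, Xt=Xt)
Xs_adapted = mapping.transform(Xs=Xs)   # barycentric mapping of the sources
```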
Some other examples are available in the [documentation](https://pythonot.github.io/auto_examples/index.html).

#### Using and citing the toolbox

If you use this toolbox in your research and find it useful, please cite POT using the following reference from our [JMLR paper](https://jmlr.org/papers/v22/20-451.html):

Rémi Flamary, Nicolas Courty, Alexandre Gramfort, Mokhtar Z. Alaya, Aurélie Boisbunon, Stanislas Chambon, Laetitia Chapel, Adrien Corenflos, Kilian Fatras, Nemo Fournier, Léo Gautheron, Nathalie T.H. Gayraud, Hicham Janati, Alain Rakotomamonjy, Ievgen Redko, Antoine Rolet, Antony Schutz, Vivien Seguy, Danica J. Sutherland, Romain Tavenard, Alexander Tong, Titouan Vayer, POT Python Optimal Transport library, Journal of Machine Learning Research, 22(78):1–8, 2021. Website: https://pythonot.github.io/

In Bibtex format:

```bibtex
@article{flamary2021pot,
  author  = {R{\'e}mi Flamary and Nicolas Courty and Alexandre Gramfort and Mokhtar Z. Alaya and Aur{\'e}lie Boisbunon and Stanislas Chambon and Laetitia Chapel and Adrien Corenflos and Kilian Fatras and Nemo Fournier and L{\'e}o Gautheron and Nathalie T.H. Gayraud and Hicham Janati and Alain Rakotomamonjy and Ievgen Redko and Antoine Rolet and Antony Schutz and Vivien Seguy and Danica J. Sutherland and Romain Tavenard and Alexander Tong and Titouan Vayer},
  title   = {POT: Python Optimal Transport},
  journal = {Journal of Machine Learning Research},
  year    = {2021},
  volume  = {22},
  number  = {78},
  pages   = {1-8},
  url     = {http://jmlr.org/papers/v22/20-451.html}
}
```

## Installation

The library has been tested on Linux, MacOSX and Windows. It requires a C++ compiler for building/installing the EMD solver and relies on the following Python modules:

- Numpy (>=1.16)
- Scipy (>=1.0)
- Cython (>=0.23) (build only, not necessary when installing from pip or conda)

#### Pip installation

You can install the toolbox through PyPI with:

```console
pip install POT
```

or get the very latest version by running:

```console
pip install -U https://github.com/PythonOT/POT/archive/master.zip # with --user for user install (no root)
```

#### Anaconda installation with conda-forge

If you use the Anaconda python distribution, POT is available in [conda-forge](https://conda-forge.org). To install it and the required dependencies:

```console
conda install -c conda-forge pot
```

#### Post installation check

After a correct installation, you should be able to import the module without errors:

```python
import ot
```

Note that for easier access the module is named `ot` instead of `pot`.
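A slightly stronger sanity check solves a tiny exact OT problem end to end (a minimal sketch; the toy histograms and cost are illustrative):

```python
import numpy as np
import ot

a = np.array([0.5, 0.5])            # source histogram
b = np.array([0.5, 0.5])            # target histogram
M = np.array([[0.0, 1.0],
              [1.0, 0.0]])          # ground cost matrix

T = ot.emd(a, b, M)                 # exact OT plan; here mass stays in place
assert np.allclose(T, 0.5 * np.eye(2))
```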
### Dependencies

Some sub-modules require additional dependencies which are discussed below

* **ot.dr** (Wasserstein dimensionality reduction) depends on autograd and pymanopt that can be installed with:

```shell
pip install pymanopt autograd
```

## Examples

### Short examples

* Import the toolbox

```python
import ot
```

* Compute Wasserstein distances

```python
# a,b are 1D histograms (sum to 1 and positive)
# M is the ground cost matrix
Wd = ot.emd2(a, b, M)           # exact linear program
Wd_reg = ot.sinkhorn2(a, b, M, reg)  # entropic regularized OT
# if b is a matrix compute all distances to a and return a vector
```

* Compute OT matrix

```python
# a,b are 1D histograms (sum to 1 and positive)
# M is the ground cost matrix
T = ot.emd(a, b, M)             # exact linear program
T_reg = ot.sinkhorn(a, b, M, reg)    # entropic regularized OT
```

* Compute Wasserstein barycenter

```python
# A is a n*d matrix containing d 1D histograms
# M is the ground cost matrix
ba = ot.barycenter(A, M, reg)   # reg is regularization parameter
```
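* Compute OT between empirical point clouds (a sketch on random data; `ot.dist` builds a squared Euclidean cost matrix by default, and the sample sizes and shift are illustrative)

```python
import numpy as np
import ot

rng = np.random.RandomState(0)
xs = rng.randn(50, 2)           # 50 source samples in 2D
xt = rng.randn(60, 2) + 2.0     # 60 shifted target samples

a = np.ones(50) / 50            # uniform source weights
b = np.ones(60) / 60            # uniform target weights

M = ot.dist(xs, xt)             # squared Euclidean cost matrix by default
W = ot.emd2(a, b, M)            # exact OT cost between the two point clouds
```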
### Examples and Notebooks

The examples folder contains several examples and use cases for the library. The full documentation with examples and output is available on [https://PythonOT.github.io/](https://PythonOT.github.io/).

## Acknowledgements

This toolbox has been created by

* [Rémi Flamary](https://remi.flamary.com/)
* [Nicolas Courty](http://people.irisa.fr/Nicolas.Courty/)

It is currently maintained by

* [Rémi Flamary](https://remi.flamary.com/)
* [Cédric Vincent-Cuaz](https://cedricvincentcuaz.github.io/)

The numerous contributors to this library are listed [here](CONTRIBUTORS.md).

POT has benefited from the financing or manpower from the following partners:

(Partner logos: ANR, CNRS, 3IA, Hi! PARIS)

## Contributions and code of conduct

Every contribution is welcome and should respect the [contribution guidelines](https://pythonot.github.io/master/contributing.html). Each member of the project is expected to follow the [code of conduct](https://pythonot.github.io/master/code_of_conduct.html).

## Support

You can ask questions and join the development discussion:

* On the POT [slack channel](https://pot-toolbox.slack.com)
* On the POT [gitter channel](https://gitter.im/PythonOT/community)
* On the POT [mailing list](https://mail.python.org/mm3/mailman3/lists/pot.python.org/)

You can also post bug reports and feature requests in Github issues. Make sure to read our [guidelines](.github/CONTRIBUTING.md) first.

## References

[1] Bonneel, N., Van De Panne, M., Paris, S., & Heidrich, W. (2011, December). [Displacement interpolation using Lagrangian mass transport](https://people.csail.mit.edu/sparis/publi/2011/sigasia/Bonneel_11_Displacement_Interpolation.pdf). In ACM Transactions on Graphics (TOG) (Vol. 30, No. 6, p. 158). ACM.

[2] Cuturi, M. (2013). [Sinkhorn distances: Lightspeed computation of optimal transport](https://arxiv.org/pdf/1306.0895.pdf). In Advances in Neural Information Processing Systems (pp. 2292-2300).

[3] Benamou, J. D., Carlier, G., Cuturi, M., Nenna, L., & Peyré, G. (2015). [Iterative Bregman projections for regularized transportation problems](https://arxiv.org/pdf/1412.5154.pdf). SIAM Journal on Scientific Computing, 37(2), A1111-A1138.

[4] S. Nakhostin, N. Courty, R. Flamary, D. Tuia, T. Corpetti, [Supervised planetary unmixing with optimal transport](https://hal.archives-ouvertes.fr/hal-01377236/document), Workshop on Hyperspectral Image and Signal Processing: Evolution in Remote Sensing (WHISPERS), 2016.

[5] N. Courty, R. Flamary, D. Tuia, A. Rakotomamonjy, [Optimal Transport for Domain Adaptation](https://arxiv.org/pdf/1507.00504.pdf), in IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. PP, no. 99, pp. 1-1

[6] Ferradans, S., Papadakis, N., Peyré, G., & Aujol, J. F. (2014). [Regularized discrete optimal transport](https://arxiv.org/pdf/1307.5551.pdf). SIAM Journal on Imaging Sciences, 7(3), 1853-1882.

[7] Rakotomamonjy, A., Flamary, R., & Courty, N. (2015). [Generalized conditional gradient: analysis of convergence and applications](https://arxiv.org/pdf/1510.06567.pdf). arXiv preprint arXiv:1510.06567.

[8] M. Perrot, N. Courty, R. Flamary, A. Habrard (2016), [Mapping estimation for discrete optimal transport](http://remi.flamary.com/biblio/perrot2016mapping.pdf), Neural Information Processing Systems (NIPS).

[9] Schmitzer, B. (2016). [Stabilized Sparse Scaling Algorithms for Entropy Regularized Transport Problems](https://arxiv.org/pdf/1610.06519.pdf). arXiv preprint arXiv:1610.06519.

[10] Chizat, L., Peyré, G., Schmitzer, B., & Vialard, F. X. (2016). [Scaling algorithms for unbalanced transport problems](https://arxiv.org/pdf/1607.05816.pdf). arXiv preprint arXiv:1607.05816.

[11] Flamary, R., Cuturi, M., Courty, N., & Rakotomamonjy, A. (2016). [Wasserstein Discriminant Analysis](https://arxiv.org/pdf/1608.08063.pdf). arXiv preprint arXiv:1608.08063.

[12] Gabriel Peyré, Marco Cuturi, and Justin Solomon (2016), [Gromov-Wasserstein averaging of kernel and distance matrices](http://proceedings.mlr.press/v48/peyre16.html) International Conference on Machine Learning (ICML).

[13] Mémoli, Facundo (2011). [Gromov–Wasserstein distances and the metric approach to object matching](https://media.adelaide.edu.au/acvt/Publications/2011/2011-Gromov%E2%80%93Wasserstein%20Distances%20and%20the%20Metric%20Approach%20to%20Object%20Matching.pdf). Foundations of computational mathematics 11.4: 417-487.

[14] Knott, M. and Smith, C. S. (1984). [On the optimal mapping of distributions](https://link.springer.com/article/10.1007/BF00934745), Journal of Optimization Theory and Applications Vol 43.

[15] Peyré, G., & Cuturi, M. (2018). [Computational Optimal Transport](https://arxiv.org/pdf/1803.00567.pdf).

[16] Agueh, M., & Carlier, G. (2011). [Barycenters in the Wasserstein space](https://hal.archives-ouvertes.fr/hal-00637399/document). SIAM Journal on Mathematical Analysis, 43(2), 904-924.

[17] Blondel, M., Seguy, V., & Rolet, A. (2018). [Smooth and Sparse Optimal Transport](https://arxiv.org/abs/1710.06276). Proceedings of the Twenty-First International Conference on Artificial Intelligence and Statistics (AISTATS).

[18] Genevay, A., Cuturi, M., Peyré, G. & Bach, F. (2016) [Stochastic Optimization for Large-scale Optimal Transport](https://arxiv.org/abs/1605.08527). Advances in Neural Information Processing Systems (2016).

[19] Seguy, V., Bhushan Damodaran, B., Flamary, R., Courty, N., Rolet, A. & Blondel, M. [Large-scale Optimal Transport and Mapping Estimation](https://arxiv.org/pdf/1711.02283.pdf). International Conference on Learning Representation (2018)

[20] Cuturi, M. and Doucet, A. (2014) [Fast Computation of Wasserstein Barycenters](http://proceedings.mlr.press/v32/cuturi14.html). International Conference in Machine Learning

[21] Solomon, J., De Goes, F., Peyré, G., Cuturi, M., Butscher, A., Nguyen, A. & Guibas, L. (2015). [Convolutional wasserstein distances: Efficient optimal transportation on geometric domains](https://dl.acm.org/citation.cfm?id=2766963). ACM Transactions on Graphics (TOG), 34(4), 66.
[22] J. Altschuler, J. Weed, P. Rigollet (2017) [Near-linear time approximation algorithms for optimal transport via Sinkhorn iteration](https://papers.nips.cc/paper/6792-near-linear-time-approximation-algorithms-for-optimal-transport-via-sinkhorn-iteration.pdf), Advances in Neural Information Processing Systems (NIPS) 31

[23] Aude, G., Peyré, G., Cuturi, M., [Learning Generative Models with Sinkhorn Divergences](https://arxiv.org/abs/1706.00292), Proceedings of the Twenty-First International Conference on Artificial Intelligence and Statistics (AISTATS) 21, 2018

[24] Vayer, T., Chapel, L., Flamary, R., Tavenard, R. and Courty, N. (2019). [Optimal Transport for structured data with application on graphs](http://proceedings.mlr.press/v97/titouan19a.html) Proceedings of the 36th International Conference on Machine Learning (ICML).

[25] Frogner C., Zhang C., Mobahi H., Araya-Polo M., Poggio T. (2015). [Learning with a Wasserstein Loss](http://cbcl.mit.edu/wasserstein/) Advances in Neural Information Processing Systems (NIPS).

[26] Alaya M. Z., Bérar M., Gasso G., Rakotomamonjy A. (2019). [Screening Sinkhorn Algorithm for Regularized Optimal Transport](https://papers.nips.cc/paper/9386-screening-sinkhorn-algorithm-for-regularized-optimal-transport), Advances in Neural Information Processing Systems 33 (NeurIPS).

[27] Redko I., Courty N., Flamary R., Tuia D. (2019). [Optimal Transport for Multi-source Domain Adaptation under Target Shift](http://proceedings.mlr.press/v89/redko19a.html), Proceedings of the Twenty-Second International Conference on Artificial Intelligence and Statistics (AISTATS) 22, 2019.

[28] Caffarelli, L. A., McCann, R. J. (2010). [Free boundaries in optimal transport and Monge-Ampere obstacle problems](http://www.math.toronto.edu/~mccann/papers/annals2010.pdf), Annals of mathematics, 673-730.

[29] Chapel, L., Alaya, M., Gasso, G. (2020). [Partial Optimal Transport with Applications on Positive-Unlabeled Learning](https://arxiv.org/abs/2002.08276), Advances in Neural Information Processing Systems (NeurIPS), 2020.

[30] Flamary R., Courty N., Tuia D., Rakotomamonjy A. (2014). [Optimal transport with Laplacian regularization: Applications to domain adaptation and shape matching](https://remi.flamary.com/biblio/flamary2014optlaplace.pdf), NIPS Workshop on Optimal Transport and Machine Learning OTML, 2014.

[31] Bonneel, Nicolas, et al. [Sliced and radon wasserstein barycenters of measures](https://perso.liris.cnrs.fr/nicolas.bonneel/WassersteinSliced-JMIV.pdf), Journal of Mathematical Imaging and Vision 51.1 (2015): 22-45

[32] Huang, M., Ma S., Lai, L. (2021). [A Riemannian Block Coordinate Descent Method for Computing the Projection Robust Wasserstein Distance](http://proceedings.mlr.press/v139/huang21e.html), Proceedings of the 38th International Conference on Machine Learning (ICML).

[33] Kerdoncuff T., Emonet R., Marc S. [Sampled Gromov Wasserstein](https://hal.archives-ouvertes.fr/hal-03232509/document), Machine Learning Journal (MJL), 2021

[34] Feydy, J., Séjourné, T., Vialard, F. X., Amari, S. I., Trouvé, A., & Peyré, G. (2019, April). [Interpolating between optimal transport and MMD using Sinkhorn divergences](http://proceedings.mlr.press/v89/feydy19a/feydy19a.pdf). In The 22nd International Conference on Artificial Intelligence and Statistics (pp. 2681-2690). PMLR.

[35] Deshpande, I., Hu, Y. T., Sun, R., Pyrros, A., Siddiqui, N., Koyejo, S., ... & Schwing, A. G. (2019). [Max-sliced wasserstein distance and its use for gans](https://openaccess.thecvf.com/content_CVPR_2019/papers/Deshpande_Max-Sliced_Wasserstein_Distance_and_Its_Use_for_GANs_CVPR_2019_paper.pdf). In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (pp. 10648-10656).
[35] Deshpande, I., Hu, Y. T., Sun, R., Pyrros, A., Siddiqui, N., Koyejo, S., ... & Schwing, A. G. (2019). [Max-sliced Wasserstein distance and its use for GANs](https://openaccess.thecvf.com/content_CVPR_2019/papers/Deshpande_Max-Sliced_Wasserstein_Distance_and_Its_Use_for_GANs_CVPR_2019_paper.pdf). In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (pp. 10648-10656).
[36] Liutkus, A., Simsekli, U., Majewski, S., Durmus, A., & Stöter, F. R. (2019, May). [Sliced-Wasserstein flows: Nonparametric generative modeling via optimal transport and diffusions](http://proceedings.mlr.press/v97/liutkus19a/liutkus19a.pdf). In International Conference on Machine Learning (pp. 4104-4113). PMLR.
[37] Janati, H., Cuturi, M., Gramfort, A. [Debiased Sinkhorn barycenters](http://proceedings.mlr.press/v119/janati20a/janati20a.pdf), Proceedings of the 37th International Conference on Machine Learning, PMLR 119:4692-4701, 2020.
[38] C. Vincent-Cuaz, T. Vayer, R. Flamary, M. Corneli, N. Courty, [Online Graph Dictionary Learning](https://arxiv.org/pdf/2102.06555.pdf), International Conference on Machine Learning (ICML), 2021.
[39] Gozlan, N., Roberto, C., Samson, P. M., & Tetali, P. (2017). [Kantorovich duality for general transport costs and applications](https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.712.1825&rep=rep1&type=pdf). Journal of Functional Analysis, 273(11), 3327-3405.
[40] Forrow, A., Hütter, J. C., Nitzan, M., Rigollet, P., Schiebinger, G., & Weed, J. (2019, April). [Statistical optimal transport via factored couplings](http://proceedings.mlr.press/v89/forrow19a/forrow19a.pdf). In The 22nd International Conference on Artificial Intelligence and Statistics (pp. 2454-2465). PMLR.
[41] Chapel*, L., Flamary*, R., Wu, H., Févotte, C., Gasso, G. (2021). [Unbalanced Optimal Transport through Non-negative Penalized Linear Regression](https://proceedings.neurips.cc/paper/2021/file/c3c617a9b80b3ae1ebd868b0017cc349-Paper.pdf), Advances in Neural Information Processing Systems (NeurIPS), 2021. (Two first co-authors)
[42] Delon, J., Gozlan, N., and Saint-Dizier, A. [Generalized Wasserstein barycenters between probability measures living on different subspaces](https://arxiv.org/pdf/2105.09755). arXiv preprint arXiv:2105.09755, 2021.
[43] Álvarez-Esteban, Pedro C., et al. [A fixed-point approach to barycenters in Wasserstein space](https://arxiv.org/pdf/1511.05355.pdf). Journal of Mathematical Analysis and Applications 441.2 (2016): 744-762.
[44] Delon, Julie, Julien Salomon, and Andrei Sobolevski. [Fast transport optimization for Monge costs on the circle](https://arxiv.org/abs/0902.3527). SIAM Journal on Applied Mathematics 70.7 (2010): 2239-2258.
[45] Hundrieser, Shayan, Marcel Klatt, and Axel Munk. [The statistics of circular optimal transport](https://arxiv.org/abs/2103.15426). Directional Statistics for Innovative Applications: A Bicentennial Tribute to Florence Nightingale. Singapore: Springer Nature Singapore, 2022. 57-82.
[46] Bonet, C., Berg, P., Courty, N., Septier, F., Drumetz, L., & Pham, M. T. (2023). [Spherical Sliced-Wasserstein](https://openreview.net/forum?id=jXQ0ipgMdU). International Conference on Learning Representations.
[47] Chowdhury, S., & Mémoli, F. (2019). [The Gromov–Wasserstein distance between networks and stable network invariants](https://academic.oup.com/imaiai/article/8/4/757/5627736). Information and Inference: A Journal of the IMA, 8(4), 757-787.
[48] Cédric Vincent-Cuaz, Rémi Flamary, Marco Corneli, Titouan Vayer, Nicolas Courty (2022). [Semi-relaxed Gromov-Wasserstein divergence and applications on graphs](https://openreview.net/pdf?id=RShaMexjc-x). International Conference on Learning Representations (ICLR), 2022.
[49] Redko, I., Vayer, T., Flamary, R., and Courty, N. (2020). [CO-Optimal Transport](https://proceedings.neurips.cc/paper/2020/file/cc384c68ad503482fb24e6d1e3b512ae-Paper.pdf). Advances in Neural Information Processing Systems, 33.
[50] Liu, T., Puigcerver, J., & Blondel, M. (2023). [Sparsity-constrained optimal transport](https://openreview.net/forum?id=yHY9NbQJ5BP). Proceedings of the Eleventh International Conference on Learning Representations (ICLR).
[51] Xu, H., Luo, D., Zha, H., & Duke, L. C. (2019). [Gromov-Wasserstein learning for graph matching and node embedding](http://proceedings.mlr.press/v97/xu19b.html). In International Conference on Machine Learning (ICML), 2019.
[52] Collas, A., Vayer, T., Flamary, R., & Breloy, A. (2023). [Entropic Wasserstein Component Analysis](https://arxiv.org/abs/2303.05119). arXiv preprint.
[53] C. Vincent-Cuaz, R. Flamary, M. Corneli, T. Vayer, N. Courty (2022). [Template based graph neural network with optimal transport distances](https://papers.nips.cc/paper_files/paper/2022/file/4d3525bc60ba1adc72336c0392d3d902-Paper-Conference.pdf). Advances in Neural Information Processing Systems, 35.
[54] Bécigneul, G., Ganea, O. E., Chen, B., Barzilay, R., & Jaakkola, T. S. (2020). [Optimal transport graph neural networks](https://arxiv.org/pdf/2006.04804).
[55] Ronak Mehta, Jeffery Kline, Vishnu Suresh Lokhande, Glenn Fung, & Vikas Singh (2023). [Efficient Discrete Multi Marginal Optimal Transport Regularization](https://openreview.net/forum?id=R98ZfMt-jE). In The Eleventh International Conference on Learning Representations (ICLR).
[56] Jeffery Kline. [Properties of the d-dimensional earth mover's problem](https://www.sciencedirect.com/science/article/pii/S0166218X19301441). Discrete Applied Mathematics, 265: 128-141, 2019.
[57] Delon, J., Desolneux, A., & Salmona, A. (2022). [Gromov–Wasserstein distances between Gaussian distributions](https://hal.science/hal-03197398v2/file/main.pdf). Journal of Applied Probability, 59(4), 1178-1198.
[58] Paty, F-P., d'Aspremont, A., & Cuturi, M. (2020). [Regularity as regularization: Smooth and strongly convex Brenier potentials in optimal transport](http://proceedings.mlr.press/v108/paty20a/paty20a.pdf). In International Conference on Artificial Intelligence and Statistics, pages 1222-1232. PMLR, 2020.
[59] Taylor, A. B. (2017). [Convex interpolation and performance estimation of first-order methods for convex optimization](https://dial.uclouvain.be/pr/boreal/object/boreal%3A182881/datastream/PDF_01/view). PhD thesis, Catholic University of Louvain, Louvain-la-Neuve, Belgium, 2017.
[60] Feydy, J., Roussillon, P., Trouvé, A., & Gori, P. (2019). [Fast and scalable optimal transport for brain tractograms](https://arxiv.org/pdf/2107.02010.pdf). In Medical Image Computing and Computer Assisted Intervention – MICCAI 2019: 22nd International Conference, Shenzhen, China, October 13-17, 2019, Proceedings, Part III 22 (pp. 636-644). Springer International Publishing.
[61] Charlier, B., Feydy, J., Glaunes, J. A., Collin, F. D., & Durif, G. (2021). [Kernel operations on the GPU, with autodiff, without memory overflows](https://www.jmlr.org/papers/volume22/20-275/20-275.pdf). The Journal of Machine Learning Research, 22(1), 3457-3462.
[62] H. Van Assel, C. Vincent-Cuaz, T. Vayer, R. Flamary, N. Courty (2023). [Interpolating between Clustering and Dimensionality Reduction with Gromov-Wasserstein](https://arxiv.org/pdf/2310.03398.pdf). NeurIPS 2023 Workshop Optimal Transport and Machine Learning.
[63] Li, J., Tang, J., Kong, L., Liu, H., Li, J., So, A. M. C., & Blanchet, J. (2022). [A Convergent Single-Loop Algorithm for Relaxation of Gromov-Wasserstein in Graph Data](https://openreview.net/pdf?id=0jxPyVWmiiF). In The Eleventh International Conference on Learning Representations.
[64] Ma, X., Chu, X., Wang, Y., Lin, Y., Zhao, J., Ma, L., & Zhu, W. (2023). [Fused Gromov-Wasserstein Graph Mixup for Graph-level Classifications](https://openreview.net/pdf?id=uqkUguNu40). In Thirty-seventh Conference on Neural Information Processing Systems.
[65] Scetbon, M., Cuturi, M., & Peyré, G. (2021). [Low-Rank Sinkhorn Factorization](https://arxiv.org/pdf/2103.04737.pdf).
python-pot-0.9.3+dfsg/RELEASES.md

# Releases

## 0.9.3

#### Closed issues
- Fixed an issue with cost correction for mismatched labels in `ot.da.BaseTransport` fit methods. This fix addresses the original issue introduced in PR #587 (PR #593)

## 0.9.2

*December 2023*

This new release contains several new features and bug fixes. Among the new features we have a new solver for estimation of nearest Brenier potentials (SSNB) that can be used for OT mapping estimation (on small problems), new Bregman Alternated Projected Gradient solvers for GW and FGW, and new solvers for Bures-Wasserstein barycenters. We also provide a first solver for Low Rank Sinkhorn that will be used to provide low-rank OT extensions in the next releases. Finally we have a new exact line-search for (F)GW solvers with KL loss that can be used to improve the convergence of the solvers.

We also have a new `LazyTensor` class that can be used to model OT plans and low-rank tensors in large-scale OT. This class is used to return the plan for the new wrapper for the `geomloss` Sinkhorn solver on empirical samples, which can lead to x10/x100 speedups on CPU or GPU and has a lazy implementation that allows solving very large problems of a few million samples. We also have a new API for solving OT problems from empirical samples with `ot.solve_sample`. Finally we have a new API for Gromov-Wasserstein solvers with the `ot.solve_gromov` function that centralizes most of the (F)GW methods with unified notation.
Some examples of how to use the new API are shown below:

```python
import numpy as np
import ot

# Generate random data
xs, xt = np.random.randn(100, 2), np.random.randn(50, 2)

# Solve OT problem with empirical samples
sol = ot.solve_sample(xs, xt)  # Exact OT between samples with uniform weights

wa, wb = ot.unif(100), ot.unif(50)  # marginal weights chosen by the user
sol = ot.solve_sample(xs, xt, wa, wb)  # Exact OT with weights given by user

sol = ot.solve_sample(xs, xt, reg=1, metric='euclidean')  # Sinkhorn with Euclidean metric

sol = ot.solve_sample(xs, xt, reg=1, method='geomloss')  # faster Sinkhorn solver on CPU/GPU

sol = ot.solve_sample(xs, xt, method='factored', rank=10)  # compute factored OT

sol = ot.solve_sample(xs, xt, method='lowrank', rank=10)  # compute low-rank Sinkhorn OT

value_bw = ot.solve_sample(xs, xt, method='gaussian').value  # Bures-Wasserstein distance

# Solve GW problem
Cs, Ct = ot.dist(xs, xs), ot.dist(xt, xt)  # compute cost matrices
sol = ot.solve_gromov(Cs, Ct)  # Exact GW between samples with uniform weights

# Solve FGW problem
M = ot.dist(xs, xt)  # compute cost matrix

# FGW between samples with uniform weights
sol = ot.solve_gromov(Cs, Ct, M, loss='KL', alpha=0.7)  # FGW with KL data fitting

# recover solutions objects
P = sol.plan  # OT plan
u, v = sol.potentials  # dual variables
value = sol.value  # OT value

# for GW and FGW
value_linear = sol.value_linear  # linear part of the loss
value_quad = sol.value_quad  # quadratic part of the loss
```
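The plan can also be kept lazy for very large problems. The following is a minimal sketch only: it assumes the `lazy` keyword of `ot.solve_sample` and a `lazy_plan` attribute on the returned solution (backed by the new `LazyTensor` class), as well as slicing on that object to materialize only part of the plan:

```python
import numpy as np
import ot

xs, xt = np.random.randn(10000, 2), np.random.randn(10000, 2)

# lazy Sinkhorn: the dense plan is never allocated in memory
sol = ot.solve_sample(xs, xt, reg=1, lazy=True)

lp = sol.lazy_plan  # LazyTensor modeling the OT plan
rows = lp[0:10]     # materialize only the first rows of the plan (assumed slicing API)
```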
Users are encouraged to use the new API (it is much simpler) but it might still be subject to small changes before the release of POT 1.0.

We also fixed a number of issues, the most pressing being a problem of GPU memory allocation when pytorch is installed, which will not happen now thanks to lazy initialization of the backends. We now also have the possibility to deactivate some backends using environment variables, which prevents POT from importing them and can lead to a large import speedup.

#### New features
+ Added support for [Nearest Brenier Potentials (SSNB)](http://proceedings.mlr.press/v108/paty20a/paty20a.pdf) (PR #526) + minor fix (PR #535)
+ Tweaked `get_backend` to ignore `None` inputs (PR #525)
+ Callbacks for generalized conditional gradient in `ot.da.sinkhorn_l1l2_gl` are now vectorized to improve performance (PR #507)
+ The `linspace` method of the backends now has the `type_as` argument to convert to the same dtype and device (PR #533)
+ The `convolutional_barycenter2d` and `convolutional_barycenter2d_debiased` functions now work with different devices (PR #533)
+ New API for Gromov-Wasserstein solvers with `ot.solve_gromov` function (PR #536)
+ New LP solvers from scipy used by default for LP barycenter (PR #537)
+ Update wheels to Python 3.12 and remove old i686 arch that do not have scipy wheels (PR #543)
+ Upgraded unbalanced OT solvers for more flexibility (PR #539)
+ Add LazyTensor for modeling plans and low-rank tensors in large-scale OT (PR #544)
+ Add exact line-search for `gromov_wasserstein` and `fused_gromov_wasserstein` with KL loss (PR #556)
+ Add KL loss to all semi-relaxed (Fused) Gromov-Wasserstein solvers (PR #559)
+ Further upgraded unbalanced OT solvers for more flexibility and future use (PR #551)
+ New API function `ot.solve_sample` for solving OT problems from empirical samples (PR #563)
+ Wrapper for `geomloss` solver on empirical samples (PR #571)
+ Add `stop_criterion` feature to (un)regularized (f)gw barycenter solvers (PR #578)
+ Add `fixed_structure` and `fixed_features` to entropic fgw barycenter solver (PR #578)
+ Add new BAPG solvers with KL projections for GW and FGW (PR #581)
+ Add Bures-Wasserstein barycenter in `ot.gaussian` and example (PR #582, PR #584)
+ Domain adaptation method `SinkhornL1l2Transport` now supports JAX backend (PR #587)
+ Added support for [Low-Rank Sinkhorn Factorization](https://arxiv.org/pdf/2103.04737.pdf) (PR #568)

#### Closed issues
- Fix line search evaluating cost outside of the interpolation range (Issue #502, PR #504)
- Lazily instantiate backends to avoid unnecessary GPU memory pre-allocations on package import (Issue #516, PR #520)
- Handle documentation and warnings when integers are provided to (f)gw solvers based on cg (Issue #530, PR #559)
- Correct independence of `fgw_barycenters` to `init_C` and `init_X` (Issue #547, PR #566)
- Avoid precision change when computing norm using PyTorch backend (Discussion #570, PR #572)
- Create `ot/bregman/` repository (Issue #567, PR #569)
- Fix matrix feature shape in `entropic_fused_gromov_barycenters` (Issue #574, PR #573)
- Fix (fused) gromov-wasserstein barycenter solvers to support `kl_loss` (PR #576)

## 0.9.1

*August 2023*

This new release contains several new features and bug fixes. New features include a new submodule `ot.gnn` that contains two new Graph neural network layers (compatible with [Pytorch Geometric](https://pytorch-geometric.readthedocs.io/)) for template-based pooling of graphs, with an example on [graph classification](https://pythonot.github.io/master/auto_examples/gromov/plot_gnn_TFGW.html). Related to this, we also now provide FGW and semi-relaxed FGW solvers for which the resulting loss is differentiable w.r.t. the parameter `alpha`. Other contributions on the (F)GW front include a new solver for the Proximal Point algorithm [that can be used to solve entropic GW problems](https://pythonot.github.io/master/auto_examples/gromov/plot_fgw_solvers.html) (using the parameter `solver="PPA"`), new solvers for entropic FGW barycenters, novel Sinkhorn-based solvers for entropic semi-relaxed (F)GW, the possibility to provide a warm-start to the solvers, and optional marginal weights of the samples (uniform weights are used by default).
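For instance, the Proximal Point algorithm is selected through the `solver` parameter of the entropic GW solver. This is a minimal sketch under the assumption that `ot.gromov.entropic_gromov_wasserstein` accepts the keyword arguments shown (the `solver="PPA"` option is the one mentioned above):

```python
import numpy as np
import ot

rng = np.random.RandomState(42)
xs, xt = rng.randn(30, 2), rng.randn(40, 3)

C1, C2 = ot.dist(xs, xs), ot.dist(xt, xt)  # intra-domain structure matrices
p, q = ot.unif(30), ot.unif(40)            # uniform marginal weights

# entropic GW solved with the Proximal Point algorithm instead of
# the default projected gradient descent
T = ot.gromov.entropic_gromov_wasserstein(
    C1, C2, p, q, loss_fun="square_loss", epsilon=5e-2, solver="PPA"
)
```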
Finally we added in the submodules `ot.gaussian` and `ot.da` new loss and mapping estimators for the Gaussian Gromov-Wasserstein, which can be used as a fast alternative to GW and estimate linear mappings between unregistered spaces that can potentially have different sizes (see the updated [linear mapping example](https://pythonot.github.io/master/auto_examples/domain-adaptation/plot_otda_linear_mapping.html) for an illustration).

We also provide a new solver for the [Entropic Wasserstein Component Analysis](https://pythonot.github.io/master/auto_examples/others/plot_EWCA.html) that is a generalization of the celebrated PCA taking into account the local neighborhood of the samples. We also now have a new solver in `ot.smooth` for the [sparsity-constrained OT (last plot)](https://pythonot.github.io/master/auto_examples/plot_OT_1D_smooth.html) that can be used to find regularized OT plans with sparsity constraints. Finally we have a first multi-marginal solver for regular 1D distributions with a Monge loss (see [here](https://pythonot.github.io/master/auto_examples/others/plot_dmmot.html)).

The documentation and tests have also been updated. We now have nearly 95% code coverage with the tests. The documentation has been updated and some examples have been streamlined to build more quickly and avoid timeout problems with CircleCI. We also added an optional CI on GPU for the master branch and approved PRs that can be used when a GPU runner is online.

Many other bugs and issues have been fixed and we want to thank all the contributors, old and new, who made this release possible. More details below.
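As an illustration of the Gaussian estimators mentioned above, here is a minimal sketch of the closed-form affine mapping between two empirical distributions; it assumes the `ot.gaussian.empirical_bures_wasserstein_mapping` helper and its `(A, b)` return convention:

```python
import numpy as np
import ot

rng = np.random.RandomState(0)
xs = rng.randn(200, 2)                                           # source samples
xt = rng.randn(300, 2) @ np.array([[2.0, 0.5], [0.0, 1.0]]) + 4  # target samples

# closed-form affine Monge mapping between the Gaussian
# approximations of the two empirical distributions: x -> x A + b
A, b = ot.gaussian.empirical_bures_wasserstein_mapping(xs, xt)
xs_mapped = xs @ A + b
```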
#### New features
- Added Bures-Wasserstein distance in `ot.gaussian` (PR #428)
- Added Generalized Wasserstein Barycenter solver + example (PR #372), fixed graphical details on the example (PR #376)
- Added Free Support Sinkhorn Barycenter + example (PR #387)
- New API for OT solver using function `ot.solve` (PR #388)
- Backend version of `ot.partial` and `ot.smooth` (PR #388)
- Added argument for warmstart of dual vectors in Sinkhorn-based methods in `ot.bregman` (PR #437)

#### Closed issues
- Fixed an issue with the documentation gallery sections (PR #395)
- Fixed an issue where the Sinkhorn divergence did not have gradients (Issue #393, PR #394)
- Fixed an issue where we could not ask TorchBackend to place a random tensor on GPU (Issue #371, PR #373)
- Fixed an issue where the Sinkhorn solver assumed a symmetric cost matrix (Issue #374, PR #375)
- Fixed an issue where hitting iteration limits would be reported to stderr by std::cerr regardless of Python's stderr stream status (PR #377)
- Fixed an issue where the metric argument in ot.dist did not allow a callable parameter (Issue #378, PR #379)
- Fixed an issue where the max number of iterations in ot.emd was not allowed to go beyond 2^31 (PR #380)
- Fixed an issue where pointers would overflow in the EMD solver, returning an incomplete transport plan above a certain size (slightly above 46k, its square being roughly 2^31) (PR #381)
- Error raised when mass mismatch in emd2 (PR #386)
- Fixed an issue where a pytorch example would throw an error if executed on a GPU (Issue #389, PR #391)
- Added a work-around for scipy's bug, where you cannot compute the Hamming distance with a "None" weight attribute (Issue #400, PR #402)
- Fixed an issue where the doc could not be built due to some changes in matplotlib's API (Issue #403, PR #402)
- Replaced Numpy C Compiler with Setuptools C Compiler due to deprecation issues (Issue #408, PR #409)
- Fixed weak optimal transport docstring (Issue #404, PR #410)
- Fixed error with parameter `log=True` for `SinkhornLpl1Transport` (Issue #412, PR #413)
- Fixed an issue about `warn` parameter in `sinkhorn2` (PR #417)
- Fix an issue where the parameter `stopThr` in `empirical_sinkhorn_divergence` was rendered useless by subcalls that explicitly specified `stopThr=1e-9` (Issue #421, PR #422)
- Fixed a bug breaking an example where we would try to make an array of arrays of different shapes (Issue #424, PR #425)

## 0.8.2

This release introduces several notable features, the least important but most exciting one being that we now have a logo for the toolbox (color and dark background):

![](https://pythonot.github.io/master/_images/logo.svg)![](https://pythonot.github.io/master/_static/logo_dark.svg)

This logo is generated with matplotlib, using the solution of an OT problem provided by POT (with `ot.emd`). Generating the logo can be done with a simple Python script also provided in the [documentation gallery](https://pythonot.github.io/auto_examples/others/plot_logo.html#sphx-glr-auto-examples-others-plot-logo-py).

New OT solvers include [Weak OT](https://pythonot.github.io/gen_modules/ot.weak.html#ot.weak.weak_optimal_transport) and [OT with factored coupling](https://pythonot.github.io/gen_modules/ot.factored.html#ot.factored.factored_optimal_transport) that can be used on large datasets. The [Majorization Minimization](https://pythonot.github.io/gen_modules/ot.unbalanced.html?highlight=mm_#ot.unbalanced.mm_unbalanced) solvers for non-regularized Unbalanced OT are now also available. We also now provide an implementation of [GW and FGW unmixing](https://pythonot.github.io/gen_modules/ot.gromov.html#ot.gromov.gromov_wasserstein_linear_unmixing) and [dictionary learning](https://pythonot.github.io/gen_modules/ot.gromov.html#ot.gromov.gromov_wasserstein_dictionary_learning).

It is now possible to use autodiff to solve entropic and quadratic regularized OT in the dual, for full or stochastic optimization, thanks to the new functions to compute the dual loss for [entropic](https://pythonot.github.io/gen_modules/ot.stochastic.html#ot.stochastic.loss_dual_entropic) and [quadratic](https://pythonot.github.io/gen_modules/ot.stochastic.html#ot.stochastic.loss_dual_quadratic) regularized OT and reconstruct the [OT plan](https://pythonot.github.io/gen_modules/ot.stochastic.html#ot.stochastic.plan_dual_entropic) on part or all of the data. They can be used for instance to solve OT problems with stochastic gradient or for estimating the [dual potentials as neural networks](https://pythonot.github.io/auto_examples/backends/plot_stoch_continuous_ot_pytorch.html#sphx-glr-auto-examples-backends-plot-stoch-continuous-ot-pytorch-py).
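A minimal sketch of this dual approach with the PyTorch backend, assuming the `loss_dual_entropic` and `plan_dual_entropic` signatures shown (one dual variable per sample):

```python
import torch
import ot

xs, xt = torch.randn(100, 2), torch.randn(100, 2)

# one dual potential value per sample, optimized by (stochastic) gradient steps
u = torch.zeros(100, requires_grad=True)
v = torch.zeros(100, requires_grad=True)
opt = torch.optim.Adam([u, v], lr=1e-1)

for _ in range(500):
    opt.zero_grad()
    # the dual objective is maximized, hence the minus sign
    loss = -ot.stochastic.loss_dual_entropic(u, v, xs, xt, reg=1.0)
    loss.backward()
    opt.step()

G = ot.stochastic.plan_dual_entropic(u, v, xs, xt, reg=1.0)  # reconstruct the plan
```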
On the backend front, we now have backend compatible functions and classes in the domain adaptation [`ot.da`](https://pythonot.github.io/gen_modules/ot.da.html#module-ot.da) and unbalanced OT [`ot.unbalanced`](https://pythonot.github.io/gen_modules/ot.unbalanced.html) modules. This means that the DA classes can be used on tensors from all compatible backends. The [free support Wasserstein barycenter](https://pythonot.github.io/gen_modules/ot.lp.html?highlight=free%20support#ot.lp.free_support_barycenter) solver is now also backend compatible.

Finally we have worked on the documentation to provide an update of existing examples in the gallery and several new examples, including [GW dictionary learning](https://pythonot.github.io/auto_examples/gromov/plot_gromov_wasserstein_dictionary_learning.html#sphx-glr-auto-examples-gromov-plot-gromov-wasserstein-dictionary-learning-py), [weak Optimal Transport](https://pythonot.github.io/auto_examples/others/plot_WeakOT_VS_OT.html#sphx-glr-auto-examples-others-plot-weakot-vs-ot-py), [NN based dual potentials estimation](https://pythonot.github.io/auto_examples/backends/plot_stoch_continuous_ot_pytorch.html#sphx-glr-auto-examples-backends-plot-stoch-continuous-ot-pytorch-py) and [Factored coupling OT](https://pythonot.github.io/auto_examples/others/plot_factored_coupling.html#sphx-glr-auto-examples-others-plot-factored-coupling-py).

#### New features
- Remove deprecated `ot.gpu` submodule (PR #361)
- Update examples in the gallery (PR #359)
- Add stochastic loss and OT plan computation for regularized OT and backend examples (PR #360)
- Implementation of factored OT with emd and sinkhorn (PR #358)
- A brand new logo for POT (PR #357)
- Better list of related examples in quick start guide with `minigallery` (PR #334)
- Add optional log-domain Sinkhorn implementation in WDA to support smaller values of the regularization parameter (PR #336)
- Backend implementation for `ot.lp.free_support_barycenter` (PR #340)
- Add weak OT solver + example (PR #341)
- Add backend support for Domain Adaptation and Unbalanced solvers (PR #343)
- Add (F)GW linear dictionary learning solvers + example (PR #319)
- Add links to related PR and Issues in the doc release page (PR #350)
- Add new minimization-maximization algorithms for solving exact Unbalanced OT + example (PR #362)

#### Closed issues
- Fix mass gradient of `ot.emd2` and `ot.gromov_wasserstein2` so that they are centered (Issue #364, PR #363)
- Fix bug in instantiating an `autograd` function `ValFunction` (Issue #337, PR #338)
- Fix POT ABI compatibility with old and new numpy (Issue #346, PR #349)
- Warning when feeding integer cost matrix to EMD solver resulting in an integer transport plan (Issue #345, PR #343)
- Fix bug where gromov_wasserstein2 does not perform backpropagation with CUDA tensors (Issue #351, PR #352)

## 0.8.1.0

*December 2021*

This is a bug fix release that removes the `benchmarks` module from the installation and corrects the documentation generation.

#### Closed issues
- Bug in documentation generation (tag VS master push, PR #332)
- Remove installation of the benchmarks in global namespace (Issue #331, PR #333)

## 0.8.1

*December 2021*

This release fixes several bugs and introduces two new backends: Cupy and Tensorflow. Note that the tensorflow backend works only when tensorflow has NumPy behavior enabled (transpose, for instance, does not follow NumPy semantics by default in tensorflow). We also introduce a simple benchmark on CPU/GPU for the Sinkhorn solver that will be provided in the [backend](https://pythonot.github.io/gen_modules/ot.backend.html) documentation.

This release also brings a few changes in dependencies and compatibility. First we removed tests for Python 3.6, which will not be updated in the future. Also note that POT now depends on Numpy (>= 1.20) because a recent change in ABI makes the wheels incompatible with older numpy versions. If you really need an older numpy, POT will work with no problems but you will need to build it from source.

As always we want to thank the contributors who helped make POT better (and bug free).
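For reference, enabling the NumPy behavior required by the tensorflow backend (mentioned above) looks like the snippet below; this is the same incantation used in the `benchmarks` module shipped with POT (the cost matrix and regularization value here are arbitrary illustrations):

```python
# enable NumPy semantics once, before using POT on tensorflow tensors
from tensorflow.python.ops.numpy_ops import np_config
np_config.enable_numpy_behavior()

import tensorflow as tf
import ot

a = tf.ones((50,), dtype=tf.float64) / 50          # uniform source weights
b = tf.ones((50,), dtype=tf.float64) / 50          # uniform target weights
M = tf.random.uniform((50, 50), dtype=tf.float64)  # cost matrix
G = ot.sinkhorn(a, b, M, reg=1.0)                  # runs on the tensorflow backend
```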
#### New features
- New benchmark for sinkhorn solver on CPU/GPU and between backends (PR #316)
- New tensorflow backend (PR #316)
- New Cupy backend (PR #315)
- Documentation always up-to-date with README, RELEASES, CONTRIBUTING and CODE_OF_CONDUCT files (PR #316, PR #322)

#### Closed issues
- Fix bug in older Numpy ABI (<1.20) (Issue #308, PR #326)
- Fix bug in `ot.dist` function when using a non-Euclidean distance (Issue #305, PR #306)
- Fix gradient scaling for functions using `nx.set_gradients` (Issue #309, PR #310)
- Fix bug in generalized Conditional gradient solver and SinkhornL1L2 (Issue #311, PR #313)
- Fix log error in `gromov_barycenters` (Issue #317, PR #3018)

## 0.8.0

*November 2021*

This new stable release introduces several important features.

First we now have an OpenMP-compatible exact OT solver in `ot.emd`. The OpenMP version is used when the parameter `numThreads` is greater than one and can lead to nice speedups on multi-core machines.

Second we have introduced a backend mechanism that allows using standard POT functions seamlessly on Numpy, Pytorch and Jax arrays. Other backends are coming, but right now POT can already be used seamlessly for training neural networks in Pytorch. Notably we propose the first differentiable computation of the exact OT loss with `ot.emd2` (it can be differentiated w.r.t. both the cost matrix and the sample weights), but also for the classical Sinkhorn loss with `ot.sinkhorn2`, the Wasserstein distance in 1D with `ot.wasserstein_1d`, sliced Wasserstein with `ot.sliced_wasserstein_distance` and Gromov-Wasserstein with `ot.gromov_wasserstein2`.
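A minimal sketch of this differentiable exact OT loss with the PyTorch backend:

```python
import torch
import ot

x = torch.randn(50, 2, requires_grad=True)       # source samples
y = torch.randn(60, 2)                           # target samples
a, b = torch.ones(50) / 50, torch.ones(60) / 60  # uniform weights

M = ot.dist(x, y)        # squared Euclidean cost matrix, differentiable w.r.t. x
loss = ot.emd2(a, b, M)  # exact OT loss, differentiable w.r.t. M and the weights
loss.backward()          # gradients flow back to the sample positions x
```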
Examples of how this new feature can be used are now available in the documentation, where the Pytorch backend is used to estimate a [minimal Wasserstein estimator](https://PythonOT.github.io/auto_examples/backends/plot_unmix_optim_torch.html), a [Generative Network (GAN)](https://PythonOT.github.io/auto_examples/backends/plot_wass2_gan_torch.html), a [sliced Wasserstein gradient flow](https://PythonOT.github.io/auto_examples/backends/plot_sliced_wass_grad_flow_pytorch.html) and for [optimizing the Gromov-Wasserstein distance](https://PythonOT.github.io/auto_examples/backends/plot_optim_gromov_pytorch.html). Note that the Jax backend is still in early development and quite slow at the moment; we strongly recommend that Jax users use the [OTT toolbox](https://github.com/google-research/ott) when possible. As a result of this new feature, the old `ot.gpu` submodule is now deprecated, since GPU implementations can be done using GPU arrays on the torch backend.

Other novel features include implementations of [Sampled Gromov Wasserstein and Pointwise Gromov Wasserstein](https://PythonOT.github.io/auto_examples/gromov/plot_gromov.html#compute-gw-with-a-scalable-stochastic-method-with-any-loss-function), Sinkhorn in log space with `method='sinkhorn_log'`, [Projection Robust Wasserstein](https://PythonOT.github.io/gen_modules/ot.dr.html?highlight=robust#ot.dr.projection_robust_wasserstein), and [debiased Sinkhorn barycenters](https://PythonOT.github.io/auto_examples/barycenters/plot_debiased_barycenter.html).

This release will also simplify the installation process. We now have a `pyproject.toml` that defines the build dependencies, and POT should now build even when cython is not installed yet. Also we now provide pre-compiled wheels for Linux `aarch64`, which is used on Raspberry Pi and Android phones, and for MacOS on ARM processors.

Finally POT was accepted for publication in the Journal of Machine Learning Research (JMLR) open source software track and we ask the POT users to cite [this paper](https://www.jmlr.org/papers/v22/20-451.html) from now on.

The documentation has been improved, in particular by adding a "Why OT?" section to the quick start guide and several new examples illustrating the new features. The documentation now has two versions: the stable version [https://pythonot.github.io/](https://pythonot.github.io/) corresponding to the last release, and the master version [https://pythonot.github.io/master](https://pythonot.github.io/master) that corresponds to the current master branch on GitHub.

As usual, we want to thank all the POT contributors (now 37 people have contributed to the toolbox). But for this release we thank in particular Nathan Cassereau and Kamel Guerda from the AI support team at [IDRIS](http://www.idris.fr/) for their support to the development of the backend and OpenMP implementations.

#### New features
- OpenMP support for exact OT solvers (PR #260)
- Backend for running POT in numpy/torch + exact solver (PR #249)
- Backend implementation of most functions in `ot.bregman` (PR #280)
- Backend implementation of most functions in `ot.optim` (PR #282)
- Backend implementation of most functions in `ot.gromov` (PR #294, PR #302)
- Test for arrays of different type and device (CPU/GPU) (PR #304, #303)
- Implementation of Sinkhorn in log space with `method='sinkhorn_log'` (PR #290)
- Implementation of regularization path for L2 Unbalanced OT (PR #274)
- Implementation of Projection Robust Wasserstein (PR #267)
- Implementation of Debiased Sinkhorn Barycenters (PR #291)
- Implementation of Sampled Gromov Wasserstein and Pointwise Gromov Wasserstein (PR #275)
- Add `pyproject.toml` and build POT without installing cython first (PR #293)
- Lazy implementation in log space for sinkhorn on samples (PR #259)
- Documentation cleanup (PR #298)
- Two up-to-date documentations, [for the stable release](https://PythonOT.github.io/) and for the [master branch](https://pythonot.github.io/master/)
- Building wheels on ARM for Raspberry Pi and smartphones (PR #238)
- Update build wheels to new version and new Python versions (PR #236, #253)
- Implementation of sliced Wasserstein distance (Issue #202, PR #203)
- Add minimal build to CI and perform pep8 test separately (PR #210)
- Speedup of tests and return run time (PR #262)
- Add "Why OT" discussion to the documentation (PR #220)
- New introductory example to discrete OT in the documentation (PR #191)
- Add templates for Issues/PR on Github (PR #181)

#### Closed issues
- Debug memory leak in GAN example (#254)
- Debug GPU bug (Issue #284, #287, PR #288)
- set_gradients method for JAX backend (PR #278)
- Quicker GAN example for CircleCI build (PR #258)
- Better formatting in Readme (PR #234)
- Debug CI tests (PR #240, #241, #242)
- Bug in Partial OT solver dummy points (PR #215)
- Bug in Armijo linesearch (Issue #184, #198, #281, PR #189, #199, #286)
- Bug in Barycenter Sinkhorn (Issue #134, PR #195)
- Infeasible solution in exact OT (Issues #126, #93, PR #217)
- Doc for Support Barycenters (Issue #200, PR #201)
- Fix labels transport in BaseTransport (Issue #207, PR #208)
- Bug in `emd_1d`, bounds not respected (Issue #169, PR #170)
- Removed Python 2.7 support and updated codecov file (PR #178)
- Add normalization for WDA and test it (PR #172, #296)
- Cleanup code for new version of `flake8` (PR #176)
- Fixed requirements in `setup.py` (PR #174)
- Removed specific MacOS flags (PR #175)

## 0.7.0

*May 2020*

This is the new stable release for POT. We made a lot of changes in the documentation and added several new features such as Partial OT, Unbalanced OT, and Multi-source OT Domain Adaptation, along with several bug fixes. One important change is that we have created the GitHub organization [PythonOT](https://github.com/PythonOT) that now owns the main POT repository [https://github.com/PythonOT/POT](https://github.com/PythonOT/POT), and the repository for the new documentation is now hosted at [https://PythonOT.github.io/](https://PythonOT.github.io/).

This is the first release where the Python 2.7 tests have been removed. Most of the toolbox should still work, but we do not offer support for Python 2.7 and will close related Issues.

A lot of changes have been made to the documentation, which is now hosted on [https://PythonOT.github.io/](https://PythonOT.github.io/) instead of readthedocs. It was a hard choice, but readthedocs did not allow us to run sphinx-gallery to update our beautiful examples, and it was a huge amount of work to maintain. The documentation is now automatically compiled and updated on merge. We also removed the notebooks from the repository for space reasons and because they are all available in the [example gallery](https://pythonot.github.io/auto_examples/index.html). Note that the output of the documentation build for each commit in a PR is now available, to check that the doc builds correctly before merging, which was not possible with readthedocs.

The CI framework has also been changed with a move from Travis to GitHub Actions, which gives faster tests on Windows, MacOS and Linux. We also now report our coverage on [Codecov.io](https://codecov.io/gh/PythonOT/POT) and we have a reasonable 92% coverage. We also now generate wheels for a number of OS and Python versions at each merge in the master branch. They are available as outputs of this [action](https://github.com/PythonOT/POT/actions?query=workflow%3A%22Build+dist+and+wheels%22). This will allow simpler multi-platform releases from now on.
In terms of new features we now have [OTDA Classes for unbalanced OT](https://pythonot.github.io/gen_modules/ot.da.html#ot.da.UnbalancedSinkhornTransport), a new Domain Adaptation class for [multi-domain problems (JCPOT)](https://pythonot.github.io/auto_examples/domain-adaptation/plot_otda_jcpot.html#sphx-glr-auto-examples-domain-adaptation-plot-otda-jcpot-py), and several solvers for [Partial Optimal Transport](https://pythonot.github.io/auto_examples/unbalanced-partial/plot_partial_wass_and_gromov.html#sphx-glr-auto-examples-unbalanced-partial-plot-partial-wass-and-gromov-py) problems.

This release is also the moment to thank all the POT contributors (old and new) for helping make POT such a nice toolbox. A lot of changes (also in the API) are coming in the next versions.

#### Features
- New documentation on [https://PythonOT.github.io/](https://PythonOT.github.io/) (PR #160, PR #143, PR #144)
- Documentation build on CircleCI with sphinx-gallery (PR #145, PR #146, #155)
- Run sphinx gallery in CI (PR #146)
- Remove notebooks from repo because available in doc (PR #156)
- Build wheels in CI (#157)
- Move from travis to GitHub Action for Windows, MacOS and Linux (PR #148, PR #150)
- Partial Optimal Transport (PR #141 and PR #142)
- Laplace regularized OTDA (PR #140)
- Multi-source DA with target shift (PR #137)
- Screenkhorn algorithm (PR #121)

#### Closed issues
- Add JMLR paper to the readme and Mathieu Blondel to the Acknowledgments (PR #231, #232)
- Bug in Unbalanced OT example (Issue #127)
- Clean Cython output when calling setup.py clean (Issue #122)
- Various Macosx compilation problems (Issue #113, Issue #118, PR #130)
- EMD dimension mismatch (Issue #114, fixed in PR #116)
- 2D barycenter bug for non-square images (Issue #124, fixed in PR #132)
- Bad value in EMD 1D (Issue #138, fixed in PR #139)
- Log bugs for Gromov-Wasserstein solver (Issue #107, fixed in PR #108)
- Weight issues in barycenter function (PR #106)

## 0.6.0

*July 2019*

This is the first official stable release of POT and this means a jump to 0.6! The library has been used in the wild for a while now and we have reached a state where a lot of fundamental OT solvers are available and tested. It has been quite stable in the last months but kept the beta flag in its PyPI classifiers until now.

Note that this release will be the last one officially supporting Python 2.7 (see https://python3statement.org/ for more reasons). For the next release we will keep the Travis tests for Python 2 but will make them non-mandatory for merging in 2020.

The features of a toolbox designed for solving mathematical and research problems are never complete, but with the new contributions we now implement algorithms and solvers from 24 scientific papers (listed in the README.md file). New features include a direct implementation of the [empirical Sinkhorn divergence](https://pot.readthedocs.io/en/latest/all.html#ot.bregman.empirical_sinkhorn_divergence), a new efficient (Cython implementation) solver for [EMD in 1D](https://pot.readthedocs.io/en/latest/all.html#ot.lp.emd_1d) and the corresponding [Wasserstein 1D](https://pot.readthedocs.io/en/latest/all.html#ot.lp.wasserstein_1d). We now also have implementations for [Unbalanced OT](https://github.com/rflamary/POT/blob/master/notebooks/plot_UOT_1D.ipynb) and a solver for [Unbalanced OT barycenters](https://github.com/rflamary/POT/blob/master/notebooks/plot_UOT_barycenter_1D.ipynb).
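A minimal sketch of the unbalanced entropic solver on two histograms with different total masses (using `ot.unbalanced.sinkhorn_unbalanced`, where `reg` weights the entropy term and `reg_m` relaxes the marginal constraints; the data is an arbitrary illustration):

```python
import numpy as np
import ot

a = np.array([0.5, 0.4, 0.1])       # source histogram (total mass 1.0)
b = np.array([0.2, 0.2, 0.2, 0.2])  # target histogram (total mass 0.8 is allowed)

x = np.arange(3, dtype=float).reshape(-1, 1)
y = np.arange(4, dtype=float).reshape(-1, 1)
M = ot.dist(x, y)                   # cost matrix between bin positions

G = ot.unbalanced.sinkhorn_unbalanced(a, b, M, reg=0.1, reg_m=1.0)
```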
A new variant of the Gromov-Wasserstein divergence called [Fused Gromov-Wasserstein](https://pot.readthedocs.io/en/latest/all.html?highlight=fused_#ot.gromov.fused_gromov_wasserstein) has also been contributed, with examples of use on [structured data](https://github.com/rflamary/POT/blob/master/notebooks/plot_fgw.ipynb) and for computing [barycenters of labeled graphs](https://github.com/rflamary/POT/blob/master/notebooks/plot_barycenter_fgw.ipynb).

A lot of work has been done on the documentation with several new examples corresponding to the new features and a lot of corrections in the docstrings. But the most visible change is a new [quick start guide](https://pot.readthedocs.io/en/latest/quickstart.html) for POT that gives several pointers about which functions or classes can solve which specific OT problem. When possible, a link is provided to relevant examples.

We will also provide with this release some pre-compiled Python wheels for Linux 64bit on GitHub and pip. This will simplify the install process, which previously required a C compiler and numpy/cython to be already installed.

Finally we would like to acknowledge and thank the numerous contributors of POT who have helped build the foundation in the past and are still contributing to bring new features and solvers to the library.

#### Features
* Add compiled manylinux 64bits wheels to pip releases (PR #91)
* Add quick start guide (PR #88)
* Make doctest work on travis (PR #90)
* Update documentation (PR #79, PR #84)
* Solver for EMD in 1D (PR #89)
* Solvers for regularized unbalanced OT (PR #87, PR #99)
* Solver for Fused Gromov-Wasserstein (PR #86)
* Add empirical Sinkhorn and empirical Sinkhorn divergences (PR #80)

#### Closed issues
- Issue #59 fail when using "pip install POT" (new details in doc + hopefully wheels)
- Issue #85 Cannot run gpu modules
- Issue #75 Greenkhorn does not return log (solved in PR #76)
- Issue #82 Gromov-Wasserstein fails when the cost matrices are slightly different
- Issue #72 Macosx build problem

## 0.5.0

*Sep 2018*

POT is 2 years old! This release brings numerous new features to the toolbox, as listed below, but also several bug corrections. Among the new features, we can highlight a [non-regularized Gromov-Wasserstein solver](https://github.com/rflamary/POT/blob/master/notebooks/plot_gromov.ipynb), a new [greedy variant of Sinkhorn](https://pot.readthedocs.io/en/latest/all.html#ot.bregman.greenkhorn), [non-regularized](https://pot.readthedocs.io/en/latest/all.html#ot.lp.barycenter), [convolutional (2D)](https://github.com/rflamary/POT/blob/master/notebooks/plot_convolutional_barycenter.ipynb) and [free support](https://github.com/rflamary/POT/blob/master/notebooks/plot_free_support_barycenter.ipynb) Wasserstein barycenters, and [smooth](https://github.com/rflamary/POT/blob/prV0.5/notebooks/plot_OT_1D_smooth.ipynb) and [stochastic](https://pot.readthedocs.io/en/latest/all.html#ot.stochastic.sgd_entropic_regularization) implementations of entropic OT.

POT 0.5 also comes with a rewriting of ot.gpu using the cupy framework instead of the unmaintained cudamat. Note that while we tried to keep changes to the minimum, the OTDA classes were deprecated. If you are happy with the cudamat implementation, we recommend you stay with stable release 0.4 for now.

The code quality has also improved, with 92% code coverage in tests that is now printed to the log in the Travis builds. The documentation has also been greatly improved with new modules and examples/notebooks.
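For instance, the free support barycenter solver mentioned above takes a list of weighted point clouds and optimizes the support of the barycenter directly. A minimal sketch, assuming the `ot.lp.free_support_barycenter` signature below:

```python
import numpy as np
import ot

rng = np.random.RandomState(0)
measures_locations = [rng.randn(30, 2), rng.randn(40, 2) + 2]  # two point clouds
measures_weights = [ot.unif(30), ot.unif(40)]                  # their weights

X_init = rng.randn(20, 2)  # initial support of the barycenter (20 free points)
X_bary = ot.lp.free_support_barycenter(measures_locations, measures_weights, X_init)
```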
This new release is full of new features and corrections, thanks to the old and new POT contributors (you can see the list in the [readme](https://github.com/rflamary/POT/blob/master/README.md)).

#### Features
* Add non-regularized Gromov-Wasserstein solver (PR #41)
* Linear OT mapping between empirical distributions and 90% test coverage (PR #42)
* Add log parameter in class EMDTransport and SinkhornLpL1Transport (PR #44)
* Add Markdown format for PyPI (PR #45)
* Test for Python 3.5 and 3.6 on Travis (PR #46)
* Non-regularized Wasserstein barycenter with scipy linear solver and/or cvxopt (PR #47)
* Rename dataset functions to be more sklearn compliant (PR #49)
* Smooth and sparse Optimal transport implementation with entropic and quadratic regularization (PR #50)
* Stochastic OT in the dual and semi-dual (PR #52 and PR #62)
* Free support barycenters (PR #56)
* Speed-up Sinkhorn function (PR #57 and PR #58)
* Add convolutional Wasserstein barycenters for 2D images (PR #64)
* Add Greedy Sinkhorn variant (Greenkhorn) (PR #66)
* Big ot.gpu update with cupy implementation (instead of unmaintained cudamat) (PR #67)

#### Deprecation

Deprecated OTDA Classes were removed from ot.da and ot.gpu for version 0.5 (PR #48 and PR #67). The deprecation message had been there for a year, since 0.4, and it was time to pull the plug.

#### Closed issues
* Issue #35: remove import plot from ot/__init__.py (See PR #41)
* Issue #43: Unusable parameter log for EMDTransport (See PR #44)
* Issue #55: UnicodeDecodeError: 'ascii' while installing with pip

## 0.4
*15 Sep 2017*

This release contains a lot of contributions from new contributors.

#### Features
* Automatic notebooks and doc update (PR #27)
* Add Gromov-Wasserstein solver and Gromov Barycenters (PR #23)
* emd and emd2 can now return dual variables and have max_iter (PR #29 and PR #25)
* New domain adaptation classes compatible with scikit-learn (PR #22)
* Proper tests with pytest on travis (PR #19)
* PEP 8 tests (PR #13)

#### Closed issues
* emd convergence problem due to fixed max iterations (#24)
* Semi-supervised DA error (#26)

## 0.3.1
*11 Jul 2017*

* Correct bug in emd on windows

## 0.3
*7 Jul 2017*

* emd* and sinkhorn* are now performed in parallel for multiple target distributions
* emd and sinkhorn are for OT matrix computation
* emd2 and sinkhorn2 are for OT loss computation
* new notebooks for emd computation and Wasserstein Discriminant Analysis
* relocate notebooks
* update documentation
* clean_zeros(a,b,M) for removing zeros in sparse distributions
* GPU implementations for sinkhorn and group lasso regularization

## V0.2
*7 Apr 2017*

* New dimensionality reduction method (WDA)
* Efficient method emd2 returns only transport (in parallel if several histograms given)

## 0.1.11
*5 Jan 2017*

* Add sphinx gallery for better documentation
* Small efficiency tweak in sinkhorn
* Add simple tic() toc() functions for timing

## 0.1.10
*7 Nov 2016*

* numerical stabilization for sinkhorn (log domain and epsilon scaling)

## 0.1.9
*4 Nov 2016*

* Update classes and examples for domain adaptation
* Joint OT matrix and mapping estimation

## 0.1.7
*31 Oct 2016*

* Original Domain adaptation classes

## 0.1.3

* PyPI works

## First pre-release
*28 Oct 2016*

It provides the following solvers:
* OT solver for the linear program / Earth Mover's Distance.
* Entropic regularization OT solver with Sinkhorn-Knopp Algorithm.
* Bregman projections for Wasserstein barycenter [3] and unmixing.
* Optimal transport for domain adaptation with group lasso regularization
* Conditional gradient and Generalized conditional gradient for regularized OT.

Some demonstrations (both in Python and Jupyter Notebook format) are available in the examples folder.
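These original solvers remain the core of the library today; a minimal usage sketch with the current API (the random data is an arbitrary illustration):

```python
import numpy as np
import ot

a = ot.unif(5)  # uniform source histogram
b = ot.unif(6)  # uniform target histogram
M = ot.dist(np.random.randn(5, 2), np.random.randn(6, 2))  # cost matrix

G_exact = ot.emd(a, b, M)               # linear program / Earth Mover's Distance plan
G_reg = ot.sinkhorn(a, b, M, reg=1e-1)  # entropic regularization, Sinkhorn-Knopp
```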
python-pot-0.9.3+dfsg/benchmarks/
python-pot-0.9.3+dfsg/benchmarks/__init__.py
from . import benchmark
from . import sinkhorn_knopp
from . import emd

__all__ = ["benchmark", "sinkhorn_knopp", "emd"]
python-pot-0.9.3+dfsg/benchmarks/benchmark.py
# /usr/bin/env python3
# -*- coding: utf-8 -*-

from ot.backend import get_backend_list, jax, tf
import gc


def setup_backends():
    if jax:
        from jax.config import config
        config.update("jax_enable_x64", True)

    if tf:
        from tensorflow.python.ops.numpy_ops import np_config
        np_config.enable_numpy_behavior()


def exec_bench(setup, tested_function, param_list, n_runs, warmup_runs):
    backend_list = get_backend_list()
    for i, nx in enumerate(backend_list):
        if nx.__name__ == "tf" and i < len(backend_list) - 1:
            # Tensorflow should be the last one to be benchmarked because
            # as far as I'm aware, there is no way to force it to release
            # GPU memory. Hence, if any other backend is benchmarked after
            # Tensorflow and requires the usage of a GPU, it will not have the
            # full memory available and you may have a GPU Out Of Memory error
            # even though your GPU can technically hold your tensors in memory.
            backend_list.pop(i)
            backend_list.append(nx)
            break

    inputs = [setup(param) for param in param_list]
    results = dict()
    for nx in backend_list:
        for i in range(len(param_list)):
            print(nx, param_list[i])
            args = inputs[i]
            results_nx = nx._bench(
                tested_function,
                *args,
                n_runs=n_runs,
                warmup_runs=warmup_runs
            )
            gc.collect()
            results_nx_with_param_in_key = dict()
            for key in results_nx:
                new_key = (param_list[i], *key)
                results_nx_with_param_in_key[new_key] = results_nx[key]
            results.update(results_nx_with_param_in_key)
    return results
def convert_to_html_table(results, param_name, main_title=None, comments=None):
    string = "<table>\n"
    keys = list(results.keys())
    params, names, devices, bitsizes = zip(*keys)
    devices_names = sorted(list(set(zip(devices, names))))
    params = sorted(list(set(params)))
    bitsizes = sorted(list(set(bitsizes)))
    length = len(devices_names) + 1
    cpus_cols = list(devices).count("CPU") / len(bitsizes) / len(params)
    gpus_cols = list(devices).count("GPU") / len(bitsizes) / len(params)
    assert cpus_cols + gpus_cols == len(devices_names)

    if main_title is not None:
        string += f'<tr><th colspan="{length}">{str(main_title)}</th></tr>\n'

    for i, bitsize in enumerate(bitsizes):
        if i != 0:
            # empty spacer row between bitsize blocks
            string += f'<tr><td colspan="{length}">&nbsp;</td></tr>\n'

        # make bitsize header
        text = f"{bitsize} bits"
        if comments is not None:
            text += " - "
            if isinstance(comments, (tuple, list)) and len(comments) == len(bitsizes):
                text += str(comments[i])
            else:
                text += str(comments)
        string += f'<tr><th>Bitsize</th>'
        string += f'<th colspan="{length - 1}">{text}</th></tr>\n'

        # make device header
        string += f'<tr><th>Device</th>'
        string += f'<th colspan="{cpus_cols}">CPU</th>'
        string += f'<th colspan="{gpus_cols}">GPU</th></tr>\n'

        # make param_name / backend header
        string += f'<tr><th>{param_name}</th>'
        for device, name in devices_names:
            string += f'<th>{name}</th>'
        string += "</tr>\n"

        # make results rows
        for param in params:
            string += f'<tr><td>{param}</td>'
            for device, name in devices_names:
                key = (param, name, device, bitsize)
                string += f'<td>{results[key]:.4f}</td>'
            string += "</tr>\n"

    string += "</table>"
    return string
" return string python-pot-0.9.3+dfsg/benchmarks/emd.py000066400000000000000000000015171455713015700200330ustar00rootroot00000000000000# /usr/bin/env python3 # -*- coding: utf-8 -*- import numpy as np import ot from .benchmark import ( setup_backends, exec_bench, convert_to_html_table ) def setup(n_samples): rng = np.random.RandomState(789465132) x = rng.randn(n_samples, 2) y = rng.randn(n_samples, 2) a = ot.utils.unif(n_samples) M = ot.dist(x, y) return a, M if __name__ == "__main__": n_runs = 100 warmup_runs = 10 param_list = [50, 100, 500, 1000, 2000, 5000] setup_backends() results = exec_bench( setup=setup, tested_function=lambda a, M: ot.emd(a, a, M), param_list=param_list, n_runs=n_runs, warmup_runs=warmup_runs ) print(convert_to_html_table( results, param_name="Sample size", main_title=f"EMD - Averaged on {n_runs} runs" )) python-pot-0.9.3+dfsg/benchmarks/sinkhorn_knopp.py000066400000000000000000000016621455713015700223310ustar00rootroot00000000000000# /usr/bin/env python3 # -*- coding: utf-8 -*- import numpy as np import ot from .benchmark import ( setup_backends, exec_bench, convert_to_html_table ) def setup(n_samples): rng = np.random.RandomState(123456789) a = rng.rand(n_samples // 4, 100) b = rng.rand(n_samples, 100) wa = ot.unif(n_samples // 4) wb = ot.unif(n_samples) M = ot.dist(a.copy(), b.copy()) return wa, wb, M if __name__ == "__main__": n_runs = 100 warmup_runs = 10 param_list = [50, 100, 500, 1000, 2000, 5000] setup_backends() results = exec_bench( setup=setup, tested_function=lambda *args: ot.bregman.sinkhorn(*args, reg=1, stopThr=1e-7), param_list=param_list, n_runs=n_runs, warmup_runs=warmup_runs ) print(convert_to_html_table( results, param_name="Sample size", main_title=f"Sinkhorn Knopp - Averaged on {n_runs} runs" )) python-pot-0.9.3+dfsg/codecov.yml000066400000000000000000000023321455713015700167400ustar00rootroot00000000000000# Docs ref: https://docs.codecov.io/docs/codecovyml-reference # Validation check: $ curl --data-binary @codecov.yml https://codecov.io/validate codecov: token: 057953e4-d263-41c0-913c-5d45c0371df9 bot: "codecov-io" strict_yaml_branch: "yaml-config" require_ci_to_pass: yes notify: wait_for_ci: yes coverage: precision: 2 round: down range: "70...100" status: project: default: base: auto # target to compare against target: auto # target "X%" coverage to hit on project threshold: 1% # allow this much decrease from base if_ci_failed: error patch: default: base: auto # target to compare against target: 50% # target "X%" coverage to hit on patch # threshold: 50% # allow this much decrease on patch changes: false parsers: gcov: branch_detection: conditional: yes loop: yes method: no macro: no # https://docs.codecov.io/docs/ignoring-paths ignore: - "ot/helpers/openmp_helpers.py" # https://docs.codecov.io/docs/pull-request-comments comment: layout: header, diff, sunburst, uncovered behavior: default require_changes: true # if true: only post the comment if coverage changes python-pot-0.9.3+dfsg/docs/000077500000000000000000000000001455713015700155235ustar00rootroot00000000000000python-pot-0.9.3+dfsg/docs/Makefile000066400000000000000000000173031455713015700171670ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = build # User-friendly check for sphinx-build ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) $(error The '$(SPHINXBUILD)' command was not found. 
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif

# Internal variables.
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source

.PHONY: help
help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html       to make standalone HTML files"
	@echo "  dirhtml    to make HTML files named index.html in directories"
	@echo "  singlehtml to make a single large HTML file"
	@echo "  pickle     to make pickle files"
	@echo "  json       to make JSON files"
	@echo "  htmlhelp   to make HTML files and a HTML help project"
	@echo "  qthelp     to make HTML files and a qthelp project"
	@echo "  applehelp  to make an Apple Help Book"
	@echo "  devhelp    to make HTML files and a Devhelp project"
	@echo "  epub       to make an epub"
	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
	@echo "  text       to make text files"
	@echo "  man        to make manual pages"
	@echo "  texinfo    to make Texinfo files"
	@echo "  info       to make Texinfo files and run them through makeinfo"
	@echo "  gettext    to make PO message catalogs"
	@echo "  changes    to make an overview of all changed/added/deprecated items"
	@echo "  xml        to make Docutils-native XML files"
	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
	@echo "  linkcheck  to check all external links for integrity"
	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
	@echo "  coverage   to run coverage check of the documentation (if enabled)"

.PHONY: clean
clean:
	rm -rf $(BUILDDIR)/*
	rm -rf source/gen_modules/*
	rm -rf source/auto_examples/*

.PHONY: html
html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

html-noplot:
	$(SPHINXBUILD) -D plot_gallery=0 -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

.PHONY: dirhtml
dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

.PHONY: singlehtml
singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

.PHONY: pickle
pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

.PHONY: json
json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

.PHONY: htmlhelp
htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."
.PHONY: qthelp
qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/POT.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/POT.qhc"

.PHONY: applehelp
applehelp:
	$(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
	@echo
	@echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
	@echo "N.B. You won't be able to view it unless you put it in" \
	      "~/Library/Documentation/Help or install it in your application" \
	      "bundle."

.PHONY: devhelp
devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/POT"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/POT"
	@echo "# devhelp"

.PHONY: epub
epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

.PHONY: latex
latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	      "(use \`make latexpdf' here to do that automatically)."

.PHONY: latexpdf
latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

.PHONY: latexpdfja
latexpdfja:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through platex and dvipdfmx..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

.PHONY: text
text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

.PHONY: man
man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

.PHONY: texinfo
texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	      "(use \`make info' here to do that automatically)."

.PHONY: info
info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	make -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."

.PHONY: gettext
gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."

.PHONY: changes
changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

.PHONY: linkcheck
linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

.PHONY: doctest
doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."
.PHONY: coverage
coverage:
	$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
	@echo "Testing of coverage in the sources finished, look at the " \
	      "results in $(BUILDDIR)/coverage/python.txt."

.PHONY: xml
xml:
	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
	@echo
	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."

.PHONY: pseudoxml
pseudoxml:
	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
	@echo
	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
python-pot-0.9.3+dfsg/docs/make.bat000066400000000000000000000161241455713015700171340ustar00rootroot00000000000000
@ECHO OFF

REM Command file for Sphinx documentation

if "%SPHINXBUILD%" == "" (
	set SPHINXBUILD=sphinx-build
)
set BUILDDIR=build
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source
set I18NSPHINXOPTS=%SPHINXOPTS% source
if NOT "%PAPER%" == "" (
	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
	set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
)

if "%1" == "" goto help

if "%1" == "help" (
	:help
	echo.Please use `make ^<target^>` where ^<target^> is one of
	echo.  html       to make standalone HTML files
	echo.  dirhtml    to make HTML files named index.html in directories
	echo.  singlehtml to make a single large HTML file
	echo.  pickle     to make pickle files
	echo.  json       to make JSON files
	echo.  htmlhelp   to make HTML files and a HTML help project
	echo.  qthelp     to make HTML files and a qthelp project
	echo.  devhelp    to make HTML files and a Devhelp project
	echo.  epub       to make an epub
	echo.  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter
	echo.  text       to make text files
	echo.  man        to make manual pages
	echo.  texinfo    to make Texinfo files
	echo.  gettext    to make PO message catalogs
	echo.  changes    to make an overview over all changed/added/deprecated items
	echo.  xml        to make Docutils-native XML files
	echo.  pseudoxml  to make pseudoxml-XML files for display purposes
	echo.  linkcheck  to check all external links for integrity
	echo.  doctest    to run all doctests embedded in the documentation if enabled
	echo.  coverage   to run coverage check of the documentation if enabled
	goto end
)

if "%1" == "clean" (
	for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
	del /q /s %BUILDDIR%\*
	goto end
)

REM Check if sphinx-build is available and fallback to Python version if any
%SPHINXBUILD% 1>NUL 2>NUL
if errorlevel 9009 goto sphinx_python
goto sphinx_ok

:sphinx_python

set SPHINXBUILD=python -m sphinx.__init__
%SPHINXBUILD% 2> nul
if errorlevel 9009 (
	echo.
	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
	echo.installed, then set the SPHINXBUILD environment variable to point
	echo.to the full path of the 'sphinx-build' executable. Alternatively you
	echo.may add the Sphinx directory to PATH.
	echo.
	echo.If you don't have Sphinx installed, grab it from
	echo.http://sphinx-doc.org/
	exit /b 1
)

:sphinx_ok

if "%1" == "html" (
	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The HTML pages are in %BUILDDIR%/html.
	goto end
)

if "%1" == "dirhtml" (
	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
	goto end
)

if "%1" == "singlehtml" (
	%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
	goto end
)

if "%1" == "pickle" (
	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished; now you can process the pickle files.
	goto end
)

if "%1" == "json" (
	%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished; now you can process the JSON files.
	goto end
)

if "%1" == "htmlhelp" (
	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished; now you can run HTML Help Workshop with the ^
.hhp project file in %BUILDDIR%/htmlhelp.
	goto end
)

if "%1" == "qthelp" (
	%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished; now you can run "qcollectiongenerator" with the ^
.qhcp project file in %BUILDDIR%/qthelp, like this:
	echo.^> qcollectiongenerator %BUILDDIR%\qthelp\POT.qhcp
	echo.To view the help file:
	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\POT.qhc
	goto end
)

if "%1" == "devhelp" (
	%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished.
	goto end
)

if "%1" == "epub" (
	%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The epub file is in %BUILDDIR%/epub.
	goto end
)

if "%1" == "latex" (
	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
	goto end
)

if "%1" == "latexpdf" (
	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
	cd %BUILDDIR%/latex
	make all-pdf
	cd %~dp0
	echo.
	echo.Build finished; the PDF files are in %BUILDDIR%/latex.
	goto end
)

if "%1" == "latexpdfja" (
	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
	cd %BUILDDIR%/latex
	make all-pdf-ja
	cd %~dp0
	echo.
	echo.Build finished; the PDF files are in %BUILDDIR%/latex.
	goto end
)

if "%1" == "text" (
	%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The text files are in %BUILDDIR%/text.
	goto end
)

if "%1" == "man" (
	%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The manual pages are in %BUILDDIR%/man.
	goto end
)

if "%1" == "texinfo" (
	%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
	goto end
)

if "%1" == "gettext" (
	%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
	goto end
)

if "%1" == "changes" (
	%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
	if errorlevel 1 exit /b 1
	echo.
	echo.The overview file is in %BUILDDIR%/changes.
	goto end
)

if "%1" == "linkcheck" (
	%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
	if errorlevel 1 exit /b 1
	echo.
	echo.Link check complete; look for any errors in the above output ^
or in %BUILDDIR%/linkcheck/output.txt.
	goto end
)

if "%1" == "doctest" (
	%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
	if errorlevel 1 exit /b 1
	echo.
	echo.Testing of doctests in the sources finished, look at the ^
results in %BUILDDIR%/doctest/output.txt.
	goto end
)

if "%1" == "coverage" (
	%SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage
	if errorlevel 1 exit /b 1
	echo.
	echo.Testing of coverage in the sources finished, look at the ^
results in %BUILDDIR%/coverage/python.txt.
	goto end
)

if "%1" == "xml" (
	%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The XML files are in %BUILDDIR%/xml.
	goto end
)

if "%1" == "pseudoxml" (
	%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
	goto end
)

:end
python-pot-0.9.3+dfsg/docs/nb_build000077500000000000000000000006361455713015700172320ustar00rootroot00000000000000
#!/bin/bash

# remove comment
sed -i "s/#'sphinx\_gallery/'sphinx\_gallery/" source/conf.py
sed -i "s/sys.modules.update/#sys.modules.update/" source/conf.py

make html

# put comment again
sed -i "s/'sphinx\_gallery/#'sphinx\_gallery/" source/conf.py
sed -i "s/#sys.modules.update/sys.modules.update/" source/conf.py

#rsync --out-format="%n" --update source/auto_examples/*.ipynb ../notebooks2

./nb_run_conv
python-pot-0.9.3+dfsg/docs/nb_run_conv000077500000000000000000000033541455713015700177660ustar00rootroot00000000000000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Convert sphinx gallery notebooks from empty to image-filled

Created on Fri Sep 1 16:43:45 2017

@author: rflamary
"""

import sys
import json
import glob
import hashlib
import subprocess
import os

cache_file='cache_nbrun'

path_doc='source/auto_examples/'
path_nb='../notebooks/'


def load_json(fname):
    # load the md5 cache, returning an empty dict if the file is missing
    try:
        f=open(fname)
        nb=json.load(f)
        f.close()
    except (OSError, IOError):
        nb={}
    return nb


def save_json(fname, nb):
    f=open(fname, 'w')
    f.write(json.dumps(nb))
    f.close()


def md5(fname):
    # md5 digest of a file, read in 4 KB chunks
    hash_md5 = hashlib.md5()
    with open(fname, "rb") as f:
        for chunk in iter(lambda: f.read(4096), b""):
            hash_md5.update(chunk)
    return hash_md5.hexdigest()


def to_update(fname, cache):
    # a notebook needs re-execution when it is new or its md5 has changed
    if fname in cache:
        if md5(path_doc+fname)==cache[fname]:
            res=False
        else:
            res=True
    else:
        res=True
    return res


def update(fname, cache):
    # jupyter nbconvert --to notebook --execute mynotebook.ipynb --output target
    subprocess.check_call(['cp', path_doc+fname, path_nb])
    print(' '.join(['jupyter', 'nbconvert', '--to', 'notebook',
                    '--ExecutePreprocessor.timeout=600', '--execute',
                    path_nb+fname, '--inplace']))
    subprocess.check_call(['jupyter', 'nbconvert', '--to', 'notebook',
                           '--ExecutePreprocessor.timeout=600', '--execute',
                           path_nb+fname, '--inplace'])
    cache[fname]=md5(path_doc+fname)


cache=load_json(cache_file)

lst_file=glob.glob(path_doc+'*.ipynb')
lst_file=[os.path.basename(name) for name in lst_file]

for fname in lst_file:
    if to_update(fname, cache):
        print('Updating file: {}'.format(fname))
        update(fname, cache)
        save_json(cache_file, cache)
python-pot-0.9.3+dfsg/docs/requirements.txt000066400000000000000000000001241455713015700210040ustar00rootroot00000000000000
sphinx_gallery
sphinx_rtd_theme
numpydoc
memory_profiler
pillow
networkx
myst-parser
python-pot-0.9.3+dfsg/docs/requirements_rtd.txt000066400000000000000000000002221455713015700216540ustar00rootroot00000000000000
sphinx_gallery
numpydoc
memory_profiler
pillow
networkx
myst-parser
numpy
scipy>=1.0
cython
matplotlib
autograd
pymanopt
cvxopt
scikit-learn
cvxpy
python-pot-0.9.3+dfsg/docs/rtd/000077500000000000000000000000001455713015700163145ustar00rootroot00000000000000
python-pot-0.9.3+dfsg/docs/rtd/conf.py000066400000000000000000000002121455713015700176100ustar00rootroot00000000000000
from recommonmark.parser import CommonMarkParser

source_parsers = {'.md': CommonMarkParser}

source_suffix = ['.md']

master_doc = 'index'
python-pot-0.9.3+dfsg/docs/rtd/index.md000066400000000000000000000003101455713015700177400ustar00rootroot00000000000000

# POT: Python Optimal Transport

The documentation has been moved to:
[https://PythonOT.github.io](https://PythonOT.github.io)
python-pot-0.9.3+dfsg/docs/source/000077500000000000000000000000001455713015700170235ustar00rootroot00000000000000
python-pot-0.9.3+dfsg/docs/source/_static/000077500000000000000000000000001455713015700204515ustar00rootroot00000000000000
python-pot-0.9.3+dfsg/docs/source/_static/images/000077500000000000000000000000001455713015700217165ustar00rootroot00000000000000
python-pot-0.9.3+dfsg/docs/source/_static/images/bak.png000066400000000000000000011230351455713015700231660ustar00rootroot00000000000000
[binary PNG image data (bak.png) omitted]
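Usage note (an editor's sketch, not a file from the package): the docs tooling above is typically driven as follows from the docs/ directory of a checkout. The make targets, nb_build, and nb_run_conv are the files shown above; installing requirements.txt first and having jupyter on the PATH are assumptions about the local environment.

#!/bin/bash
# Sketch: build the POT documentation with the tooling defined above.
pip install -r requirements.txt   # sphinx_gallery, sphinx_rtd_theme, ... (listed above)

make html                         # full Sphinx build into build/html (runs gallery examples)
make html-noplot                  # same build with -D plot_gallery=0, skipping example execution

# nb_build uncomments the sphinx_gallery lines in source/conf.py, rebuilds the
# HTML, restores conf.py, then runs nb_run_conv, which re-executes only the
# notebooks in source/auto_examples/ whose md5 differs from the cache_nbrun record.
./nb_build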
>¸ŘĄVáâůsضÍŐË—i®®05=M§Óá˝wßfbr’z˝ÁÍ×9îŁÂÉčúŐ+TŞUÎ}t–,KůθsçBd´;ť»ŚSŞńî{gq]‹Ş«1¦í"–b :éô–elą{P3‘úDQŚí”P — D ¸ĺÚ@籇©řO´Ţse€”( â Ç-ŁęA$YV¤ŰÉ=–ÓUZ˘…­ŘŠ1¤ŘG>QŽ[ÍĆ0í8ş‡&úë6/Ab ‰^Ă„JrŇf>ĺ‡mäö’ąřwÎfN’ŚRĄZě7 …o6tU`j1aŕ“¤ŕ–ŞŐʱAd„‰ŠSŞř=LíI„® t%Â÷˘ že^ż3 Ôi›Vn‚ĺ*…çĄ`´„)ěddÇÁQá•JĘK¶Çsz‡mGl"7Ł(kA‚5ŃnYI”š2iŘi˛-‡mŰ4ĎĽŽY'UňÄN ű¨¦…’e(ŞŠ‚@ÉR”4-_3ëáEÔĹë°ëtW 1ó@Őv?żíyHF·tÖŃ4Ť J¸{çĽű6+ËK,·nÝŕ˝w~ŠŞ*üĺëßçţ˝yććöóďŹćę*ÝnUSyűÍsíę‚ ŕÇ?ú!ŐZŤ™™ŮmźżŞ‰®JZ‚­& @ =aĚJ‰3‹gvăH„Ęjl˛›”´{ü»©¤Ś›#Ł—čÄCqx·rŔíŁw^ÇiÔ‰Ź˘h¨fŚŁPzžhŔ®Ţ)ŮCv€ä»$źs©6áű>agĘ€‰-çB˝¤Ä‚Ý3S¨şŤĆ<^nÓěúLŹ•Q…•¶ĎrÓÄn“~ńxĄGš„ÔJ*(ú@ţĘä“kÁKĎDd –:ĂŠJ’*Ľrr–Ą–ÇÔ¨ŤĄ â¦'FpÜ2IškĽŹ×ňb@TéópąÇíG+«Y“XďЦ Á*ůü0@Ő­˘MÜď®9Ź€Äěęl,Go| ä‡hłJSöf®® „HÉ0 ]Ő4͢4·} Ô"ˇâüř踥*I¦“eZnˇw·”@SH„QŘÂ0ÄqËD‰Šnmn-±–ňú7:•mçP5<$~×÷}şÝn!ş˝Ő|Ó4 )źµk¸ 0«2°ĘŤÂNă¤Uc÷öEÔĆ4ĄZčöÇÄK󤫏H—îˇ[6éŇ]Â[“®<@wË`•0˘ú ŕT`ů.¬ĚŁL‚˝Ď=őä5hš†˘jDqĘÂ\Jgié1AňŇ+ź!ÍR&'§h®®âő=jµ:i–2==ă8ěÝ7‡9đ>ísźçćÍô{=^|é%:Ý/˝ü˛4%Š"öÎÍŃëőrëç,ďm(ŠĘÉgNqĺňe¦§§q—Z­ÎĚě,µzť[7®3wŕ «++^ąR͡‘ŃQ<ŻO˝Ţ(˛z4|˙ŞÉm"G&K±….eXJ´3n%™†żC̬,6tņ͒\GM±C\-ŁźęrűôoťáÎť»,vÜY(qk>%L"jęíhőźiĂ'ˇ:ň]’2D˛ş PH2ŤL¨¨ŠŔ6`jÔAWS¦FmŇ$A3löÍN1=^gfĚĆÔ¶ fkěź©Ruu cďt•3¦Gm 5E˝~D©Tâء==0‹eęÄA§ĐUČŘ=YB!eĎdSĎăĐě¸i€„T+U¦&FńBđSźůě6ö¤O}&ĂłVXĘ–wJ쪗AdĚŚ»9¤& ™šśää‘9f§Çđ}ŹŞŤ˛†eŔžÉ ~¬±i[ŠőŰÎé-WqXĎUP”ü{”)&¦ić ‚,KÖ9Ťm~ďÖśÖ|?Ä0ll·J”ä¸Î§}Ç’L-8»ÝĘ‘µ’,·"–Üśű÷ĽÂÄÄ$ŁŁcÜşu“ç_x‰3§? ŽcNđŞ˘'1Q˛´ĽDŻŰav÷n,ËćĆŤkD=ů%D}zG*;RG6S˛LŕÚzˇł<| B(…$¨tĚźŁ<'é{–í®É‰ú˝ěžJlݶ4ě4&żMŇ”h§›ĺa§2yo†Ç:ťŰ<ÉÔyZôł<É5•<¸V´„=nŚŁe4cťŤőŘ͆@ˇ“¬ĆłöćĄfť\6l·1aĹwxďÝ÷čöúśŹÓeƵŃ"ÁÍqv ¦ť—ňĂ0ÄvJ„‰J”¤hŞŔŹ 2±ős¸ú2ü@ W\3Ţ´ĹfĺÍn«ŐD×4śR•0VšO¬şšˇĎĐ «pJ“hťőßđH2m`i†!YŘ$ LÓĹ-• 5á—Ijn¶{Śă¸0O-î­0pĂëňŁ=¬ě8qUp,· ˛ '5M3×Ç”z†[ Pă8¦wç2Úč.‚$Cé7Ń$ý6ŞSÂżskj4g˙)‚»—ŃĆv#J#Ë·PŻCäĂĘ<´ŕŐoaTGwt–Fy@ݵkŠĹGŹ8xč0/ĽřËËËT«UnÝĽÁ+ź}Ťé™4Mc˙ýLŚŐŮżo/­V›]»¦™Ţ5ËŘŘŤ‘¦¦¦1-‹Ý»÷Po4(•ĘŘŽÍŘřŤĆ­f“ĂGŹQ.—9pđwďÜ泯ľĆřÄ$ÇŽguu•©éi4Mct4g98xé]»h5[¬2ÉJ“0ڱlÓ.ăG‘%O‚ÔFS˛˘ő­(`ę)A”bZΚśl:ăÚąI,Źďb[UÜ´N’ÄxĘŔ•‹”ĺl•†ZĂTĚb~š©ŘĄĽk—Ű h†Ce RÂôÉŠ×fë';j’04ÜQ“UŰŤ#H Ür='ĽvŰ„Źí”ĐĹlĐÜîJ;Ţ, đĂ”R9ßŕ„Q”'Ě›PĆlěIşąĽŞStôdÜ»qýZ®g;?Ď3§žĺÂůsôzya`yi!33ł¬./“ A˝Ţ`yi‰( ™ś¤Ýj36>ÎĘň ŐZË41 “ýpíĘe 3/ LMMç›hĎăč±ă몴ĂĹŤ­†´sŽĹf¸Ő{0ܵčőz„ó—1v"C’…ŰXŐŞ[!Zy›( çđK8D±\0¨Nb=ľ ŹoA–BóŮô1”ç~Óv řĺv±Řu]TM'ŠS ]ĹĐó°yŰä8Zö™1˝§ĘK©ńc¬ř<—ŚňâŃEŤßł‰Ý 6Ü “r˛¤.‰ išróĆuöÍíG×ő"Ȳ¬Đ’˲üEQÝŃ4­¨2ĄiŠ"Rl#^‡MM…B”h¨z {@‚Čb©l ňťKfăjŢşëK2Q G· IDATeť5]ç.9ľďĂ@a÷´°ŁŐÍÜ™¤ď÷yÎł¨Ü&#Ą¦V8jF¬Wnď[ˇ2 Tő»Ë€ZÎ<ŻO= -‡\ ´÷<Ź4¨ŘOVđrŰHnűë{¸ÂÔÄĆ|/'Ť=ÍiMŽ~lQ®4ăxh~đÄú©Mąš«Qô:ËŘZľÁŠR•$ł°’Ăd!iOü´ë×u˝xö6e„ŰŞ ¤´ÄÚîĎ) [˛ĄÇń–¬ęF#Ż6¶ŰmĽ oˇ Ő­tVĐě"čQćó´ÎżŤuň řçŢMÇš{QĎqëZŚúă˙š¦˙ň7 Ç­;y-›ą­•Ëe4ĂÄcL]CW)đS˛›Đëukµ6·lF…ş‚tŤĺÝm/JaĹşS˘¦iÔëąóQ§Ó)섇ź\śýŞŞ2>>±iě&BţŚŽŽî»nlgż«)‚q#bŹíájąú‡Ş(dŠĆBdq»o<%!RxˇÖŁŞ†O}ĆÎś9ÍŢ˝ű Y©ĄĄ%&&&xîąçxăŤ7xéĄ—Šż»‘™˙iě`‡ŮŃfÓëő6…=©J.źÍ 0Ť@’Ş$XŘN!ľ×)HO™P R W_O(N3•T«R*•‰â^ŻKeŕ´•“¦˘uZŮ[ ĉJ‚Ť[Şf K˝G,Š»řJWqxĆ<†2‡E‰Jµž“zşËh¤¤Š“CŘďO36Xűý®ţ¤Öy&Ą†îşüh©ĹőNŹ“„†[ŞäťÄ ‹©ĹO5‚ś|ě”Gń}ÇqňŞ˛×ĂŃĂu8Ţ0Ńp*"qSéćŽ_"'ő¦J®Ő.‰NQŞˇyLI“k@×4MÓ Ă°m”¤ŻśĐÔ+äČę4EťrĄ•ĽyqË4×,teÂşS¨P¬żĚ=â8.ČĂcĽÚív NŐtĐD‹wAQ(í9Śa—ń˝je˙ň»¨†‰}ň‹¤JŢĺ(ĹmřË˙ şË¤ăč˝üM2łD©T*şŁĂÄ·á!eâü0!I3Ę®Iż×[g€÷BĆrM”̸x÷’TE1ë1= V¶ŇźŽôĺşn±ö˝^oť-÷NćË1‡d,ß6ą•ĂV3f-źi+(¸ŞŞťë}‡ĄmÔ\-ăĺZUl_˛W“%Üŕ ě‘i˛8Ł.ÁęcĽŇŻ#Ô'q1ň˘†ĺ§†Ĺ«‡“ŤáQ«ŐÖ%ĂţÄ›}@×R,m˝vk&Ŕ‹ó$-W=čA꣨:ş’˘«ëŻŮO,ĘŐQEa±ő»é'Ě*Gqs¦ˇçőPł ŻzÚ,FÓÎťĆZý&ŁL™UÚš |č8屜ťŮic*=TE¤ Aš[Xţ,lÜaů+EÉ• t%\מňS‡J5Gďµ— ¬­ü0ĹÂÄq«96Óëb¨á¶! 
Ręč §”Ř<݇Ąć&I¦ Zc͇7×Ü<°Ş¤“ls| Ő0›7ŽăuBÜO“y‘óĄ7y’$t»ÝŻ˝2I–5Š˘"A€őµÓé ’ˇd) €ŞA#T T×¶ĐL żŰ&‰B„a,_×u‰W˘ď÷đO}ש5ÂÁ†ŕ.Éx2¸ËdŇ c˛LPvL:ťN±Î˛Ę=¬ůÜëő@čćšÎ¦`.–iäÁ?mŁ*‚0ŃH±q˘ţé÷H»]´ą(ű±1ęH†ô3—äşźŐVYţLyLÓükOnĺPݶÇn;XWlđ…Á]ßf14¶„ŤŤ›1§*[ăŮäĐKśůř2ź˙ň?ŕŰßţ6~sKA«kÁĎŢ ž( ëµĺyš rÜ ¸źN´666jŢíŘJďö;ÔňŽł$·zxFÁ}~ŔśµĹŠ4 [ąĂůŔeO >BńÎv@Gî_-0˛«4ŇŻŕĚ­€pPe9ÉĆ5ç”Öˇ™®OW÷Ëúuµ@; éâ`© [8ćNYŻ´¬ä§¤” GSI^^ °ć«—'8=zޏ@[-rRŢ_‘ľ€( °Ťh¦ęT’+ąrđ|ťdŽ*©©E*şu–…k;ZVqf Ś­ęÁ^Ë^c{ő K΀ڒ; dŮŰ5XVß_éçyNŤpĚt×ęA”JüöŁáfĄ*$ą Ťz~ʆA»łP±ö·é8îXżĚ ëűN&©·s˙“Őó*ká˝$Mv›Ż+ Q˝%][Ý"×J:Ŕë`¸ąąy k2¸é“;Ŕüüú6±€Ç]â<{®’‘Ś#ň4ŘÓiL0P*U%©I.fĺ$ E2Ŕ±IˇF“V«SŸ`m ă+$J%$…]ëŠN:>†YŽc쯒 ˙¦cI”yXĹ{kŽ_»Ţçç_żţÖťGx˘Űä8}…ź¸sŹÎWÁ´0‘㎠žďîŇQLstçI“×!ÄVGQZŤq,"Mb:ÝJ‹z8čáŁ= 7“óßJ’©‹-ZŁZ›LĘüÍ2ßóĽúđ«uŻogl×*Źă)eµ¦q\Üg¸—ÉĂ^µeÜ\˝N{p‰a÷Nü] t,ÖŠqW‚V‹ ® Ű‹ “CÇA˝źüâykŔ'oŔ_\‚/®Ăżx抣Ţ×·:>ţÄőOĆRĄ*=a-7:ŤvěZ«}·ůł¬ßćö ‘+›©Ă°0qĆ䥾Q°âV¬ŰáëvÁÎąÓ‹ö…#Čô"Ťě«8sG)łĘaZ(ëmÜCn,Î|Cł0űµ„ÄdŰ`R Ďóş<żŰüBäĄÄ0TŤ?‘cĺ8Çő¦7ĆžHŇŇĹoT¸Ó+Ă7YăRý™-é˛lµČÓ4Ż´ç0\˘$GPX=0Ç>ëY“啲‚.qZ‚fk|ÚőqäÎdk‹´S(ąŤ{ĐCĄ˙ŤDŤf‹\IJeâűU˘”„˝©jËöď—˘RFPHüf›˘4I˛ĽĆÚé‘ácŁŐëb E6†–Tó5ž. C,Â}1ͦT$IĘĆć°&j·…ý 2E%´^ °ßţÂî7_c¸tP)Šâ¶ľ7Ň™VVĹŁ[ÎvV»ëş•ÂÁř3âĆ2BVłÉĎŰëęůieľŕŮfí±ľŰ=hF˛iš<72¸ËűZđ‘%čšpŹóĹh‹Ůd®°öŹ˙üGĹąű>„eŃůč2úâg`ăÖcßQş[­-ĂöJÎöÍč­<^ŰýF¦ 6r›©‹e”4dJaRpČÉh[%Q!IÇP…;ý„e;fď0¬°˘hšg°š‡™o$ܸ~ž‹—oqk˝Ď•+׸téGŽaqqqßk›$ŤíGzšdGďF <Ď[*A^H„Pfź-l>eJeVŐ•‰ł1ˇQ(r<<żęô\žťŠĂóF‡{Ę” Š0-ĎoĄ‚˛,¦HWA.řŐçÎńÔŐ[JrĽíŕ™)˝ z—•ĺé<]Á2+…˨:„%’'Ż÷9ŇŞ4‡MY"Š„ó}~˙ô>ţrĺ¸öôŐU޶8Ź%%ĄRdJ! %™RRňôŤ[i6I˛Ś8M0mi{…‰ßhđKçŻS˘ř±»W° ű[>ꏶFŰ2ůž®Y㉅J˘ ůqâgžbţŻýMŇnŐÔ*%µĂ´Kň¬GŘc˛OÉ‹GJ(cŚ=d^J­-‚Źăú”ĄKŹ0Iwŕ˝ÂD‘ĺ%óm›tlb1Ťj˛Đ,U …ď{ő! ×ëŐmĄŰ ŚŰ«®\Ýn`ŐCc»&ߡY«RŞ&JU¬îŞť?+aH)UY`şî‘;ÄŞŇëÝS›Đ(±d Ŕ%ç"0, —V5aş$yTµ …$˝¶ yFŢëá=ü(›żó›xŹ<@9ÝZĂ;txKď6H;»={Ýż^Ż˙/‡.6lćsfƉq’«Š‚%3bą]UŇwEÚŞ;ü: ݇t)‚›n…gŹ6űDŤďC źáp8%v;•0]l¬Âę ÔAż~†Ăáo;é¬(ŤZ4ޖŸ:X©ŘŞ ŤSrĺŇh¶j˝ëá•©8|L®ŕŤăp‰b­¸E"RŢTĎsk¸Äy'ťćEá3 GiÁRÓĺ˙9{Ť—o®łĆÜ·ŘĺĚz×”üŕ#wđÉ×.¶PÜż´Ŕ»ď8Ę…Ť/ßěaKřÓ3—8Ôô9}«‡-%ßĎq~ăů7(©ăŇPő:-z.}ŕ>>wć/JLYa–RZ(eÔÚĄúpÔšŠŰIg®™ă9IÂîŇhTd‘4XĂ6+G±Ň¨H[šM;«ä Ŕ0±ét« (I’1›9›R@r‹f»’® 7qŤpĽžgì´Ł(˘Č™%ý ŕđĽE’ŰSçŰMtŃmw!ÜÄ5 ćŰŤż[EJ'UZjě­°ˇ·ł÷úwsss;|Ë·“΀ë“C+Šl÷>/˲&+ěGşŞîŐ#L2¤4p-ÉĆĆFM6Úí,Yŕ[ÓÝ0wiwŞß˝×ŰŔ2«“žź$ áKϱńs?‰uh˙]ďˇ˙GżĎâßůoX˙Ő˙ űíďÄ˙»?Iw~Ă0ęŞĆŇ˝ňä$+]·Ő´ô›x‹¸…Ű!”Í2¬”‡›©kÖ.`”1~úMĽN )‚5 ŰQ? 
ň?T'¶{ Çq$}mRJćććj|~íJv@‹qűŘţ.M}ż0*=ň˛ÄoĐJA&Úu†HËŻŢ×8d”l°`›»Ćá~9 R1Gä20MzŠ\í8ě´ť’sý€cM…˘i Ň’ËĂ’Ź%Îsňh ˇTÂđ–ŤkÖ‡}<ł¤aÁ(+h[ HT°”x† E8†¤”‚+Ăű–—Řô‚Žk“%® ž•Ó0 .ŰńXi5k¸VNˇ >ľšđsçÖřµwÜÍwř&˘pf”aŚ2‰ăĎ×Ő( )ł×Úę(¦ąÄňĆDâJŐDÇé¬0ČJ kě…”¦‘sc#ŁÓ´qlI”í?ź$}M’ÇK †…ɢ«jÂčĆĆĆógídMÎ×ńCCöz~Ç©ˇŤšĽ®Çn¤łÝŞş Q–%ëëëSßł=톟ŐEą4/ÉŠ’†kGQŐűNŁń˙ŻT‰oĺSÉmQ”fßoVŇ©ÁŇňp˝fťśękŘ/ŻŇ÷Ł•„´¤ć[U©Ń¤3Ý•˘RĄB ň'~âçÂBΤW;ËBpĚ˦‹a˘Ę{ř\ű&†„˛”v aš$W ťPZË~ţ$iĚuÝ™ŔŢ:!ŽăÁ`0ĺ´Ą“ÝYÁć'¦IgZüYš_ĆXfQăĹ6ŃěŃO†diD–+Z~ÇlĄ%bL:«ż AÓhĐ6ZµF˘4Ŕ2 Ň$&;ť)Ă!N†(¦ZŚ@u0ňuüč3¸-HQ…‰tšaőCB˙Ă`ścë5°m{&ÍŽ–R†a]śĐ›üívô»0ůýU%¸DľcÉbK«°ń• R0ŕʡ&HOţQ:†đL<+\ZĆVÇf’4ĄňśC­6ÍF‹49 \©p$€B ÁR{ ǶQy†UŽp¤Â‘ IF—†¤ŰjŁ”A–x¦ÂŞ‚ÜŽY%¶”€ÂłL\˛čB’t›M[MDQ`§Ŕ•Š$“,v™ó=‚`‹€©Ę‚‘Q"ů°gĐj¶°ś&a\‚*§Š»Ť´t±m‡8Ř$KlÇÇq›Äi‰*+ĚsaTĘ5EQĹ˝)‘¶•/ó$É«Žží3Š T™ÓiHâÜÚ“!ˇźťÔäď,Ĺ"§;v»ÚMgvű|MÓyÄV°u\uśÄëA¨  IDAT‘îEzÓŃÝ,eg5䩝%wůŚÝbŃö÷Đóń»\»vŤf«Ëoüwyíµ×ůŔ‡ľ·:Ů—©űŃ1făÚŽ‹ă6‰ŇťlÜÉe&~łCő+!é1·b3WIj^c´pÔĂ‘;_®ęű T™Ç)¶ăW"ň ŇŚ}(gÓcň€Őđ\\Ç©‡v#mmO^Ó4ĄĹ<ąşĆ«ë=Ö˘+Ł€ťö„EŹÍ4ă|ÄrĂ#Sđ̵U,ł>µë !„Ř7 ęgw·ŔŞa EQLąčě×:Ţ˱­Ńh`Čę™t,Ú!3IęÔ×`ŮŢŘ´z2U9É)ĄHŁŢXŔ_3â+P% LÓÂű;(Ł€čůoRV‹öÇţ;Ä‘Ł5Ś`ňŕˇ7†I+X˝6łV¶»lo9ąý?ů_˙̲“Ya)‰Š?[¶„âVˇr¤”ض]ᦳË4’/ŕĚ-CY/#]Ę”Ě8…ś˙nŠ’ŰęhͲÉĂ–´PQSzÎ:ŽkúŰe¦O~żi; rPY],(Jf»&TŞ´Ź)Ő”ÓXösŠE;Ĺ“ĺŘŇń­Aqʏ)w¸1eYÔ:"]Ą}w®2/Č7N#[+Vxăň^ Ń|K•ÔIů©fł9%5ˇ˙®1ŰĄf9őôý•đŚ0de[dcůŞ\Y4šcrFĐźJ¶Ş Z!T6žoă7Ú$ą$/ŞJä~—P%ąPnl0ř“OćS”§OSMĘąeżŤiš|ţóźĹµM^|ń%xđA.^şÂ'îbaaK—.Ňívyú©' €ďűŹĎ~ćłĚ/,°|ř0žďóÂóĎqéâEn¬^çäÉ;ĂŠýzîě9n­Ý`~®C„ĽňňË\»¶Ę÷ýŔGą~폿ëÝ”y¸§B‚cé´,&ÉŠ±deA©¶UŞÍ©…*˛ 0Ś­ŤEŤ7Ű®6Ťłe°oÂ`‚Őµ‚żŃÂvfëčß^°&±–:ˇ¬+†äaĆo¬öůĘ0¦íجŘr| ‚ĎĺŹŢ8Ç_~äľzń*qBAĄYúµ+«ř¶EÓ2ym˝Ç?úy>zęn>ýć%ľuí+펜ŢŘÄtľ~u•nĂg”ç|ţěyÚ¶Ĺ•a@Óu¸VÉóó7×YŹ^]ßdĄŐ 'RĘ:°ę“ŻÖűÝo-¶WQ4«Ů¶íJúKďXS¦“CŻáô5¸c;p=í``N˝őA%ʉ“¤EëŃw‘Ż^'żyůźţĘŁ'ęöˇ®ÚîÖ¶Ó>,LVÔz4[wŞ~ţçţ-%·űŘÇ~FRZ‡ě”y» U’¨xkqř¸—rČNjç¦0 1Ół4Ő·±»GPé|p«{”9á !VIҸ’5i }ĐŔŢ-ýw}čÚľąN ’j3Msf#ýůqR)B4 „ô“*IË”5!…ŘĂ1§?·Ş$ćä©N[¦C”ä0 Ła€eääŮä|—0)ńĽr ˝0UP'ŚĄDYUťÜţ஋í6)Оv,ÔĐ0Ă‹hę=ĐĹŽ4Ť)ĘńßpqÝJ0 Ř2©ŹíŐaRb[&–iam“ôU±$Nó)[xĄ S.¶ăR$ýZń *Öä¤iL–+š­N˝ŹGágoýŰŤŻŁE!Ň4i6«ęţí¨ ”eµV›Ş“µÉůëü“+ëüW6xjqw«Á‚¬ţ޲-FiŠoY<~ü(˙öµÓśŢčqG·Ă3××xęÚ NtŰ8ŇŕŹĎ^ŕôĆ&Ź­,ó+ß~‘ņĎZ!•âĘ ŕtŔ—.^塕#|ćĚ›|ůÍ‹,·š\B‚Ľ Č ľ|é߸şĘůÁ—×Öyp±REŇŽ›¦iVDÜ ć´güÜľ0}Xô}źRA’Ř–D°;‰SďgúÔ±Řv\0r%ńĽ ¦‡˝űz•ĎyBg•‰Ő8IÖ‰şĆ×ę˘Én÷Łc±vźÓ™YŐ†4dT^7ұë„R,[1ʵűÜß qŚŮ08zXBq§Ź Ş“I™őđă/ŕĚFĺ1éÍ‘íăăÄö2÷”u¨Ć¤dYF§ÓÁ÷ýŰ"Ťi‘fŤ}Ó§۶kÍŃý„ńĂ0¬ĺżşÝn­I7ËHŇ”,©Hk†aŕ·IUÓŮň\–ěţÝŇPřvŽYö G¦‰×\")›DĆň+«¬ýĚOĐ˙Í_aô…Ď0ř·ź`óţ4Ůź~Ş®dżňŇ‹Üyç îąç.^}őuLÓÁv„a`JÉo§žü:ď}ßű‡´Z-FĂçΞ­ľŁÇŽcZ&Iáű>ťN CJzč!.^8OžĹ”EN»Óáĺ—^bcýIŁŠ7'ŰTxfDnGQŐ:´+Ň—V.J ‰ëz¤ńpG5Ą"ťĹ¤ÉVŇbYąr‰łý«_°ň›÷Ě”á°"ŤµŰíşÍ>+Ŕ]˱hŇ—~v…0řř­!űĄ‹|s3ŕË·†üč‹řTŻÂ@×±qM»,HóśűŹ,ó•Ë×ůťWNS(Ĺż~ůt}Â?µ´Ŕ·W׸1 ±¤ÁçŢĽD üéé7ůĂWŢĄŘH2~ů›ĎÄ túMţřĚúiÎ7Ż®ňÔ•ëÜ5×áÉ+×q¤äĹ›ëőo0Đď÷kÉ7=f/×ڶŔΊ’˛TŘVµÉí'A6y Á &ľµ;su`ĚÓŃž !ŇP8˛˛ěŹŘ?đâ>ńĽűßN§Ó© ¨ű2¬ÇÁ}41V>öşxLŤ&öű}úýţLëµŰ87HJ˘#nöy[#Ŕ“·‡˛äžĆŽX)…‘^®”i:‡)‚5ňÁe¬…SP¤„ڍń0śú7 ĂV«UkKÎú.h¤&}銶Ćűî'Á¦7¶ÍÍMň<ŻÉĂłŽ$I)óJĄ$Ibš­9°ćpĽN}č5ĹßMĄ ŕ!áđi’ĐhÍ5Gn޽zďÉxt«’^jĎaŹ»!Y2ŞŰĺô¶ťĄß!M–ŃxMŘÂRD»›I*e0ěßBJYZó,Ą,w~Wőź­ů®Y`3$ÜŞ…ö")MâLŽŐo \ݞ…Ýn“.„ެçŮJH…¸^›(ŰIŕťQ iVĐňŞg&ŤmŽăÔ„q=߲,”Rô0ř‘—.ń×{Ľ­éóL/äŻ=˙&gŇę>$ŕH‰e’(äh»ĹăwăWWůňĹ+!xęę*Ç!H3ŢŘčÓq˛˛ä[×n°™dĽĽz“Ż®r×Â<ŻÜĽĹ7.^&- >}ö"§7z\ěąĐŻşwĎw8Új˝¤ŠŹÚĐd»2‘ľ·Y†~5©9Ióęů°äL*1:Ó±¸‚ýTq Žc${ďë–,ńĚ<şĹp°Y'ëťN‡ůůůúĐrĐ~Żßď×jU‘Ît®—ç9ëëëôz=ĚWbźnȲť )9l…t[)—ʉ˝§Ąăä8ě¦ř"§PŐ¦X)~ü%śąeT‘­˝ŚµôÂô7®xeÎOÝ”>qůľĎÜÜ\ öžuLękÎŹ $zˇÚ¤ôć¦e|ß?t–†Q C¨JW1©*¶úĹŠŁţ`}i(<#%M7Ĺö4ć3cĄvčćpëźýSěcwĐţžQ F‡W_™Ţo˙&ţc>ř0?ôWţSjóއŕĆFŚi9ăÓ śĽó.,Ëćž{ďăäťwQďűîżŔ•Ë—yü‰wáş.˙Áţ%ÖnŢäńGď§ÝjE!sÝví\rçÉăĽđâ+Ľ˙ýßÉŇáś;wŽGŢńZ­vŹ\ď[ýTl\ĹhaŘ ÚÝĘŻĽHGVE\rvŃSČ ‰ß3RGCň<Ăó›(Ő¨ÖOĆ;Ö// ŚrZ 
‹¬4-6·~™4Yhżá8[m0ôł{iňË×řë'–ůˇcKJń[ođĎÎßŕď8‰·MňÁ•’®4@)sľÇwť‹žG×µĄÓAj˛­Ą1ąóóó5Ńa–MƲ¬ĘF:ÉPT5Žg3­öçyő!CJ‰ĺ¶‰ĆÂ]7wŞN€*úô&ę!‚ÚzSźüş‡É$Ý4MZ­V˝QlŻbjŘ•aµűϬ­ôÝĆ…¸Šµwy!Kv‚PŠ;❲šş\\˛TFNú1ڦ¦iBާ‘?ŤŐ]ˇnR7±–î‡"%$ÍŘ"ďčőęőz8ŽC§Ó™"ÎÍRA™”aÔ‡`& &˝‰ę} ÓéHúÉňË40„Ŕ0JlF„ĂËíÔ RŐJßí„ß.(U5_Ú šíyâ¸"M$ĂU,‹ł˘đ¶tI-Ź8+±Í‚87ÉöťčgPŻ·[iĄçY†-Ö 7 Yg˛,ŁŐY ޞ,Ä1«ý^C[˛¬Ç(qj}ŕ(ˇŘB`¨Ýcb©¦ÓŞő‹ĂQÓöđÇZíi:–ů ôAăŘS„©Dé}\ëšî÷ű !¦d>ő!Jw’>yé&©RüâŰď¤k›|xąËŻś_ĺ·W7ůů‹SUDŔ1 ˛$Á1-\ŰćÎů.‡Ü-u…ď˝çNţůÓĎńźĽýiQ)‘\Ž(S‡¸Đëĺ9¶!9µ´âÜFŹ ÍXi5± # b‡řĎdÎŁ‰ŔÚ±î =Iß÷Éň’B)\ËD±ŻăöőÔI¨Ć±BĄş©.Q2Â5Ó=»Ë–,1UH$$¦ŹçmąVuuő k”ÄžYµźó‚=ׯ” :M‹¤Řą±'Ir sŻ=yŇĎîMżrń&ßdžŻ®÷y¶7äަĎŻnđĂGđ¨ž'Siř¦äďaJÁŁËKśÝěsÇ\—Cľ (=ŹÝ6o?´g™Ü·0Ç·ŻÝŕm‹s´‡Ő0ćť+Ëśšëpnc“ăť ź ›=N-Îq¤ŮdÉwqL“ĺ†O˱iY[÷®]`€Zzo’t¦×aݎ!@y©ČňϱČÇÔŰq—BÔV•:ZV…ÇMr˛Č÷|ʤaŕ8{®†S?×ݍźY®aň¤‰P“­v­¬Ąiôřł8”ĺʰneąŤe”ř˛@˘hËŚE;#Ç *÷–ńkČ‚SÍ5^7ˇrZ~ŚiG`4)Ł>ćܨžç9źí‡|ccČɦCÓ”¬&_ąŐC‚ˇr«óm‹yĎĄa™Ěy.KľÇ˛ďr˘Ýb3ÍxxeQ–t=—»çÚÜ7ßĺžů*>ĎűiQđđˇ\ÇĹď;vŽcŃOžX9LT–t›·-t9ÔôYnx,x.GZ ć\§ÖdŐ<­$˘ńé»q#ö:L˛”ŔsLúý>†aÜ6žU«)LÂLŰ­ČŕYŽi¤…”•‡bĚŤPEF‰]«-éŘĚĽčX LĹb=\×Ý›ĽC L Ţ•r§VÎc@‰`3w¸y óťÁő—r# /¸˝@Ű»‰á¶Ŕy;¤gˇ Izkڬ÷ŁĚąo ĆţŔăJŞĆžĚJvĐ•Ű ęö¦¶ľ›µÂ˛źÔFšĺز¤ăOź@’˛Ał=WU€zëŇĆË&%ń¨bÎđőJ â\‚ôđýi’&ľ•’ľü:«˙ŕg±aůc˙€Ťőë´>ř‚gžbô…ĎĐřî˛đ÷ţqnńĆé7yô±Ç‰ă4MŤýGVŽXÉ2 gľőM>řˇďĺŐW^fiˇC·eUŐ ×n â”ăwÜYËěÄÁ-¤€_9Ë©űOaZŽăŚ%cÂ=}Ú·Ź$7VĎó(Š‚0ě>‹sżµTů–űŘlµŻKUýÝ0}<ĎŻß)rnőÚ6é‡6Ťˇô}Ó4w]»®KłŮ¬[IŰź©u˙ńóůoOçR” P,Ű6żúć5~˙‘“xjwĚ‘ţn˝ţ“Ďţ^1mͨE®'+Ą6ˇń{{Ýk§Ó©Ąmt€Đ߯‘~‡öň"o¶Z„q†<Ǣ×۬ŻŮóôsíŠ}ťĽ'݆6Qeë·°í­€Gޱĺî•´¬00˝ĹšHĄ[ă“-»Űééëfĺ0ç9Ývßő§Ö `iié-•ow“[qbŽ9QeŻKµ6Air&đŮ̬qřÁVČ;¦(K@á?MŁc ěEp„řE@öBű]{&¶ŰÇvůŁYe èݙ†Ă!¶m×1ův{ZvJ›8LšH¤YŽc–´˝éw#.´ĆqxŘ» ÂÄťQFqűČ A¦l\Ż}ŕü$—x­CuLrĹp,ăhăůŐü·r˙:1©p´–aËí{OĺJ©”"ŢÂ[­§ą ŔĹŰÇŃK¤;lSwĄ„ąG»Ý©(i`Y=?+ gľ–BTéĆT·L˧Ůn ˲‰Â˘ŚŘÄX¦¤Ő°ö”ţŇ÷żWőnRćSC¶Ź/ŽRţ§ó7ř[w­pvqÂwřÔő ŢÓńřۇ;;ââö8¬;YŤFc_ů@môPS×1HcH÷ŠĹşŤŻ”˘×ëŐŐúţu<פąí±XKAće•Łř®M–&S.«łČÁ´”Y0¸…!«ßpr?¸xů2ßüösÜ}â(Ź>pĄ˛Ş$?qáŇlW…ĚÁ`€ă8u,Öň_űAF·Ą–c±‘¬„!RHî;zŇôűý©„yG٦P‚©C?·8ćD,; ¶¨,Ú «©ËąĐ'·Č,ˇ8éé€ Fz™Żb8Ç(†«H< ď ¬Ć!ě۸™É6ˇ¶ĂÝ«M89´H»>É…aX·xn·Ĺ¶*ˇ7ç˛,hmÓôŹsŻYUżÂ`„o%@B8 1¬­ö|őÝYPa”ösŰÉnµČZµSY9·Śáűdׯ‘ľy†řŐ—‹Kő\űä „Pܸ~Ť'żţUîşënžzňX–ÉůóçyřáGPJqéâE}ü ^|á9šÍQ#}ěq^{őŤ&›ŚF#6Ö×yő•—8vüÝN‹‹.˛xh‰ŢfŹ$-‰˘JŢgĺČaŢxă ň<ç±Gî!Ë{C Çm!Ýâh„(ŞŔĽßňĘ á8ô6בRŽýŕ›DŃ“¤b<[[† ˛ ¦\Ë ľ•SŞ!á0DÚMšíÂ0ÄuBJ’}ÔAö‚ŞčŤuŇ-i·çhŃüČńy>öâyŠńËn‚_¸oe×Ä6( ^˝µÉ»/¸ŐďŁ Ér§=XĄ”śí ¸iˇ>Ľé–ŐdPÖ,XešU" ¦“gŤMßÍ1fňÝÓrzťN§&›M’FŁA:ÁĘŤăhę ŞÝi&-wK´w’$Şr´ËŁ„a\)€X–EłŐA©öXťbeLWsKáMPčîŤŢÚíöž›aläëĹŽ5G˨ȌQńüęs|áěçY™;Ę÷?ř,ŞE’(!Ę#li#Ĺź]mfr\K\n¦‡ě„;ÝŰ(ńEĆCÍ!ë™ĂĹŘe4~†›˛`ŮI)ÇĆ9fôžź d—Ľwł[éYăż×oÂşš»ŤIÜd»÷ $MÇa 3H’d*Ią[őI§¨I[ô˛(Řî­goÜ…‚ž•IQnEšĘs‚(Ŕ•É•LK*,âpŁž_a8Â5“)<í¤‰Ś(B0Çóe5?Ç}K÷żłEëDCl#Ĺ’Š$—Řţ8ů ,±ő\ۦB)ĘđđÇNcA4Ä–ůľInV|żA0@™` šc§±(âľîî$C\sz=µošôĹ®×Ŕ0|ܬŹedS† {Ý˙^Na“{µşżłĺpŻoóS/ťŻ˙żSmŹżr¨˝kbűôęóžË=ťolöąoľKQĽzm•ŁÝÝ1TG»FyÁÍ0â±ÇÝá7şúhš&kiĆ’ë Š­C«ĆHëűŃĎĆäýOĆbť'I2uŘ«đą’<ˆaz_|ŹŁ(Şßă˝b±†zDáĎÎś,ĘHw¬!yţĺ×ëvąóäÝdĘŕʵ5ŽY@RTaΖKťîÄLĆâV«…ëş;ŠGB"qnxCHîkťÂ,ÇîˇŃłkçřÔ«Č_}×_'ĚBúaW9\]çXű8i™V°„Ý\ 6r›[©Ť4*żf€¦ĚYv2LFąäžFĚśYU…„JđăĎâĚ­P7€òČF#Śö{ęö ŢxnW™`{›p·d˛˝: Ěţóh±iFu©Ŕ”•C‹ŚJAi4qÇ2YŇŻ` b¬±Z$ÄIŠią8^“8”y†yŔ~¨[de‘%)¶íá."˝pŽě›¸=BüŇóXGVPiJ92˙7ţFŁÁľô$ľß`ccť ńťď˙nFĂ!››ŘŽCż·I»ÓáĘĄK\8˙&išňĐŰćĆę*·ÖÖ¸pá<–eqßŰNqýÚ5|číś9ýQś°~ëI’ň•/‰÷Ľ÷;ůĘ—żHĂopöĚ>ôáďĺęµ+Üw÷TBęy–§•2‚i7“Ja7‡˘ «*Eeö¦Ô Ö÷ͨîᨏ#wŻÄŐ-Ć2!Š*ÍćuĆňgyžŹeĽlŇ4GJË4¦Zőús`é(!Dí~VI'UU`ĂINšD¤Y‰0äV‹ĚňH‹J>É4q.ńs;ŕ"ú7ÜÎHÖ¶ŘeYb¸ú{üú™_áÓW˙”§o=IÓmp‡i­0L‡ĽďŢ÷Óő:ü“/ücZ |ëň7yćň·((řW˙çożeX°Ł|U"&·2»r=”¦P4dΊ›b &okEř˘Â:ůŤâi¬Ö2y˙†7”¤ˇ@řÖk7 Üdűo¸]†Ń¶í]eµŁNĘ&çďÖbśµŘ ÷Ă0FyČ‹˛fö—JPĘf-•':OÉ(bŇh¶Č•E’ ›‚í2Ś’FłE6–a4„"-ÍZO7 †8Ć.±P‚´0)Jöm±Î˛ţeP*QËHFI‰0<üqŇś„›;lĆ5ěË)qś U·MU‡^iě,6(i­ĐĂłJ ‚(Ĺ´l\żM’µ•T‡;>G­L‘&IµţŤ¦áQĆs IDAT푦·÷ű‡QŚ!%ťN»¶SŢo¦ľř®ůo„)Ł”»żţđIîîVÉm^śŚd9ľmókĎ˝Â÷Ýs×”üěźäh»Ĺ‘V“źúü×y`iž[ĂëIĆńĄ.őĽxsťŐ(fąÝâŐŐ›´-ÉĹÁ°(d—‹ľ[Ĺbŕź>ő,®msrľËŮŢ€×Ö{üÉ™ 
|çťwŕąîąĽÉˇźmĺ«ßcť·Z-Ң¤(J|Ç"‰ă]Ş`°%e–¦)e:¨ $ăßPŠŚ4‰˘”‹W®ńÄ;áĐŇ"gÎ_áěůË Ă «+¤OŁŃŞ ·:Ćë c±ľÍ_QJńfr†˙ńĺÄ×n|…'׾Ć3ßäąi-LĂäx÷8/ßx™˙쉿ʯ~í—QŔëkŻqąw™+Ë|ćŤOďťÜę‘+ő̦—[8˛Ä1–(éš)+nĘś] ĘPŘáSřm‰*2ŠŃ5¬îÝdĂU†âq’Ľ*eOj©MĘDĚ2t’ŞőĐt;™ńë’»vqÚţ€ěĄÍ9+,ŽcÂ(f~®[%i±B©‚\™4Zsu˛ĺĘé¤Ç0ĆŇEELśf¸n•äEI‰*K c•Ő Ź[Pä1q’ÓxôÝ$/>ËčKźEe)éĄ ä7o°ô÷űî䥠7řîďů W._¦Őjł˛r”sgÎĐh51Ç.esss\˝r…FŁÉÂâ"‡&IVWWi6›ĚÍÍsüŽăÁ×^y™î\—,MÉłśCˇyěń'¸±şŠçz´Z-ŽßqŚžß÷¸ëÄ ‡3…'ÎŤF«’|J „ÖUŚs żŃ&ű8¦Ö6UXFJ%(!iµÚuuľĚF:ťJ\˛,%K#\ĎÇu˝Ű’˙ş[ŘeLcě,ŁuN÷ Ěx˘ér1Éx´ĺówŹvIÇćnŁÁ/}ëy^ßŘäôFŹ“ť6+Mźł˝g7zqě0¶®óöĘkQuZŤ™ń\^ż»Ě(ŽąÝňµk·8Ôlf9đîúQLçüÁ{—xt®Ëź^¸ĆzśĐ°m:ŽĹĺŢ€®ďňţň*'Ú-~÷í Ě7.Ż÷řłŹÔž˝r˘­ *Ž©nśˇk蚨–Ű>‹-…ASdY!Ѩ‡mlIŽ©öó˛„‡Žť ŚÂĘs4âŢň ‡óć»I˛‚CT|«|ŘĆAµ(ŠM±8*C~óť˙•x™˙č៤mµy{í-îĆ‹|l*Ý/ß~‰Ožú$oßy›{úÇxçî;Ř–M^äşq?,a»!őܤ7l1c¦śöF8Z‰IA9YËZľ‚«ßA3‘­ľ‹Ů:A™…Dů<Ąż8A†¦Yod * p4űŞĚ­ŐŞÍY–eÍöŢΊNÍ6µ›Ui~?Á5Ë Ű$ ×@łńEáŐ¸¨4Mwd”BµAë"! W‘š‹ë5(Kź(bééžIš©K -!AŁůK˙5ţ¤ßGŘťż÷Yśgž¨ťfžîiÂ$ăůŹ~€˘,ůÔ÷}¦:ý—%OčOăygÎ>ľ©ÍÜlµ9ýČcH)'ŐÓ';Ę“gO€¬Ú›Yi`ŘÍZjm4bë†ČyęÜ)%˛Ľż"a%’1á0AF,I‰Ż&@3ĽĘÉk#ĎʉňŤŠ˝®ëdF@VPŹťČFY©#tłbjqSk0żQŻ?ĺ3ľŽ4Ob~Ľiăë‚t˘ßÇ1ŁŃąŔçű=Ĺż~ímN´[śśT‚gâ_ľö6OĎ$+K$đéÇřß^~“RJ>utžWnßĺĘzŹGf:mÄyÁGq{0âăGçů“ W¸°Öăö`„m輽ĽĘńV“O[ŕŐ;÷Xh·ö%نj3+Áp!¦ˇcú®rOÓc;Č‘byÇáßş˙9J Iaˇ`RÄ!ĂŘŻ\Ž&ňajěÖrBÔUwUAv‡7{ŻQĘ’s3Oň˝GľŹ?˝ńGÜÝäíŢ;p€vĐĆÔ-ň,Ç(un¬Ü Î"ŕ»ţľďáăBçý0`)µ9î†4ô“ś|:őä*Ž“ ´ĹđćÜ9Šh…H{©·‘SĐ•X†AłŮ¬űU~P°-•$´Z­złRÝąÝÖÂÖ8ŢétjL÷Ţq8Çu,ĘlŔ(5«ęˇăŐť4MeÄNH‘J Ł(z„# Ó0śŮę%Ťw/ YY ™é¸ś;ÝAŐq2tt˝ (úڇ&Ž×¬±qâš$H $•e ß|é®câ:Ő9ćı.ž·!ľŻŠo˘0—k˝‹—î’e9'ŹwpăóY9&ME-A(ŇD9 šVźęű®šI`H˛lŔxh`9†;CŽĐdmHriciÚ¶ű™©KJą‘L–e‰ßčŤčÄXĆöqXJÔD×ĘŁA‚aą5ěo/;ç–‘a™2źý~Ďój…Łíöń˛,±â_Zhq¬Ě(Ë Ů;˝çćR–’8M8Ü đ'-UK×9Ü řâĹ«ś™›ˇ'U¬î´řÚµ›ôťió­ŰwI‹‚‡:M§gÚň=™é ä˛Ú[^Ľ}ʤ(¸; YŤ977Ă#Ý«q‚6ő>íçýSyŽR–PëĎ2 ěZÍžBTd_%‡ÚjUňyŐ»śVĄď-#+Rs‘EʡŽF–ą·´DłŃŔ˛LĘRrůÚ VV×?xŁ ‡vÄ+­sUpěë,†w05“gf?ÄQ˙µř\čżO&S,*ĚîBk˛,iY-†Ă!Ký{č†ÎŚ?Ë|{~Ém}3•Ěb0ló¨7fÖܨNÚŃ·1:ł”qaw–OĽr‡Ě˙n:;× ôÉŨ‡˘Ş­űÝdŐ©Céą©°/颭›k;:›¨QL’Ë×,2$¦žsR}Ă{ŹUpĚ)ÇÄăô*ÉK“„q2Â1vf„Wó%ŽYĚ4éţŇçXúÇźöOţg8źř>Âd„®U-0ő Ó‡†é*JY–Ü»wŹÖDźU2 Cź8“H -ÇĐŠ*PO}ÎęZĐ"žĽ\kk«ô×V8~¤»ŤŔÉÔď§JRK9d<ŚĐMżŃ%Ž#˘$ä¦áóőUňłVÉOtÖ ĚČK ŰU fH1íNs–0#˛Ę'}kh(p HkżlC—$鎫Őj‘¦é®rE¦±ÁâWXN…Çn·ŰŰ&¦iňt ë?WĎOĐ´LDYňľô=v„•(Ćž$włžĂ‰N›+k=>qü0o/­ŕ›żýę[<}hŽ(Ďůť×ß!°Mž[8Äk‹K<2Ó©*RrĐ÷04ŤAuúľ°şÎç>ń<řă‹W9ŕW‹†mq{0âp3¨,,÷i 2×u+éŻRâ9V]M}vĄM©>×ńŚC°ôl0-4 Ó!OC\{Ň#’qD˘űř °ézvÓ˛Cş¨Ü‹žę>­;|höYnŽn˘‹*Lş®Ë÷=öň,g<óôü3hhiĄ4 Ü`×ďú˙kR°śY¬dŹCć&qXČ7ýúěQňŃ2F»:dơAŢ8wßç(ů;%>]lĆüíg(“Ś Ř ÷:0NÇń(Š‚ Ć¶ďTlPźię˘ęěPĆ ‰điµŞ {Gě¬+±1*Ć„4ÉÇU|Fă!:wî ůĚ'Žsţň:Wď$\ą¶ĘÉă]®ßęŃi9X¶Áˇą€óWoqp®Ĺhś°Ľ2 ŽS>üä¬mÇËŃ4 ÇqxÁ,ęęąZOĚuů·ď_!ř;Ď=Í Ű<;×ĺůÇp 9ßeÎóř‹ë7ą˛ŢçÇÎśâO/]ăk×nqîŕ,®ÜčŹ8s`އ»L!Xhú*}Ü\JŽ´|÷‘C,E1WűC7}Ú–E?N('Ďóö-ůĄŢ‡˛,É‹’(É0 Ç2j2öÂŽÔ›¦Ié´’–žmĘIŇÂDč`낪+űÂ3'ÉJ“Wß˝B§ÓF®­sö±GůËo~ßwéL gę7OĹ‹¨«ŃšĹ‚żŔa˙sÎ:v‡˘,j×VĎóřéŹý—ŚF#>uâ{É˛ŚżůôŽĐ®S‘—Ĺďť—r{Цáę%Ď·‡h“Ęśž\¦©żŽAę'ů%ňáM|śŇ:˛ă稍P×őş4}ŮŻ€3Pź8Tň< ˛ŢĎPzrĘYm;ŇY§Ř¶‰kŘF> NbÂU‹(ŽÂ ›;ß—2B!I¦cض]1NÉÇXĆîóĂÜĂ0mú˙űoÁż÷CGŽăM,Ź+ŕvČx4˘;3Ë`"ě8KËK:4Oż×ăĺ—ľĂG>ö1ÂńĂGŽâş._ř˙µµ5ľďűϵČÓŃhȡs,.ŢŁ;3Ëď˙ţňc?ńÓëő<ËöřÂľČG>ú1ň<§Ő¨ 'šPÖşݶŰ©Š˛rěqĽŠ‰ůŹ.h:üëEřŃđĹexőŮdcRé4»”eɨżŚkf¤…FZX¸Ţű5ˇÉ¸ÖuĚKťBk M޶R Ć™IQnđJYY÷)ĆďV,­”•AiVŽ[Ó©iU`ßkŻj5V®zjľzÉ·ŰŘU;S±Đz˝^ýY fŁŞĘŞEĄţ[ý;5şÝ.RJÖ××ë–‚óěgA€nÄiŽeX†¶ ĎĄ™Ó‰ëvC˝»EQ°ľľŽeYxžW3ł˝bôŽ3·" ÷ăúÂ̦ٞ­˙÷tŇ”$ÉľbÉ[Ł7ř_Ţ˙ź9ě扙§yuůeÖ’Uţ›'~…űpÍüďőzőQÝO)%BĚtg0šVc;µ„˝FÇĚyŞ9DLÚgćřŰ4‚u4gô#˝OÚ»ÍĐú~¤ľłBŤ‚Š©5¤Ě+â8®!űŠđٰĽR †ý1ÓŁ8Á±­Ę8ÁĚH˛B Ů38ŽS'Eyž“Ć#,-Ý“4Uń&Í5 ááy>ĂQĚ«Ż_á…Ď!ĄŕŇŤf}^{‘Ożđ_˙Öu5ąµ8"JJşź†oŃj9\żŮCŤ#Â0ă»>z]ÓřŇ×ŢçđB›Ŕ·ŽNźĺÖťu–WG|ěąÜ^ěa:­¦ĎňęĹĹźřŘ dYŇ뇬÷BΞjńWßąëZ|ôůGůň×ßăăĎźâ•×.rwiȱ#łdy‰ăXäYĘ#'Z̶ ň—%·ďn´™…€gźđxîlŠ,%I®!µ*É]* ţćđń|e~d%˙řŕâÜŔo¨¬ľë8Z…Ns tÇő+žJ<Ć6ŞŽd)) D׉876ĘäT‚·“˛Ć´RÍVă)e?”4Ô~ěę=V]5_Á°¦çO'hJ©FiĐ !ęX:-M¦~›ŠKęs¤”µ–lEŚÇăZYf/5éßŢét“ś˘(ixމ@]´Ę˛ Ă4‘“nŔ}뢼cŰvÝeźÎ‰ňt„­çd…†Ô}dâY›çR ˝çy|ůëI«Ů¤ÝjrńňUľç…Źňâ+Żáą.Ź<|bǸRŠ’˙őŇoń~˙]mźa 
¸9şÁ÷ĚšźXř)lË®IÁj˙S¤<ő\r‘cŚŁśŔ{ .Gśś@ćŘékč3‘˛ŮM  Îş”ţá]?GahŐ©­ŮlÖlPő°÷»Ń*fźJ,¦•űŞ żę —2lĐ„ -ô‰Ď˝$Ç&ŕTÂŃR‚ĺ40śJ<[ćÎ7şÓĐE%ž]}FĂŰ 0ś™I‹,Ć6‹ű*‘i®á¸~µq˙ଠűňuÂaŠéTUěŻ~ĺKAŔĄ‹«My}ŤîĚ,łłł|íí·)Ę‚4Mąqí*/\ŕąç?ĘáŁă3Ď*I‰Ć_óEććPbňŤoĽHsRyďÝwxëÍ7đŹ˙äDzČÉóśW^~‰á`€i™<űěó|í«_!Ď3ţÁ/ţ 2ż˙yęš$°SFáËŞ”OtaTÂĎo­O®ąĐ°ý‰EÖĆ–^bę1Yś’P‰ŹG1µ„R¸P&UšĘől:±UĎ+ÔEUî¦Ç˛Ľo]“۵Řw“ŚQCť¸ş˝¶ŰĆ®*‘µ$Đ”¦ŇÔěŐň-ÓlŢé ˘ţ¬đę*yŮmío7”^aeS –ˇ1 BÔLXÓ4y÷üŢżx™sge®ŰŮ6 ©CzW§[Ý3;&ŽB,ŰDćŁűŢĄĽSŐü¨f«MP1ű÷Ú(Î5žäoůţŕĆďsuxS3ůąÇ˙>§ş§abNŁđcŠń{oţ.ş¦ă™.›ó;~ö^# \÷ÁÔöc4YAVD1Ä×ѬĂHTň‹hDśFNđÂ; uřQĘ0*«ĂŢ~Ť<V*Ů…ăSŞ"űŶďd"•Ϋ®W‡¨ĽÔ˛JĐ ÜIW%‰z‚ ‚=ĹŚâ1ľ•í‡lŞ”cÂQŚixDIÉâŞŕý‹‹[h˘ ‰ď™\»= /JÎ5xýÝežűĐ Î_¸Ă\×ÂÖsd™Óí6@Ď;ző[Ě5xúÜRJŢxg‘0JŽŞô˝ĺ!—®,óÔą#\ą¶D§í“ĺe˝~•-»„&Bò,t]#KctMĐí¸šs(ĄÎZ/Ćő\î,§x®Oo¤±´šđ‘gZŚĆýQN––Ľu>âąÇ„H&&Eá‘ç‚¶ˇó“ó`ipÂkcČKnmp4YĹCmŇQ”rD8ŚŃĚJ3IbÂd„¦éL­*ŠĄFşE)AĹ«í*±wc“»Ř´×o·~”<ŁŠe{ÉÂF\śžŻś ·B%¦1ë5ŢxR SÝĽťbéô;¤$ľ¦]ał˛Śzö»=Ď«]!˨!ŞÍŻ˙č _¶-žzü,¶u?ĽŻîW’$di„íxřŤâ8"/2DQŕlŻNrFPťŃźć)†Ł1Ż^ăů= B0Ť0M“ W®rúä‰Z“{zhRăoźüţ§÷ţ{^]y€g<Ďß:óÓĄU™Ó°zvŁ?xçßŇőş<}řľsýEô˙đţéç…m?%FŔŇ$O4ÇőŹ2’÷đĽ!Bw‘Ą…¦…”˘Ţů y±·G»řžĆÁ†QožĘJw7<®:i)έdUEŘ/ilÚ­I‰‡GQŚ;q)qŚS/É ËíL—C VŇ(yBśd•t‘ĺ&A±'iL›€őó<%I <żnşŚŁAQ›H ÇÇó«JĄ0 ĎÄ2u ­ Ś Ň,çڕ˸žGQäőiĚqfggąwď.†aTV…Ý.ŤF“ĄĄ»öš¦Ő›±ŞćŞJ°kVɰÂîŞęŻ:=¨2ÂvkyĹŘVő[]3ÇĐîŹĂ& dŞ5©MHWq¦‘ç“8Ľ[’[“®Rl˛>ČxňÜq‚ŔĹÔJŽÎű Ç)Źťębg;ĆÁą&smÁLKĂs5θt:ń´¸<ĐAI«éÔqĽÓöh7lŰŕČá6ÍŔan6ŔĐ5Žn'źÁ¶«}Ě´,|ĎŔ±Jşť€ů…8ŽE«iŘ)f\>Ţf0L8|С۶ČŇśÇçđ‚&˝\ĽóÜ“Mf:&ŹźöiŻ„<{ÎYTř`ébZ6kQ—{-Î6ŕüJ)yÖÍjÆiU“­÷™+ŇťßaP¤cL­*öÄąA!··ŻT‰ŕt,lZÇ2ď3lŘiľ"Ź+ŘÍ~Éă[ןšż5–*ŇäVYD5ö2ŁR•Ň­ź1˝ĺy^C7·#?뺎$YŽ”ŕÚÁ ÎyÔ=ştő:wîŢĺCO?I»Ő¤?’ĺ9ť@ÇÖ3ŠR4Ző§~‹&$•!Ę„(N0M?(ŠŚ˘ĚŃő»T”ÝnašĎÇ0t:mY|ó;/37;KÇĽwáçÎ<şmr ` ‹§»ć­ţëpđĎţ#ÚNe©¬öÄéýbi|ßřÔ©ďĺk—ľĘ8 Ńäç«äV×´]_x5öcZzZťeŠ—ţ5V0CŃ»Đٵ‘8ZźÔ†˛-áfŇ’U,=Ĺ6UÉ©ú;µ1m·Ń*‹ÇéÄăßeq«żWňGšnŕ8vUÉ,ËşźJĎŻ>7 {X““ŚR6ČŇ4Ď @wöáT&ĂčĎżJňĹ?!zĺ;ˇŃ<ů0…pÓAI!uś‰ÜQްődęp"H'–˛i–ńŢ;oń©O†v»ĂÉS§8ţĐIúý>Ď|řYÚťłłłčyžńŘŮÇ8rôžďóđÉă†Áüá#čşÉçOŁ Ť3gçČŃŁY8@žĹĚv›Ř¦Fwf†ůC3čşÎcgáˇcG-š­vĄn h”ŰB-6Ľšxüá’ŕ/Váëk0*ŕďµäI8b‰xŰőŞ’\]L*¬Â¨TĘĘQĄ(5ňr÷ ™ ¬ŠEÚn5k6řN-[çO«z<¨+Ěv»bSďPŐü­U…¨ä¶¦[~[çÂĆÚß.°Z–…ëyÄiu=ŽĄ×˛{jäyÎíĹ»čşÎCÇŽppnŽ7ŢyŹ+×o kÇ61MĎŻ~Ëp8@•榩•čŞ*/‹I`µ\LŰ'Î$eY‘Ó\ÇrŰUb†¤YŠÂř«Í±‚ĹlČ_©ŕ¸mk®”2±\ÜĺÉ™GŃs“ŔŮŔŠ©ą îńĄ Ć˝Ń=lĂbĆ›ĺ÷ţŹßű@Éíđs˙í糪JŢ˙÷šŘ  ŕLbOěXE1Â/^ÂđgÉVŢFw«–ápä•3{*ĂLŻMEJq]·î|)čŞěě´–§ű¦+aęŔ¦äÁÔú|Lvš¦ ţ¤]]9žY€€Túx“ĂZő°jÓ‹ Ĺ8I1mË SČ‹ü~[óűľ»Şä6ťĽ(p]Űń‰ł’† †¶×Áq޶´k„z•FŐ]ł«úó¤Ńhřős0&®NžkN,h®cŕŘ• mŕ[†eBžĹVŐ‰H2‰Đ Z­ćD¬˛¦¶ ĐEI§i`hËt[:B¦„qŽe™\ľ‘°´’ńîĹ1ď\łşž1ŰŃ8}¬2ţHs Ëi’Ä!Âü‹[6__…Ż®ŔkřŢYxľkM¤ăűTM¶®ŮZ†1.*§AŰ#J+§´´Ř»c¬bˇrjSUŰÝT¦çÂF±B‘öö«Ł>c;ůBĂ06Ôöúţíb©2R‰ÚvóU S2”†aÔD 2Ť(!+J\«*2l…Ô•Oşé– IDATeÉ[ďľĎÉăÇ8˛0Źi|ůëTďď:č–_«UýU,¶ôę0¨žež'Dq†e»IOdĄö”–•4Ŕp8¨ŁÚ{:­k˝q’đ]}žąŮ™]Ąđ´\CH8Üś#Ő8˛Aŕ4ë{3}/Öâ5ľxţ ’ź8ů Î/źGüîűĄěSIjŘrŞÂŁIžl†Ü-VRO/yv kkFŻÓpn" —lí<ÖÜ“¤˝[ íF*ťŇ-U˘íNíŞ*RµXôÚţT-őPŐB™ĆáL‹˛+üĹÖ1Ť‰âÁb¤”DIJ3đkśO Bâ5f+÷ŞŃC·5eP8$©W8®4MI˘!ž•ÝŠĺKżţk¤×ŻŐ˙ź0 üĎü š?űY,ŰžŕK‚ j}ÇŁ•ş=/Ą .ôşŐ“gIšĐh4ë—QáčöNĐ$•"¤…ŽÔ<\ŻňB0ö°ÄhO‡źRV­ aTŞI‘ĄcÜ)§˛˘d˘J EľN¬ą,Výş„ŁŢ†Ń@} ĎÚźÓY)!Ę-,;x wĄé133SoŇĘäA0„»9ťígľ˘>(*ÜíúúúľçëşľInIÔőőő=ÇvČfłYévćžmÖ8[‡¦iôU{Ń÷\†Ł1—Ż]çăĎ=Ë·_y…fŁÁ‡źz˛Ú<Â5¬ Ž}§‘äÚDż1Id Y´Zmʲ`Ô_ÁÔ ˘lĂĘsz(«Ú4ÔłśeYr7ĽÁ]yˇĺ<Ý|†ąöI aŐ˙FÁ>~çĺßć§źýi4tľ}ë[üđÓ˙ţÂÜţîů2 S÷‘nÉjé•Ó1so†Ľ=đčg]+çéĆr‚µµ‡_&ččȤO™ô1Ú'‰W?B«ŰźŰ9|©1]Xp&˛RY–Őę°as ÷;$ą®[y¤éž‰Ç:ťUż˘8&đ˝É3¬Śrd™ÓhÍ`Łá“íă0(§±J€$Ńx± éi§‘‚\:Ýĺâ•{ľĹé‡#eI8\©q¤Ą¬ą˛B6H:JÉAVŁ(ćüĄ%~¨‹póö€…C t}3FÓ3SĆQÁ8’´ZnmK®ö2™ Đ5Ht­¬˛rup›úsVč\ącňő#Šb’?ü=mżę¤„™‹4IĆËŘ&$šWwąŞ‚–IĂź$ýšŚö”˙R#ɵ >âUQ÷BáA‹˘¨×ńÄňé÷ŕAL4¦ç«ç§:Á`ßn‡[ó!uhě÷űűŇ:žćm$Ľ Â¤Şś{ŽąŁK›ŠĹÝv‹ŐőaS”ÇćK_ű ľű…Ź1đăQSŽ{ě­i®‘ Ď«řI4Âő«ç…!dë af"§b±‚qŃh´&EĹłÚJ˘KŇŮyF¬Ń0 ži Űń&ZÇßľó‹;|×ńO"„ŕw^ůmô_řůźýĽćx8˘äL3&G'*ŞVÁĂ^ÂĽ“pĐNłsćť [dŐ"/cÜôŻ0›ČW/`¶O Ë”qzÂ~¸ľ•(s…­mV•ÔŞEšeY]ÍU•łťt)U%«Şüě¬ 7]‰S. 
–eíë—f9ć„Y™Ćc4MÇ šh†] Ic—!&âßTŐh4‹ Ń"ÎôÍş‚%¬ţ‹ßˇÓüÔgp=‡sö ě§~á±ćÉçf0 ߯•ńh)4Q‘ŇĆ©µ©*©Můş«J eY;¶«§‡ˇÉMłČŚĽ4ëÍ-I3ŠĽ@ŰFü{ëő›zYU˘Ă´q˝&Q*(Ę]ČĘF7h‘Ltm-2:Z\˙Ç,*s]q˝€(äE‰±Źď·ô‚"KG®ë=ľ±rćŠă^Ż·k»űďßę dţ´ŽáĚߪI ěŰĘz+Ô˘’‰Ń*ázMĂ6őűHjH)±- Ë4I’„ —ŻÔ€w—–Ç `8č3۲¶%NC“XZFĹH©á­:ʆ8z„ ’]’Ű$·Óm:Ąńşö$…äžy¬śÇě!"{b!Ţ˝˘Şţş®Ë±Îq,ŞwěŘăüłßřg¨rű‹ďď~®4=S'C‡ńÄ}LĺĚ;)mł`ŢIљڲ%|ń>şÓ ë]Ćč¶ăaű3Ś3—ĽŘűĹ2t‰gFäńQ8Âóř~P=Ďm¦çŇĆq*cŽ,Uň?FLŻŽGiĎťaś:¤Ĺö‰Q)+… uýʧ;Š"ŤF]ÍÜ:Ľ‰™’ÖÚíţ©{ݎ’DU}hµZ5Žl·1­(°Ýü˝ľ[U)JAxÚív}bŢkxžG)+mNÓ¨pĘÂv·ůšŘzeJĂ.xü‘“$iĘÜL—+×opúá“\»łBRĽřćU–ű »f@&MěÉş–íĎKs_עy˝^Ż>P·Z-¶sZ{–ĂqŠăŰČ<"^»K”vď] Ý„ä<„%îĚ®]Ă6#vŰö‹ş‰lśí4ó%ŠxĚq7ć!/Á Żá@eY˘O÷ZľŚcöšI1ľ‹Ţ8L>^!±žąŻŔ0=Ô{ĽľľŽ¦iĚĚĚÔ°/ ¶)ź^Ó*ţŞ Żš?­bŰ6ľ_mr[ăîô|őwę]ěőzÄqL«ŐŞ×ůôżIłś–_iÚş芔şéŐĆ9EQ4gIe8×Ůk hbB fČh°Š”ż9K"“VęN7Ocy-á»_xŚ~?b}}@Yf ŔqÜĘ^7ɇ)÷–†¸®U%ćŤj2 Ť‡OĚb™Őótm“˛,XY1ÓmĐÄô!¦Y]C#p1&ťC?°Ńtťţ fyu€ˇgWdeŰ®`gç/Żc;E)qŔłŘş.ł4ćĚń…î˝X'® „F»Ý!Ď3@ÜëÉJc˘Bq4ÂŇr<3ÇCĆ•*ÉkÎPhMâÜ ÜáţĹ[ÔÔ> ĐétęNăô°,«>|N·®§×O–e´Űíó€ťF’$ôz=t]ż/Ůn¨®±ę>¨÷@±ăďźJQDí+ŁŃ¨îv:ť˘ą×đ}B"“8¦QË’íµ9FŽ!Ş"ÝÂ\sł †#Ž^@×uÖú}lŻĹĄŰľőúeĆ l»1OF)Ĺ&˝ăÁ ‡ëůXŢ Rě/'QJAý~ż.Ľ´ŰmN7žâ¤ţ$ÇâUĚć<Ĺř.I‘$.ńÝ7)†7 yÂŻâčË”EÄxí]LmHô˙çÓ?ţů?^ă{Ď4+L¤_ 8䔵Ś=‚Jěݵ m™Ź!ŹŃýCäržP;s_µ`§ˇ@ÖŽălŞ )âΕkkt»>‹÷FX–É­;}îÜĐjzÜ^đŢ…»,­ Éó‚·Ţ˝Íz?¦Ńđą·< TeuMÓxű˝{k0&dąÄ¶őMSI;©JÜĄ++´šwîń\›¦§äW$Ť›wS,×DZMÂQŹ2Vr-^€Đ˘d-.MC+H“„4+°L›č/˙śbmóČ1Âď|“b<ĆčÎżó&ös%řŘ÷`Fuâ,†XzFśÄäAcß-uí۱™Ő|Ë(q&Xޤpjvlö±ô¬jOětł|żA)ěšô¶Űő &-*-#Jµš4—&IV˘ěxÓBĂt*˛PEhrĽ`hSŻ|Ţ‹˛ňY—šřÄWóóIržlĂĘť®bMźĆŐőOą”ÄŐÜżýVöR6jÝCE´śžżµ˛SőaĂÚvŁuĄN͊ذ[ZU8’¬@–×1 ‡›ÜqvŞ‚+V;L,=Ý6Ť `¦ÓÂ24nŢľË3OćŔl“$­Ôš ›NSǶtŢFüÓu×ÇÖ±-S/‰’Ó¬ąŞ#'Ë ]«RkÔ„Ł<ްVŐ»Šô™a6¶TvľYˇA‰ ”Z%ýµM›z»ŽâvŘi˘âÖůyž×\ťŤ©±Ň™ú-Şj»ő÷gYVcbwŠ…Óëi™Ę‡öÓQt]Ý0IłS×Ń5X__ßrT·¸Ö†˛ERŘřA“ů1Üwâ$ăؑüń2słsśż|fىkoź×%…ë·&ĺ!¶IUN2űŘkLďÇÓÔ¦ÓÁ2‡b„¦c9–gŁ;M9”9BÓˇ¸‹Ą 1ňEDy‹R;„îţ§çóĽŕ›ë#Nů.'ĚJĐ;IâM­;EFJĂŚáꎻeş×F¦C ÷ă¸ÁĚ·i§™€Şb›eĂqĘÚzÄŤŰk4—Ţ ä᳼öÖ-Îť™çŢňsŹa0S”%Ož=‚i\ş˛Ě#§jXĂ7_şJšMŁ(J–VF\»±Nłá’f%‹÷†´›‹÷ú†dy-D˘qůę ÇŽtyĺÍ» G9~ŕóŤ—îďrăć ł-‰Ą—"«’ĽB#XÄIľg‹F0Á㊌BČÁô˝w°yŚôÚ4×C¤×ŻĐúů˙ ďPĄŁYyxWšŤşq*¶ČG=yšÍ¬Ú%eQ oR`Ř•yÁvěŘĘN¸»–x~\š¤Y¶ Ö°ÝHs Űm…#˛d„ĐL<żA–ëdyN‰TÄÄpĽ~qAÝ?ŤĘç] Ďo’•q’“&Yyż¦íÖëWë´Ď˝çy{Şěv˙>|;¨z¦Ŕ}Šjě*Ńl6ëĎţnŘŘu]„VáÄ-SG k‚¨Jđ•…ît0Ó„Äťt=$P׫F˛¸G·a0`Ë´xăťó”eIŁŮäťó—9qâ4EžÝ9I ?¨,"ĂŃ:¶‘Łk ‹Ś0J@l<ÇýnpJo[‘F´â ”ŁŞ[fČ,B6ş;‹f7@(]ľÝ(€ä_ůŤő’ŰŮżýłź+JĚű#86ŹY*'5éM% a’Ďc&ßDłŰČŇD÷Úäăurďü őŕ2ŠŃ­Ö€ęL]şşÂŇňCZÜąŰăCOĺ•×nňĚ“G‡9'ŽĎ˘i’µő1zę83ťoľs‹'ÎݵśĎ_ZćúÍU浹·4$Žs.\^¦Ńpą·<˘?Hp]›·Öpť ‡wőĆ:·îôxäáĽţÎ=nÜ23ÓäŰŻŢ%Éuň˘äÎâ:sm˘u]ÉĆiQş.á~‹ L˝ Ϣ ÜÁ°|¸Dʲ^IaÓťérxľďJ<3a~ÎŲ-‚Ŕe¶ë2ÓqiµŽ,th–Ąaš• ĆlףŰvkĽf#¨ŰŔs 4!90pěčŤŔE–9­@Ă1K,·Ĺˇ]„,9Ř)9|ŔĆ2JÍ:8–ä`פÝĐi·=fg;´Ű Ň,'pŮ$C)„@×őÚĄ«(5„Pä9eÚ#Ír\/@3<¤$/AŁ20ű¸Fr˙ýÓ*nC™'5\¤‚Ť•¤$…ą+\h§X¤đĘaŻůű‘2ÜiţV¨ĂVŘ×4˙b«"ÁÖ÷¨Ńhl s]·–2›ÖŢÝ [›ŽĄÓóŐ~äeYAĂb;ČŃô>dć¤Č–×A×u˘(ÄŐCžÁü.×nŢ!¸rý&Ł0˘Ńl•ŚçôŢ/%HŁYÇ ™öŞ|F—"e0ÎĐuăŠ!ĄÄĐZ~d‰ĚcĘt„LCĐt4«f¸U,€ŚĐ ˘„2D˙­_˙µĎ˙âÉ–’Ś·ÇO:-Ě|TëCŽF#FŁŃ¤ŐÚú—pZ2/Đť(C˘AĆH<ú 3꡵3‰ďĽřňuÎ>6OĄ4›NËĺÚőU?s”;wűĚÍzŚĂ„wŢ[d0Šh5m–VÇś:q°~1nŢ^ĂuLzýĂиz}…†o3Ćôű×n®1S\ÇŞO×_ůËó|×GNsĺú*Źžžcńn:´š>őÍóřŮÓ\Ľ˛ÂńŁ3MGĘĘ0áÚÍšaqćŃ#üŮWßćč|ůŮŞÂ/P2Ś9i‘—˘RÖŔ"ŮGG‰©ů¤łBwHŇ*™0ťV}ĐŨ–uě¶,2YU%·ř¦; űRJŠtŚ”lt¤2ÍÉ®‡ĄoĎĐ×DĄőť&IýűŃś vĆLŕů-Ňx8i[çÄILQ€ç7°Ż&xkEׂŤ®ÉęŕV±Çő s÷JäôNň`»_…#ő÷»Iîçű·jŤO˙–ťŠ [ç«9ÓŐËiĽîNó·ĆŇéŽb•`[ Á2uЉjÉV¨Ďv=‰ŔšßS|\·Ş§QS/*µ QŇn¸¬¬ö|ꞔ|üůg±lŹBŞ÷¨ŠĹIaáO=ăQŻ‚žMF…­666T†-Ď«NŚf#×N€~0źg,O ’¨DćŁńÂhA9 @ ä#.Jţ|ąĎ‡; ľąĽÎŚbëVŤ ±m!«««"ć ť!p(˘;h†A:X%v?S?”uiš~¨›ťAš,Ě·i6¶©síć®káşöYZÝĚC›?Ň!Ërz˝·ď¬2Ómâ8ááągNň˙›™éú¦ÎL7 ÝrůćKWxü±^yý:Źž:PýM§Órń= ]“ŚĂ’4UUĐ­p†'ŹĎróö‡ş3\a24ĺ4V#L;Ŕôg§śĘ*;Řď©1÷ŮŔĘo„/ż„°,f~îłŘźţÁşŞť'#LsJO´Ľź4ň 'jXz‰kfäYĆ8µ°ťF­{9Źkɱť†&Ŕ3s˛¬Ç8±°śĂť% G2Ä2¦·Ŕ°<’8Şu)5Q9•%É:EŃܤďš!v%í•R•Ú¦ë÷}ż®tmwýş¦!îޡ\ľ‡đ|ÄC'É…¶ÉĆŻŮl>tÜvßżßűŻŢ!…Io4”şÉ×Ö`Ȳ >ě&¸ĹýNoŰÍĎóĽĆÔîĺW®f[ݡLłÂoęB`ZF]ÝŘîűUUĂóĽÚť'+2Íđ7’ćKËCt]cvƧÝňé 
RµYY­ôW׆ôÇŹuŤSšÍ`ĐlXHBÓX_Óm»JnÜZç˙cí˝cmËîűľĎÚ˝śzŰëoć˝éC‡CrX%R”hZbbKöJdY±cÉ€ŕŔq @¤cÂcA°bHAśâČ $`«!–#±H¤XfXf†Sß›÷ćőr_ąĺôÝ÷^ůcťµďąçÖ7Ęî>çěµ×ţ­ßúýľ%l„ś8¶DQŞ›ĚÇDQÂBŰÜÓAEiUŽÂă CŮÁŠä@e…˛?âî?ú"ţ‡?‚÷óżD«łP'Me:ŔłUbQMĺżvÓôÔó8‹ń9hsw­wJب¤ •!­–RHHÓT™'Žu¸M¨.⬾č»ŃuĽZ¸üâвáVOđ—KţZp°Î­ţ -—łąąyčß®Żo6Ő'J @¸6ý~˙ŔŽ€˛:6đ‚ć4A›(#Ź©˝kLöÁB ¤Tx.ÓnŕO‰}o§«ůf5ضÉć•A˛‹Ć­–Ô:¬{mú~g}Ţőżkh–˘^ŹĂápZ]i`Č>¤çá'?Á3ă ç®EHéż9śđSmŹźnşŰx łę2i2&—XZ(0óu†ĺłµă~÷~Řäböz‰EłPýAÄýµ1O?yßwYßPUÉF/FVĐjąŚĆ)R*#‚n'$ˡÓÉ‹’Ń(˘,sn­ö9yĽ ¨„,ËKßVd$!H“Ç6±-Iż?!- Î>|Ś4-p]‹Ao Ű”†Äµ÷?ř&ą‰íµp‡h2ATŽUR+[0ÉL:Ý•:9‰Ç›xvQW1ÇévŤřÝćQă)´¶6*|«Pšµ@Tx´Ú‹ę·T“ÉŁŠ¦đţ|ĘJ–Ňúž~ż(•}y^ Lw‘4MđŚÉ¶ůČJÓ]¨-«ŞÄq¦×W,S˛ç†`ÉłT¤…µŤ4&„`=IYtťZńEŰś_Mh» S+ŘQQbA” <RŇëő(˲®ţÝëłŚŽł7ůK'Ułó?›ŹtĐź–eŃétx˙wŕłKđŰ«đ˧໠ü§6÷” ť˙=‹‹ęN&“ʇôX\\$›ŞaxŽEUć»ňA¶ŻÄ1 *ü°Y‹4dc2ĽŹoďË•¦ąŤ3í¦ęąŐ1y4Ü$°¶[ŇÂ"-¬Ś )eÍńH’d‡•ýěXXXPúĽSŇŻľ^Ë™¦Y“[GŁQ}Ő¤ç1_˙©źýŇ÷z#žiü˝#-âń¸Ćąh‰ß÷É&w)Ą‰ë5ÉYˇµ(˘Uř!„±;Hü°„™Ý®Őâ(Ë‚ đévtŰÍF8 ôŠťl‚VÓĄŮp°LA»ĄţŰ4ŐÉɶ4N% <:ívËĄ, \Çäô˙&Tž«‹ľ•stQň=?Ä÷>*/J\»Â¶ö˙žm‘ĺYL^0Ó"Úß©,ĘŢű“?†ŮYÂó:)5mź$«¦6¶»ëČÍÎă~íćůçPVFÝBÎJ/č`š&ăń˛Č-*l˛lď×ě0 ĺ#ž¤ UĄZl%6qZ!Laĺ¸¶Ł„)yÁlnáţâyŞpĐA¨$kŇĽÂ[řź´°j5„˝î–ô%Ë’č·ţŮą7đžx ĂőqÎ>†ŚĆLľţÂOü“JÖ¸¸y]ÄwKÚ{]EýŻe‚ řĄSĐ0ámČKÉ{ś˝‚†aÔĚÜ,Ëjńň©B{žG)·pĘ­Ř˝ŻS–ŤŽUPd1i.i¶ÚµŽjO°X? ęSA•’$)łfđ’ţ*Ë|›ýŁ)$ vrf[}–e©Šř.ó0«Ą9ŹMÖĐ)=źł(Ó´0˝3üÓúkď –pás˙Á_éŹíŹuüÝ•6Ńx´ o¨źEݢ”6®× +ô'!iś!šĎî Y™˝÷wă–EˇëV+¤Űń E>K“‚]ľMŘX¦¨˙۶´ÜśJÄ<×! <ߣÓň(K• –e(p0 *±L1­"ftß·±ÝĎs¦Ď\ eŽcÎN·,RŇTˆ$9ČŞŘöŇPN ęTÔ2ŽEQ"ä¸méJŢadĄ†ÚňŇŘ‚B$1Ńd„ëXŽúýeY)‡}î;iNÝżă…$ä…ŔBňd°Łh‘IUĆ=,Ő¤=Ű ą1Hř—/źc—ÜĹ<Ôiqk\ÚÂÜ‹ZľÇľ}…gŽ.gĂ8ˇ4MÂ0ŕż}á%>yú“˘!xőŢ:wŁď޺Ï˘ ¦iňżß†˙ęIŘČáo‡omÂçZ‡;,jBď¬éÉ´č-ËÂő<˛¬ˇlvŁ(:°ěZ%ž]b›Ĺt2§đŠ,Ë(ňô@­xŤG—EB’ćc«k#Ą¤Č‹é{´•ĎXFEQ; ł±ÓqśäÁŮůš…ăÍ^ŻúZe¨,ËúPifbyc?˙K_úX§Ë?zč$Ç|ć´}0 IH’«ĽE·5  )E×mŠŁL"UŽÖ­Ëůń /őn×΂¬5S1Žăp¶Q×Ďk©m·ó5ęÄ2őL\K*B„Ѩ'n2(°Ľ§˛* ŽŠěR(fż4ę$/ËňşŠRB’¦Čă'ÉÂ6Íö¶m3(ŇE)Â&R¸Di‰”jŁ+h[ ĄhâO“ę"éá)ń”¬„-˛Ň$Í” ĂAIŠ=%}%I‚aىăzDQ„kćŰî?)l­®ŞTOxV6s}Z“Î˛Â Ë ! 
e7|ĐýëdÄímĐűW˙‚ćŹ~çř ü÷ľźđG?Mqç6É[o ş]އÝvý¬.â»!ŤÍýa˛“>ÜO%/ôŹ„p҇kř<äűľCźĄÄh4ÚńŘ•–t05l¸¶2N9SoËTÓ4 ŻL\WµŐ• GHZäEu6»e”ÄÓMYźă¸ ™Ë:A‘R{k+ÎƢÝ0`YĎj k9]ĐnI˙üźż;‡đç˙î?Ő]´˙ÉĂÇčş&ťN‡8ŽëM@W(¬r•n«O4ęQ‰ŽŰ 0Ź(§Şi§`Ż$WcĘß­ą®$kŕÄaŘŇÖ-g-ܾŠĐ†éAŢ´1L ÇśÚŽ‹Ď¦Đ™¶ăâúÍ©ťnyŕZ2 ‰e”yBV”řľ&MMń†{˛Ę"htkëoň!I¦IgqVQ> !‹ĆĆď†Ő$Lär«ó•Ć=<+ŁČUĺ–O’‡Â,łÜFşó§®™”É6e¬0đÂnÝuqĹD+Ě‚~ň9şŚď{üÎąwX‹ĆyÎďĽu‘Q–sc8ćkW®óřňxá2ooôBđ‡o_ćÉĄE¬j+ŇnŤ{Ĺ2]eř­[đŢüH^Ăĺ ü쉋ł18Öę,:>¦ŘŃl6)JŐ©\›˘Čkťéý®·ŤŞVŞ1H3¨cVUUxA“ř”ELQçžçjM»®‡4ÜiŃMĘLŇ|ö1ĎÓŇŘ߬÷G4OWÓ4ĂúX3ŕo,¬pÄĘČ ĺw¬Ą‡’$©ebpžfm4¤a^Âő6‡mLď(ľď×Á^?¬ůIÖIęx<Ţfú .IúUUŐZr‡m-ĚăoÂ0¬u-ł,c4‰‘ĄJTc‹—ßHXďĺ´›&ĎżŻÁ™‡”öbŤńĚ„,ʨ„ňQ.‹€I<ÁłŇ=#¨€ĺŮ%RŽ 7$h.ÇUáYeM6ŹŐóLĺš”cЇQ4Á6dL}L<ĘŔ eî’*ýŮÝ`[ł÷ŻŻŹr­E\×%ĄŁp”†şiőű ó1šť?Iš•DqÂýń„/_Šů…çŢË7Ż\ç'ÎśâwŢĽŔ8ËůÉG"—’˙â«ßâ±Ĺ.ý8a’çőBڞ˙N“ËëTľGZVx¶˛+óîO-+8^ñůä©cS\°¨ďävŻĎő^źÖŃ„ir¦Űćą#K\îytˇ‹6ă„“í&ă,çăť`ŃUE¸Y¨îhíV Őď|š¦H\ţë+ŕČ%<ä«›Î#t,ś˝^ĂQôaPżg:Ńxě˝®Ą’bšišc™ŠŘŻ ˝žçŃétö„ZĚr|˛J)zH)Iâ1†LH?h"D@4ăńľ°É´0 ›*RRfľNµ®<S”‡jh©ÍŮ=EëdĎÎ×ěĐcýĚtSnÇQűÜ˙ň«żňĄłF„k›uőUc2,Ë·KŇá۲1m…!JŽ J|_™@k]Ü˝nć°mň١«*ăń¸&ŠĽ›ŰĽ®ĄľçbY&Ł |ĺ›c6ű9e)‰âŠë«9§Žúx®¤Ě†*0Nĺ»Ň$QíňF‹Ľ˛•ÓŘ!*™ŽĄt“4Ăv|\ŻAś)§1%źĄ<е#Řd<Ŕł¶Ŕţ¦!‰SI’¤ŰZ©2ó§U5­â ŔjŐşŞ2n öFÝâK¦-*Ő"‹łĂ9… ©;Q4ĆźJîÄ™¤¨¬ZOWËťé!AUĺ¤nKo×<¬#€ŹHżů§d×®ŕś<ÍĆżúźAŚ&dׯâ>űĚgŢżďüÍ’ főA›äî§«Řn·k]Gź’§[Ď7J>Ř(ůh»äăABOö}‡ćO¶łż}ޱŰőžça;Y^bMµ5u/}`=fń‚YĄĽęĄ”$“žUŕXJr)ÍKë†8_ŃĂ_űµwKř_?˙+_•Ľu‡ÚM\Ëäµ{k$EÉJpŞÝÄ6M6˘¶çňţŁ+ÜčŹřÄé\ÚŘd)đ9ŮiÓp] )y|±Ë0N8Őn‚ÇÚH)éř_˝r×ďŻsşÓ˘¨*,O,-’V’‡:ŚÓ”A’đH·ËfÓrÎv;B°â{;Öˇ.ÍĂĆ|߯+ŠŁŃO/ >ŢŞřd»äÇ:%?ŮN±ódG ™Ý‡5 J'qóĎ^ßyţzP2ŽyY)c×&™K’÷Ň:7„¬Í§ĘJ`8íiŐ6E#kK>/+ Há§%cn/Ż$HsV️-’i>%°¦\¶ă!‹ő{ -ŃAĹb)ĺLł†‰č"ţ Ý™Ş%9߼ą!Í2#4KLĂŔ”#Šö°§÷5Â0"J]ÖFËHűöTÚCo®DiŚă^c\Ľé̶mÚmĄÝ¨ÝL¤T'ĽYĺŘש1°—VCť¬ľ÷Jź›wSšˇ:¦®ŢKql#K6yVŕ;ń5¬o;!®çM+±çp\E)H*Ź hL[ń#„Ěpn ľůć¶3Ęmň)ĆTĎXŹÇăŻ6D…ixľn5F4§’3ŁaĎö ’Č Ź lRŐ¤Żl‡ř9¤°)¤‰0Š<ĹňRPNŻ×ń$Ž ďm»çĽ4‰ňťĐ—Y€ąfaxĎeÉŕ‹żByý2+˙ŕó¬ýO˙ŤĎü$rúäˇćďAI{»]?KZ«ŞŠ j¦ía®ź‡´ĂKUUµ¸÷~Cwkôő†aĐétČŠŠ˘¬}‡ńÖ°ŰĐťž(RŢMOmÎiaâ‹8ŽŞúZUaE˘ŞSaŘP]…dL`gubRTĆ´ˇîŻL6ęĘ”’Áł1UÔARł„;<ĎŰáR§+¸zăh·Ur= vM^tR¸ĽĽühk\ş˝%yá7ÍË0‚-ďyőC°Đb2ŚYOÎR+ŘŽWëóΓÎ4ڶ:{ŤĂ’δzÂnkSŻÝ:!°l—f¨ÜźţŕOîÓ•C—çÜş›’¦ďşÁc§ Ü]â°ŇÜ+Ŕ÷C%•Ťń¬âP¤±˘¤…˛×ë•2Ćń[őÚ«Ňőm‡÷dJš©ĂLB¤×ÎaçA‰A…ă)٤8Žđý î8 ÷˝ŹJB”;¸ÓßEę÷{v±˝Š- Ň  ńń\ňM%íTXX®â;č.I<ŢŔ·‹™ďŚSW…„Ň 2¦ßŹÓ¶iQ3IRCP±ŐÉ©PĎĘš^łž;řfI`–ş ]ő<ňśÍŃ{úb&†UÓÓ/…[@)%žăŕů>Ů´›,%H±ő·ě gźźV)'44tĺPżťŽŇ×Ý’˝Ţ÷}YŤĺ°âG„Ý>IŃż„Őy„ÍőőâCX–S¤µš‚ĆLhFŻŢ´ ţ~7µ“´ŰUŘźáp¸cŐÉ…&z°]ŻG’HĂÇuÂ&Ąd2\Çł¶îଠÙłăúw›d…N†eT$ąAeř4›íšśE븇TF3a5†+KĆxÖ´Ĺ%LŢĽ9ćo^Ł?‰9¶ĐćÇź9ĂŮe ¤úüIĐé*É™(ŠČÓ1ž©l-Ó©ÄÓAläŮ$oż@r7óH/\¤ýßü*Α#d7®a--#‹’ĆŹ˙eżđ‹DqüŔĘ ˙ xł×űľ_'YĂáđŕłďnŹÇăC'ył×+É‹8+plIżßß·˘'„¨Űóq4FT1J¨,K˘ŃzÝ}Řy˝ Î ,§ëůjS*&¸VIZz4Űj] űëöö@©Ţ!Śè+zŘoˇl3uěĐVźzčg¨«ő{éSęńn“۵µµ¨–?ʢHpÉqGéŤgWĺKÁ"ĺđfű6ďOج>„iŮ5†nW…T…VÇČâđ^É™‚NGLwKđg7W U8t±a&AŔżűł!'Žz´B‹ăÇ\¤„Żż°ÉÉŁ6ď9›Öj1»ŤJBśŰŘîV’X‘˛í=ÄOÉKA^ą5@o´ăážąő.•SĄą‹2‡†°h}âĂ$űž•ăZĺTaĆ!lt¶T9;eöýýŇÝ‚ĹcL™¨nˇ0ůáµ!ß|ë:Ă(a±ň©÷žĺ=Ç=jN'™M«ł\ËNĆ#,"lK’äöô@ş˙2MłÖ«ßo?Ž+“—‡mđT8âxSí_yž×–؇ťżůˇ[zČĐÉ’¶Â-Ë’ŤŤŤšČzëgß5ýý~˙Ŕ"Ăüő›žć%yQŃ śš;±ßőZţ”U2U†ßŘ*2eOeUA\(O۶‰Ł T1¶YQ™ťúśE;L•ŇÂ"ÎÍęˇă¨–`;č9j’.RéwH… Vkď0{›X[[›Â&¦,ę$Ő.Wé6î`ä÷ÂDŘăAEţĄ: Uxłv÷©ŰŮSpl!Ú‹Ęiž˙ôŮbmŰD“FăÚÖĄDą"‘iěű`Đ#áUÄą˝«BËě<̶Šu\ŰK6Şád5”'-,üćr­Ě˘dÇX¤ ĂhL!AŤZFńöćóť·Xh†¬ Ć8–Éßú‰÷q¬)¨¤ 0Úu%Š˘z?Ť˘ey0yyöţŇYľ‡ÜH| !ů@sČé%•ë"Öa‹V†ap«„—F1]Ëäă-sú·ł•Ěö Ă ŰUdýľ?PŃl:tQNk‡FaGŹZĄÁ˛‰Ó­"Ăaşy‚ ˲ Íš§TUăÁ:ľ}đAIň4ž]ÄÍ–‚çŚë¸f˛UŤ—jíDą˝í°§×żNHÓ4­q黍N§eYŰŠóźş#iţęţá—´e˝1šŐ€BzLŠăT¸ˇK1şCě} żfşjb…®2hĚ­>5;-»µČôCźg’ęŰ^ľŇł×ďŽ_ÚmSĚ{Ö !Áp ·ďe¤™$Ë%G—>ň\›ĽP›†Ş5“9䕉”)EmohËČÉҢJ>K8$Y‰8H>K@IP;‡¤iJ6I ,W’3łm°˝ća0žđí—^ă[/ż†ašÄiF§Ů Şů©íŻ!‘(aiĺÖŘéë·nń˝×ĎsůÖ}Ž9AY1•ěŮô3ő Ż$† ’`™P–9Qśá¸žßäO_˝ÄýÁźţÄłü•Ź=KQ–\ľłN+đ9˝č‘*¦Q›Iśăş>ž˙`S°ĹĘŢÍ‚1—kąK& Îś]¤{ú$ń÷^ŔűÄ'ńńďQ™Ö6©’a•ëůׯń‡‘ś1 VKaSűý~ť°ě%•˛×wëu?™Lę÷çA$gÂ0D A^V¸Ž‰i(`˙^×›FĄl7ĄŔ2$“Ą 
†éâL+‡Q!d±Ít×9ĘÂSIÎałĘɤżs85í(*łVGĐ1@c9uJ»λDÍ:ÍVdgqů¶mo;4~ó7ó]an?˙…/|°’`Z5óŰF‡qz °pďÜľĎÉĄ6g–`5q¦ &…"ۦDČśá¤ř “¸çISm» k笸íÖî¶´łůŔnE«Ű%üýó7řĚr—/>qš÷·CľşÖg5+řÉ%ENžM†Š…óÝßŮďęJđAäËv»]ߏRxI¶uMZ?a"1(Ş Ç6©JŐQßëzŰ,ńíÇ,±Lux«¤sKź9ŽcÂFszH;üęý©°p¦RŽ łäy!”’‘c*0C7ŰI·:AŐ±XŻůůš?<ëűÔEŇ$I5Źć?ţÂţĄ›©ŹkTřÖTę%ľ€gÝŰlěôaSljqŞŢ\gSĂ0ę¨OZú‡d™Âđf˝UŠ"Ă ¶´$g+#óC±­}¦l8íŞBÍęë6 • TPŮž$Y•ś:pw˝ä#ďwĄŞ@č‡b9ľ:%ĎMŞśž^ŠĘŔ4ŞúˇŰf…!•˝i{xA“4‡Ľ(·A¤dj^ ’¨2íă•Ň%Ě3’´¬“´ĂĚA%%7îÜçě©ăĽzţ|×áéÇβŃňńçžáµóďpołŹmZ¬őG]^as8dąŰa±!8şÔ%Ë ^zóe‘q|e…Ŕ÷‰Ó‚qśQI‰eš Ć&qÂŃĺ˛<Ƕ nßŰ$I2>ľHˬ\ZÝäęÝ ®ßŰŕ‡—n |ú™Óř^XWmł¨·MĆĆ2 /!ŽÓméAŘČš•=«,ç9ľˇô:Çă1™/ÁšBlv»^łĘu·bŻŔfFmV°W7bţz ŮŃ2I†iq»?ćN‚eZTu`łm{×Ó»˘ÖŤFőú××Ý/ 4 Őš,K|×&Ď2dÖ'Ë$;łcVu5UWË+·…ăLťÓ>ESVS}gi+rÄ>U!ţ´z5ö@Űô•­)\ döÔF¬t—óKjÍ Őó¬«išu%s/ /]±Ě˛ŚŃhTŘő˝Ű<ľŰäöK˙đWľx?sí¦U`[*áŁďáT Nu‹0ČČâ’Ô8VŻŻů8¬ q×qXW\\×etçLŰÁrü:yÓ2h»VćgZĂW®+X‡í&ĚvBšÍ¦’-+%Uąß{\˛˛ä2K>ň¬­°ł3 ÇőŐćż#ŃžĆáR%ąĆtťX†Ä W±Da›˘˛H˛{®ŘLU ň¸‡k•*.U)ă¨x ŽNž—ÜßčqęŘ o\Ľ‚e<÷žÇą·ŃăSĎżźWŢşČÝŤ>®m1ś¤¬,.ł9qtqŽ/YYlc™&/ľzŽZŢnťÝÝba«ŐŞ;_eYqosĺ~Ź´¨hxé!bé^łňűÍźiš„ŤĆ´[«Š Ńx€Í„8I±loÇú ě-⤎«qaŃh*>Áx4ÄaL§–Ť´¦ᢎĄ»ŤĽ4q‚­^<b;.~Đ"ÉU© “‚­‚Nt«éˇsţůk¸šîţĎvýwĂŃj•ť7j‡;ýîŐ5|ŁäÉ`LÓT'A‰ç¤ÁU´Ů<‰‘ŢÁ[,\ť4ÎĂ4K;‚:¸úľŹc™\~é«řÍ.”9Ůđ~ xŢÜܬłúŮ›ŃÄ„4Mk¶˘mŰt»Ýz4t@ÖÂŔcęv¶“‘§‡eYXfĹGžµ(óAÝ–ŇżS'Ý »~FYŚS—(·kź{ĂjČć}ĆĂŽëă7–!y©ţ&)<KO×6¦IĘeÄ s¬Ĺău˘±W‹P plĺ´ćŘ6–iŞÓł!¦r>§Žge±ËSŹś!ÍsŽŻ,sső¦)yíÂuŁ ťFH#pąug•ë·oú.ĂqĚęú$‡Ţ`ŚmY¤YŽëX_ę°Đnňä#§=<÷P—'O.1Äś»~‡Ľ(ůřÓŹ°ĐhâůZ»r‚3P—@UuŃë ŰíÖ'żĂ ]1ę÷űäyN§Ó©É1¶mĂ“ď%hµöd”ëŔÚď÷IÓ”Žďăzë÷±ć0­ş*0˝ÖµŚăv»˝íŕ†ęąO& ż÷Âüëoţ?ůÁ9~űkßç…‹7©*eC™¦)ťN§ľFJYcmőű2źüëäfssPX0­­‡67± ç؆ Ť•\Śo8 ™ ×j<šă8T’ş˛yĺŕű:aÇ#«ÄłJ\1f2\$ŤÖ9 ’ÂT,ëą‘Ęč ‰Ć4ś[™ 7Tő§µDZ…¤…Éěň0=XÎaáĄ$Š"677k ¶nŹíÇ€˙?ŰĆEvÔ¶Ěštr˘ČA#0KÎř–P‡) ż ś&U2Ŕjś€č¦Ó¨µÄ÷ŠĂÚ.T[\Eďű˘âÚ+_Ç ;PĺLÖoÔó ˇ0óó6«Ń©ßýůív{_†ůˇyĄ¤ŽĂšô6? Ă Ý|řY›Á PŻa ŃZŃ»}F9ĹNR·ć(ÓN-GDŁ !ÔZ’M’ܤ’‚8·đux‰Ł Žąő.«*°"*‡ĂZŁÔ4Í˝ă°!p¦ö˛ľ«¤ÎŠ<Çžę‡ał§OÓí´9sę$I–±Ômsűî*¶)yëň-V×6i5;M®ßľÍőŰ·}›ţpÄÝŤyiŇާëϵ9ľŇĄÓ yřÄBĎ$Ooň왣ÄiĆ•;ëeĹÇź~„•vĂnÖńĂd'7/ÍúŹÇŚF#|߯÷ây&“ ÁĎóęX~XŘĎěZ´,‹ăŤC }ţÎéŁü̱E\Ó`ŮÝ CŇ×gYFżßŻc™&oĎ—’,ăo]ĺwľů _í"đÂëüÁ‹oRHQç$ó×ĂVˇOÇśůďžźżůőŁs8¶Iŕ)cK(Ľw`çȬĎxԯןe™äĺv¸ŹŇoÖäpCŞŽhŕäĺ€É¨‡eŮJk_ÇŇ]J~Ő‚ŞôŽG4Ü Qô™Ś{8Ž‹,i1ď )§Îg;cńh4˘ßď׸jM<Ű«hµíąĚő:rg×÷°&”éaM~@»5FŘ ¤h!ʻěLÂźˇ‚„ΔuҨ%śćoB›@ ®˝†iŮtĎ<ËÝs/°rć=·©Ş–UW˘temĐ=ëcď{.~Ö/“&ěěwzÔ ĽŞ’ ÇÁV‹Mă{fO_úeŐ•šŮŕ˝aĺ ´c–µ9‚YaăŞZ—Ćc,Çß•‰X!‘•ąM!áPĘBĺ®­OËRřI Ő™¤ĐjwYŰě±Đnc[7nßćř‚ŤmJ$kýŰ2é¶<6ú1o]Yĺä‘EšG…ĂŇâýářQâP:—:úľ_ýó\ů„żty•o˝u…Ĺfȧž}śß˙ö1„ŕç>ő–›*ˇ×]íČóĽ~_†Ăa˝ő{3˙Ý ¬+Y–Ńjµ±lĺô&eEžĄeo[uU–Z»Ąăie„ ˘Š±­ŠŞµ}ŚĚ6klc^‚eT•$-ײ,Ćă!&žĄaQ‰m¬Ü"^ßVÍĎ Bşx3Ňy¶ŘN´Q˛4&Yi˛›=‚úp»ÇÝé‰ă¸Ö÷ť˙śYV˛îD­¬¬n!Î M(›~:îčk4=¤ŃD™Ż1é$ŤĎ Ä—¶ÇaĂ0v­ľę8™e·_ů2G{ń$×_ú2g?ô˛’şˇ•o4\CÝŰdw”qE€auwNW€÷zŹ´ŚXYUŚĆˇż9›gSK)ë÷s<×Њ١ů:±ÝŹ8(Pm[oęúĄ‡"]9xľzâhŚiÚ4¦ćt˛;ul,(1¤ĹxF!á°äą˛Ş°L“ĽPN–a`ŠŰ2'Đh´E1­FiZÜ[[c©ˇ’‰AśbYMßa0IyăŇmN®,Điä•M§Ý%™b®}[bŽm0Š *)i6:•Áż}ů:Ż_»ĎÇž<Ĺ'žz hÔkgĐ_'°vVĎĆ™Skf×–Ţő3|PŇ”> EÁ`0x`…™Ň4ůą·n˛ěąśô\J)ůóŤ˙ˇe~¦íď›tĎ*|hePÄŃëk}~÷;Ż#€˙đÇžçO^z‹ÍŃ„yĎYžä8k˝YQđÔ#gÔŤÇu\ď †śżtϱyčř‘mżCďyRJ–m㻊Ľ)ÄYÁń#ˬmöXę¶IĆ›3ŠI‚a\rë^ʇŽÁ0Ľ)y=KUQěĆťuŽY!h,L«¶˝ZĆ4-`umŔĂÇÚSň»‹4)Š’K×®đŘÉníh–äAKUĺwXWxĄTđťJxřA“˛,T1ÄĚ·áŮëX\X;Đőş"«“[K敞41o?ĆÝŢCóóź˙ü—ô‰rDX~+lSô/cŘ&eĽI$އ´–ęb»SŤ~PÚYB˙Ť^řżý˙Đ=őžërëÍď0ŢĽĂĆő7é9M^jłťNg×ÄĄ( ŇÉ:&C,»e;5&®^,3ò,Z-…»M"e;8­dhžnăéÓ¤ő>g‡®ŔĄiZW’g±|»MĽj—V;˙Mä$iRŰńjÂF4î× yP y'żB\82ÄŞ W Óßošęą¸VďTF…kIŇ$ĄŐęL«~†TřIŐ†’„ž…ď %k˛˛Đ¦Ópi&+HŇ ĎóévşJŞ«*±D‰ďšřîöÍČ bĄÝb}8ásď?˛ÂvzŽł˘˘(Ęm-’¬´j&üě3€-ČIµ;Óań¸úp˘O‡m±Y«·čýł‚éş­6˛¬pŽź`üGżŹwć,ÖÉÓ‡‚Lčç—¦im3¨ˇ/^¸cŰś\îňéçžŕüŤ»”UE3ô8ÖipłĎËç.réú-ÚMîmö9uüX]ťýÚ /qëŢ:ç/_ăôńŁÓů¤yÁZo@« K×nŇ \×Ăó}®Ü\%Îr\ÇĺüĄ+t‚-čŔć8ăOżž+·îqbą‰)Š)T&äíkwF%¨dyqQĆZŻ”&ţňEŽ,u±,ÇTĚý,/yűęm Ëö] ˘NmäM†ŰÚŁ°eˇŞ„Ç%aŘDSáqQmĂ€©÷NÔř/=4nO?˙şj=Ýdu+~?ČÂn۶ůő_˙őwKřüç?_ĘĚě6ˇuĂiPö/b8.ĹhŤŘůŇŘJĘgă°~˙w‹Ă–eř>×~ř ‚…Ł”Éńú*7ßz‘Ń˝«tŹť!+޶ˇ™áş*5oG\–%éř†ěcÚ!–­Č 
ZöhĎ8,á2O·ĹaŤ®±ĆÓCĂ^q¶’öŮXľqöŽĂŽYPd1Y®L |_ť/&㎙" «1çó‹ôĘ1–ô°q·­ÝHSłĂ> ĂPDË, e4áŰ•Ňü¶i’e Ë‹ĂPqŘwL\Ë$žmpd±C§á¸‚Đ•yŠíVÓrČóA…ď<{nîdĹR«ÉĆ(ćłĎSEĂŶU'Gbě ?•±+‘y/ŘÓa5aµt˘Rcv%ťí7„Rň‰n“˙óÎ&ßŘpnń 'ůŹ–Z{ŕą·_Ôp#­+ť$ çoŻÓźÄ¬t[üÔ‡ßĂíőľ2Jsž>µÂzoŔŰWoŕ9WnŢćëß}™÷=őáô°űć…KDq‚ë:śżrN«ÉÝŤľçńű_ýs9}’µŢ€˘¬H’”H¸q·ÇŤ»ë~@’dyĘÍ;k¸®ËŻńČ™GÓ¤DŘ Ü)‡ŁLŰ´ë5ÔÇ6ݍB™€S<Ż9…úlĹârJ>źÉO¦qTGl7nÄ,4l/yĎ˝ŢĂ™äVâÄß#hZ{a·˘ &dţG@ě 0;ÔŚnýeł óô3c|ď:7ßz‘ŁŹ=G6°xň1úw®’Ť6°Ľ)¬ZcS·ă8ž l)˛¸O5ą€aTvŰvw$ą†aÔn:I’(c€éЉá,ŮDWu5o·Ş×ěĐř`[’ [ i˛¬ÝíB9ĚPĺT­[Y^%¦Qrˇ¸D$c& űTT8xlŤă9%„Ä··´UҡĚtëÂóCŇBPŠô3˙»mSÔRB ’—*#IR,ÇĂő¤ąÂlšb;Ž§Ş M%Ç;Ž(±ü.ľŻ1•\ĎÇvC’\±ˇ ˇÉŘ=Š˘¨Ů¬ĆY-`­%[Ť5Ă0˙Ź yFçłźĂ>}˙Ď~ü“ŚľüGȵűt~ęŻRLĺ~S}Đş°š‘—ď¬óřÉ#,µDiĆr§Ĺ8N9Öm˛Ôôyűę Ž,.đЉcܸsŹďľú&ďęńú óęŰďđŮO<Ď0ŠIłŚóW®sáę ¤„ďżqǶyűĘu&qĘB»É·^z•Ë7oă{ăIĚݵ5:ľÂ/sńú]l˦Ýôxńők<˙ŢGyää2ă$çő‹7¸xí&yžńŐ^ćÄŃ#\ąąĘĹk7h7Cľýňkܸ»IłŃä?8ÇŐŐ{[Yćkß}0hňĂ·Żpo} SB–—–©p°e^Ą ˘ď*¦ŕRůśÇ Są–y^l ¬ö ą˘’˘&ÝEQűĽĎb9=Ď«áWóIÝ^ëpË÷żńÁä¶Â‹ţ·ÝAŘÇNŞ„hâRřďÝőZí©Ë^qřäÓ&ÚĽĂý«oŃ9ö0¶ŕ-ŇhŔđöEś I%¬ş­ŞÉ:y™ŇđČÓĺřL‘bŘ-,Ë©ńĚłť=‡ă©ÉÇěĽi6ől’«úĹáŮąź'j؆ú;ŐÎÝë•4 ”ŇŢŇć,KňĽ@’+ĺ5"“’0kT˘Â%ŘVlĐ÷˘ń °;&؆ŹesüfMt]—Ą°c좰clű7­Đُ Ę1ł¨lҬb^ˇGUÇ*Žu\ŁDšM¦6(é!5ů9ËT’’öľ¶Ö:ˇŐ8HMČÚŻX «vZ I'/BşŇŁe>Únđ§˝1uĄĂůôĂđ@ĘŤFن+řľĎ NÉň‚gΞ`'xŽM'ôÉË‚GŽ.`×nßÁ˛Lřç.]ĺ}O<Šë*u—ő^ź;÷בHۢ7łŢë%)«÷×9~d™s—Żú«kë\Ľv“N«A8ćĘÍŰ †#>ôĚ“Ľ}ů ·ď­%9'V:\»»I§Ůä‰Ó]NYäŇ­u˘8ćŇő›L˘c+KÜĽ»F«rŁÇ·~đ2HÉýţóWn±ĐnŃM¸zű>ź<Â…««ÜX˝K+ô¸żŃă‘Ó' Â&Ą´đuWj<ÂťôvŠt¨ňÓTvľiľ=ŹPů‚"˙J©”•Ť-BŻ–í„íů”ÎÉRiť$Ö:b‰r€oÜFŘ!Rt¦¤¬‘ą¨á{ ÖAMk-SfeÉ‘'žçŮĎţMÂV‡‡žű4·/ĽÂhí&í•\űţ˙‹) Ęl‹Q9 6‘ý0G/bäwŃ­i/“Ź3ęoßý UzS%oľ_cÍ4Ľ Ió]ŰŠ Ŕý~żö'Ö|7‰ś˝FEôz=…k™¶8´^›eěďŽ#·ŢL–eă7IŞ«ÜŞŇ䬉¬Šw¶}Ć<Ž'‚:hĚ.Ç,·ý–J‚a7ę“r<é3őÔŐX")âÜÜE¨gű0 ‰oçTiźÉxăú¸Á"qÍŕ€¤áĐişś\ qĽ&Á×–Ä|+¦H6‰Ł1žßŔö‰r—}”ß¶ÝżöéβŚv»]W}vĚ÷ú˘N:đéëgńĚúďő0 ôÂyŞh‚óô3Ď}™Ąő ˝|‘,š†a]ÁŘďeÔŘ,Ô5¦řŹśä®ńŤ×.ň‡ßy•Żżú6Qšrö‚<˙ĚSHŕűŻżĹ©Ł+ś>~wŞ«×ůw_;GQ[^Â0 zĂ1§Žá̉cdyÁßóŁÉ„wîóÄهř±Ź|˘,ůă?˙O?z†f&eUMń‚‚˘*±-Aŕš,¶§óSUtš§Ž,xď{ň1Ďëo_"JRÖ{C~řöU>úľÇ9{â¨Ú,;ŤďYx®ĂĆ`„”&)ńx!¨ŰĚQ49PWa( ,9d<ÜT8Ůć©lg[k×4*eVbVŰ`z$IBŻ×«5…u8čÎ]9˙‹3˝Žçea‚ŃAśb¸J|ěŔď×qXW=ćăpYIV}Žg>ós­.'žţĂőUVĎ}ŹcŹ?Çę›ßtD™Ć†AŁ2ŹŹúĐ˙:öč›Ů-9P!­%Rű †ŁčÎW¨âK!k,ełŮÜJlÓŚ(ÚYyŃóÖëőv´őő˝fčg8ŰŤŃŘpť8ěůQT[Őbí4ÉiËŞ¨NEĹ:7ą!ÎQ˛[Ż÷”Ýđ ŕZĎV=3¸Ć<éŹ7”üTsŇh©µ|Ŕ2T…”GŚŚ6T¬k/RÖ¶wÁ0LĽ ÉńĹĎoÔfiă1–2)Ó‚°µH&›sxÎ=ľyY‚tN†ĆÓÎÎß~ j%'dÎĎtţăĄŃTµÓéÔ{ë~×j\ą†tM&Nv†QĚźľržßűöůÚĎóŇ;×yîěqŐ˝Ír>ňľ§9ş´Ŕëo_bˇÓ& Ľ×ëQ–%GW–xĎŁgyďcgŮč Xě´q›NłIŕ{ś>zßuÉ‹‚0đyň‘‡YZčĹ °L“•ĹE:]Özuđp]˘$ˇÂââŤűÜŰčŃi†Ž,¶i‡.‹ť6G—–(‹Š$-č¶»,w;řžĂ©ŁKB°ÔmłŇi°¶9 ŰjN JrK{őż( Ş2Ů«ĆTťP ŹúŘŽ‹.>Ů ×2*|;Ż»e:Ő±VçSZRV˙ű~ć3óCż‡uĺÖ‹żŤ×n! 
0ě&˛X#UdÁ‡ýş‚ ĄB´Ö§^ř×TD¬…ăgąwő '#ě,rőĄŻpçíďsâÉňú—›#Ź<‹cĺ,7nâčy  IDAT›Řń+ŘĂobĺW1‹»U —ąů5,{L)C «QĹ,WšiEmˇB]É…Ýq‹óCOľdiiVŰuĺ*)ŠŇŔö:ÓÓK„%GČ2ž¶{›,9Ç ň٧9 8.Ĺa'™J/YŚÉ IŁŃ¬uŰ«¶iĺ„í Ř)ŽYR iZŕ‡MLŰ'I+ :0ŃĐ-ľ|Úâk¶:Há’¤ŞE&LÓH«ŤçMˇiżÖܵµ>pĄt´E5˦m6›uPüÝI—?†Ľ‡|Ło’U’#ĺhÇőűé$†AúgB±z›rĐ'˝pŽÉ‹ßĆĎłŚżůg?Ŕű÷˙QŞőA>óşj»¶¶FQl§4\ßő¸xëľ‚ ŘźűĐS4]…‰{ĺ­‹xŽCśf,/vąyç>Í0 ,Ôó˝ľz—źř؇xôáÓ Ćn®Ţ#Í3:~”›wďŃÚ­˙ʶ÷˛ěşď;?çĆw¦{r΀“H0ФD*R%Şä•-ŻdÚ –!Űh˵[ňÖşärI+-˝Ă*"A @™D Ň Â`f0Ŕ„ž™Îé…›ĂţqŢąýş§Ó jOŐPŔÜ÷î»÷śßůťßď¸29ÍŢ[ą06ÁŘÔ NÉáć#űyá•ÓěŮľ•щ)\§ĚB«ĂÖˇI’3:1Çą+3DqÂű)J¶E˝ZćňÄ,ĂCC8%›ůf“áÁ~â$cŰćuŢ˝ÂÄĚ<;6ˇi093ŹiĚĚ·č«U諺”K¦])ȶ]ÂŹy¶q}Ü, ńĂHl?Ě Ď¤4M&f­§Ĺĺ±ç˝ŐĐőćŕ›ÖąýýßżKÜtü‡°#d™Ťy4K'"µ÷očsTayîMÚóśú †a0´ă SNQÜŠ&`úÂI&Î>Ďŕöýś¸˙Kl>p †1\ą„m4±‚ÍÇ1˘łé8z:Ť™äóObš-’´„fČřSÄáViľşúH’dI6 ŁĂmu+‚Š*VضŤi•đĂśřÁ4üůöŮu‰šĘ2ůĆ×hýk”ďx²đ^|ˇţŻř/˙çwâüÖżY˛}«řr HMÓčďďgvv–N§S ¨eńâ…Iž}ý2żňţ۬8ĹďB05·@ɶضy3ÓóóRűµ+ßF1Ąnk̶K4;ş®aj‚ůV‡˛ë`:a1ĐßÇ\Ë#Ërµ łsóŘ–Iś¦4;>;·Ś…>"i¶Ą—ř¦ľ2Só@PqJ,őZ C×č´f(—4&ç4ŁÄćMC\G#a°fA×4d|¦EŐ-a™’č —j­OâďšS¦äČŤŔ÷Zč„¶‚bťL+áş•bî rŞuYÉn-Ě &ĆŠíÖ•ŁqŻkšçů›&”MOO{ggFťĂý3\nGěłŃŚ”hî-ó}äşÄ!« -,¶]׉ĂasšöÜ$^úGďü9N=ö ŐÁ­XN™íöSŽž@Mt[ľ“,j“ĹvÍ®"4‹,ń‰šó}G0*ב!ân^OU‘Îâ8¦ŐjäśŢvăreťŢ!„´RÖ4­°‹V‡H€0 ‚¦Čr ?1şŚnٰxn7§)é‹x/2Č„]HőÍzSĚfăkĆa]Ë »Ü ÖÉ„ŤŁLjü6†°ŚLĘî‰*ĺJŤ$IđŰS”–)ĆÄ©FśYŘé­‡el G–S' ĽB"‰CHÚ‘®ťN#›_±ş%©(Qr*EUóÍŘ’—Ëeţ~Rç٦ŕśCtřő‘ô•qíęúŤ:Ţ­„ůVŦ•śÎzˇăăăB/á’„ł®đ¸žc°Kt(—kX¶Ţ•ĂÓ˛¦ž‘#®b€«g×KíĹöş$-–e˝iĚíŰ?ó™»‚`Ě<×1ůâóWŘ10Ě7NµI¶±˝o;±¦ó“MţÝkcüÍŘž¦qCĹA_c/'ť-'˙ćyŽY*SÜĚŕÖ=\zé‡T‡¶ŃśşLz8Ő>F_=Îŕ‘Oăů ÚsM,=@ÓsôRa8äa“,\@e4Ł„îTĐDN Ľ¤YZlîkĹa%ä®â°Z?˝şŞÍĽRµGůŮ+™•đ¸–-ÍxüHa˙4Üň"ö/ crDÁR4Ł8x6*ýÔ´’xĺJ˛ĘVTÄÇb.†iŢu¬Ä$ŚâT+tH[­ăjX‹$Â%$q@ç”Ë•«*a+Ť4äz…$ŽpŚ€0 HRpËRLŇÜ(´|#S_ąXĄk9qžż±ŽÔUĎŁç`ňZ,[Ý7ÔŕX ¦"8ठk«ŰÁn”´§ˇ+É0ŞJŕňJ˛Jš•áŤŇ˝V9KÇäq¦ël«YčBPŻ×‹ëUq¤×€`yÁ¤÷űUňlYÖ ?×uAč$é˘a‚M-'í•+U’Ě ă.[&Ąyž“JQĺŽĚ.QÍż$Ep…‰dÝX*÷ä3ÎܲězFţ<Žc¸[Ü[ä6¬—Č{ďÎ":~nX8ĺ¶-1ń^§…ŁK‰˛$Ó®R¶Qcąc¤ŠĹkÍC×uŃo˙ůź˝»ß‰™őŞFÎť{‡xäő F[ŮŐčG^R>÷ňÎű!îçžń9škń±á~ěuP™˝Áµ×eLáSó<ǰ]öŢř6jýC „Oß–=躉pH(ńÜýßavÁĄ¶ĺzR-ë —jh¦C2ŽÔo‘h;ń9L(6#ČŻrYIQ —Ť×+ ĄTÇE’»RiĽ×ueą»Š ÎjáY–ŤeŮKÁĘnNâ5’L:í$=Bô˝IŇjIžd‘/#׺¤Ż<“&¦ĺRr«X¶śďaŇYu‚ŞÉ)[dR<Úq«±ÖU6Xú}9¦%4MÇÄ/ĘňYĘ]©ätOŕMJz´ęw‰IÚ€ľV§®Ĺű—U®/OÚě)Ă[pĽ çýŤ%·˝IrÇ8ő:•coˇóĂG/ŹBš’zężűG°sŹ|ć]‡^(…:,) *Ó4 EžçĹşč=D `¸ć‡Â,ˊ߯‚´JV«@ŻŐâ“kOÚě:¶AÜmÉ/żľwc)W*¤X’¨‚L5»^VDÚDď9đÝUř$©LRsÍ&Śĺőščú’땦’ô°rQ&|™¤VjD©±Ägőw§ćn„f¸ÝĘE¤‰l kŐPP—^čŃj -Őj•/|á o*ą˝ýÓĽ«ť$ć;v 0Őń¬8xA@­ĽŤ­• ą¦ńź.ÎđĹŃ)j¦ÁˇŞËW/MÓAđŽš‹X'«±7÷B°Ň4Ĺ,•Ů}Ý[pĘ5L·NůXN…j˙©Q&Ó]ž»˙Ű̵ŞÔ·ÜDâÍ ŇŚRÍ,“¶.{-mG‰Ä0@‡Ł^%§+ĹáĺBî˝RfJîL­µv@ĆaĄ<˛<÷ęc†Ů7QűP–eÝëä'ęÚ‰*Cĺ°§^§­%ĎYäË´–—nňB“~f×H# C´´ą&.X*ě$ݵ áşU2,IzăęµÄ¦í’%ť®`Ž %eqĄÚ={ž‡ž«&ÉqŞĄ«Is+˝ÇŐ†‚łI‰ńHc«31LGp ´vrŰ{}/YH˝kőü -Ö^|s^·7–)Ř™ŠĄ „2 ĄŚĄŇ OӔ͍2tçšÚwlM™š¨ŐjĎbµ$]Ę*DɢaC«ŐşjoZ>˙ÜrŤ0‘Na†–¦ĺîA©Ó^XŇ}(ćźşóǤ\©u ¸*–Ć©†íö-ş…âĹ2ł›x~n(Ťx…<ŕęß.ó4 Éy@ř¤YJžkrŽ­3zcqŻ€*䨡¤.ő[>őˇ»ß˛c€ńąYšqήľ ^dđňLĚ‘ˇ~ʦɟŤÎp%Śůä–ŢÚ_C×ĎĚ·Ů_q8`oLÄ»—Ń«’uzqÉNsA©6ČĐÎ\>ů4C‡î ËsL f/ťaď%É«úÍ„b3™7†žÍaV¶ 4AĚ{ĐL™ČŞCÖ—ăwT°VťPkUp„…ŽÔĂlŁëFqmÜ­ţöV‚—łŰív±(“$AhĆWőY°¶l– âLG\¨Ž*ŽCâT+plľç‘gÉš>ëňűĺäĚ’€ ڱK.V©ÜőyO źé8ŐĐÍ qäQęѬM…ŤNڢư¦éřˇLrzď7üŘěJ€‰â÷Ż$»´<®®ëüíŚÍýSpď$<5GŞđˇÁüM9ťĺĺ µ[nÇűŃ#!¨}ţ.ġŁĹÜ)ěb—]‡¦‚fE…ě”b«¤CÁ)„EĐV˛,+Č’ËdkÝżúţ,˨V%2ŚĺĆd›úŠóż÷ú^ M·,«qÚµ3ĽÎ¶ľňóTŐ«( Hs%cD©t.+‚ró*V®`‘Ťë‘<ýlÜd‰<ÍJ#Î4ĚR­pOÔ4­»Iş±öŞŠYËŮĽęĹq\Čýń˙ń›Jn˙Ó]ź˝ëŠo{Ë/ĚdlŻ•pŚO^Y஭#LĄ9üú×ŐĘüÔćö–’”§çÚ|z¤ŹŇşÔO9ză°šSęĄZ):¦[chç!¦/ś˘¶ă(y†Č™ľđ*ŢńIŞäŐë‰Ä6˛ÎeôlŁ<ڦë¤Ţ‘ą aTÉ{â°z†Ş˛<«ĂÚJqXi`övÔT±ˇ7+mő•†ş^%ɶmďPĹážo-×ËďEzeŐÎÄ™¶¤ú»x­ŚŁdA”¤Ú0 I“pÝąĽdDAšěČI×>‡…:0je’$Ć1דĐL fq8Đuť0ÉIâô*hZD©±DÜżw?íuVÜHGm›˙ă˘ÎĂÓđř,\ŕ·´ŤąÝ©ďWdIőýjľô&8 µR,VŻ×lÁ4ÍâÔűÔu˝čę<žîZQ߯ŞËTkÝo,­V«Äi&!|¶Yŕţ׺^=۶)ąUü(ǰÜÂ’x厨JrÉĺü‘±´F É ŇĄ+äb5ůłëËҰČěR?” GëĺQ*+·J!Ă.9h†¦dŮĆő‘W‹Ĺ@ájYâŇřŁyĂŠXhÎáäŢăůa˘Lăơ~tMă—_˝Ě/ŕľ;®c<Ś‹~íů3üáŢÍ|v ĽÖ}¬8T _%ŠŐŘ+⬠ѕc‰9űŁ{Ţw#YŃžc`çśúf>A5ű1Z2‡n×H­Ýd±O¶Čęď%ĚĘKĄĘř+•Jq│óA·zşúžžâ›_˙t]çW~ís ’¦i!591Á7żńüâ/¶Hbľţ÷ËOúg 
‰”<ĎyüŃGxíĚi˛,ă_ţÎçÉł Ď÷™śçÄńřɤiR´ÓTžŚtÝ(ŕZ8$]˰őôŞVSë8Őˇ"WĄĐo^%ľĽÖťT(ŠżÓĆÔ"âÜÂ0KhiłX9‚T8D‰ ^ď“’ůi4ĂÂuĄüJtpŤM“îTÁ šŠ˝#Ď7`bŃš¦áÔűHón2čYŠmhĹ»żLÓ¤^—XŁN§išK6sŐ:SX®rą,«™]RŞÖ Ő\…CĽ–QŻ×IsA’f8¶”ÝęuáZkŘF"»Šă¸E° ü:~ᬷÚH3AčvŰ.&a’Óënđi&SŰ©u!=-‰!ÓW®ř©ÄňćyŽ×š‚<%ÎK”˲ň´\0|#CQTŃ{7Đîz}SŰĽýOŢX §!šĽŘŞłÓ x¶ą‹˝}} •,.§đÁç_ç##ýüŢľmčţäôE~09Ď·îcÓ:jW}_w ůľ_ÄÂz˝ŽmŰ,,,ôÖ4ŇTnö/=đ%ĽýcĚ_>‹PŘŠŰÂ3Ô˛§É,şU#)$ŹfČĂ6Ií=„y}EdµZ-Jµ[­Öşď˘ÇąÜ!MÉ ­Ç7Ms‰ÓeŻxüµ<Ă^…§T÷/ËH(-}Ĺ©Vť¨Jˇ®ëxťúsyůH2A”Z”ÜZq˝–űä »AĚS2OŚKgTëýç!ĎJ®Lŕ|Ż…AŘ5dŃ–­őűU\R„ŐŢ_µVC7­®Ćq ä˛â8oj-ŞďWŞĺF,k e ˇXRŁ^U…U§A%ČJ ßó¤´ŇSVF/Şč°üýŻ7¤ţs?”÷ěŘsss"LÉABŠÔWdvß÷IcK 6 "zb©:ř¶[ ´×˝>L4$·AîEmJz¸b‘f‚ĚčĂu]˘0$ög¤ĂĄ&ń¸I’Ď÷ZB¨Zę«ËyžŁ˙×˙đń»M­NÍ4¸‚™—éŻ`ÄuŠŤSfâŚoîă™ůű+.÷OĚńą-}Ś“j饮0°:+ż`uc°čDeYş;Źđâ÷żÂľ;>Äůg@gć łD˙[î0Y&Hcź’>Źiičťăhddć¦ËŃŰP%m•® …Üűíońľ|#GŹ21>ÁŁŹ<ÄŁ?Dą\ć‹ý—¦ÁěĚ ŁŁyáąg €›n>ĆcŹ<ĚÓO>AµVŁŻŻź“ŻĽÄž={™žžb×î=ÜwďwxňG?d۶íś;÷Żť>MĄVăŢďÜĂ+/żD»Ýćű÷ŹÓ§^edóf}řA:Ľ„ŮŻ‚´Zd9Z÷”ß+9#HµELNäĎ…>Y.(Wę]ńĺ«uW†–f‘„Í%82M7ä;XĚu‚Db|5MĂë´¤äŚ4‹rąÚ­>dD©Îz­bUuę5±X­Ećş.Že’E!±ď!ҲE<_ďéŁ) C»DVŻB˙}ýýë^źçyńž$AATu@mtjqf]Í\ĺh–§ľďÓjµ °ýFďß4Mě’CśdhšŔ21đë]݉ś˛cęA¤6»Y–‘f9–dÔŠlŐ7¨ 2d!A”ḒčYŁŻĂ—]”8 b©*"ô~”ˇ!ŮäjD©FiY›ÍÔs‰ˇ Č3Ę]ĽĺF51‹Ď^VTsňOţäOŢTĺöîű‰»*Vźiš1›Ĺ()ŰŮÚ·“˛!Űâ†&xdÁăŽţ*}¶—eYÎBó ›jlÜWŐRUÝ•QmżŢ8 ˛í[©÷Qîáő?ŔŽŢÉ…b~ě ÚóD˙íŕŚHkŘÄ6š–Ţ9NLnŽä˝Ţ8Üëň¶V÷ wôVżT%X˝7Ő[ —Ű{}žçĽDar×2Zís`Qk[u\TE!)gÖ˛"C”9”+žŃnÍAÚ!Š3·‚f8Q†Č—Î商&@čIÔ‘DR§ŚnşdČůlëA±–ň\ç6VÉÁ4­‚ó`ë©T¨Is·"×RĄbE;ÖŐ~–eE’±RGÍ4M)ą•ç´š di"˙tßcž/:”nt- !I]" ‰˘×-sz­$ɶí"QÎx ą7 ö¦Ö¶şŐ1P•_EB››+*†kiĄ/Őj•$ËI3YdČó¬¨ Ż—äŰFJÉ”:Č˝:ůy.5˝ÓÜDäkK‘j ˝Ë‹ŐOĎë –)‹¬4Ś®U-*ĄąE%e°±v¨si|šZµB¶±ő]Ď cAuČ…Q¸fnT–*+ôşĚ…aţ÷ăwGí Ű&jN8ď±”ąv¸âđŕ\›/ŹNóü|‡{Çgůđ`ŤOö—ד?»ęF”ß9Č€©´Ő ŞIŃ‹}u’]ßžĺÜó1˛ďFćÇ/b& łF˙mtÂDsŮ4†;€žŽŁw^E+m&Ó–VqUKUŽÖšPBŽżđGŽ^GĄZE×5Nżú*Íć‚ü˙šŕýü?~ęIÚí6żň«źăţűľË[ßţNÎź{Ůą9:LĄZ即g Ăé©I¶nŰĆŻźezzŠÁAţîk_ĺÖ·ÜÎÔäç^ťfłI’$Ľă'Ţ…]˛ůÁ÷sčđ6oŮ*ź_áűĚýőÇ»÷[Ë)íÝOjŰ8fĽdrE©[itĚ&%=ŔÔ3tbü Dh&nąJśčÄIrU‹Şw¤X2L-FCЇ MnRi–F©L`€$·0,ôă8&Ť ŚŽˇç2É ˘Ď›eO2%-šĺ’/kÁNVÂŁnTňF×u¬zwS ËȢË^[rFµ°”MĄş‡^°&KwĄˇH jóV2Ş­Z÷ęĄ.ŃČÍs†÷^ĎÖ7Óśeß­ďĺĘéçźE7 ®ś|’ˇý·ÓÉ·ćXŮş‘a–l´ö hiě-äčKfJ0x˝äŔ´,yčĽxâŽăđęÉ“T*U )®żsç..]şH–eŚŹŹŃ×ßĎő7ÜČË/˝(+mžĎţąrĺRW°ĽĹö;yéĹă†ÉŽ;Ůđ—/ŤrđĐ!Z­ĆuËlfÇÎť|÷Ű÷đ™_ůUY-ŠBľđďź}ЬÝ&ťť!ťžÄ{đúîx;qe4IĄĆg.5>-[šdńBq*S^‘…tüËv(9UĽu%-ÁĹŔ[v}„ç–LRý‰Ă1KŘ]Í<Ż˝€Ą-u R•¸< ‚řMë*ö’ÎÔéYµ-•Ťł"{©`Ö{ýjNe+UJ¶EĹŽ°,łk'›’$aśQ.WŠDľŔCgé7žŢÖçň$»R© yŻZGžçô÷÷ťŽŢ@«î?Žc*•ĘŞŐ%M&+¶:Y–’„ Ň„# øKú[$íištT2ş‰m– *8ÓzŽ’řIšc—¤ \śéÝ$yeFm”Ú¸ĺ.©Ą=G·Ĺćµ "Éâ]«z «.â®›áb` ă·"źy§ÝÂÖ«XB,ţ1őś,Mhu˘%ĘŤ¶§{­śçççůł?űł7•Ü~đ—ᮿ>m™Ç†Ę<2±‰TŻć9'§ç.—Ń€ť¶ÁVÇć[“óiĆŮ·™·Uěkţ.Ă0ŠŠ;\‡UhĹ8 lÚ}”‘}74gŘuÝ[ąřŇť&š¦1züQ†ÜA'ŰL$†ŃÓy =Ć,•Đü—Đây„=B.¬B˘)Ë2ćçç ‚`]]Ňޡş!*iň}żŘ[”Öďj$ŕZ­›tťŹélą2õ̅•’Ľ47bÉL@¦U(9ň÷Ĺţ†®OňĐ%‹tIgŇuoŃδwd˘ŚŽŹšf`čSD´=ŹĽk‡ĄQ$őmÝ"é`äŢ’¤E­%AŚ×%˝UşŐĺkůýŠôÓ»–Tu>MÓ5á+]żV’Łë:ގ‘ö$é~·#¸éOĹ@%í·ÚűS&ŞÝť¦)YšJŐš$\Ćş^{nµŻśzőU†6m*¬e{cµú#»xC×±LŻÓĆŃ=L-ÁćÚřľ‡×ń sŰH —/!dgĘrJ¶čČ{ „A©ä é¶”]VĹÍrÁÄÔY®cą l»Ô%ô6yęé§q—ţMR!ĘÖ%Ťi=y„„–UŞD‚8Ś^™ ăy Ô«lŮT-¬‘ŐgŞëŁ8Á â‹UëŤŢTłŮD?ňéĎÜÝJj6_9“0DDYĆ«S3Ű<„ŮýP‡śŽÍi/äĎölÂľ\,VmľŃqś%“LM Ő"ëeô.IH4ľ-{Đt ˛Ś4KŮ´ó0ťÖ“gžŁ3u‘Úć#ś|ć$ő‘]háŚňşhµFI¬]Ô벂ٛ$¬ÖZRuhÓ&¶oßÁˇ#G9|ä(7Ýt3×ÝpŰwîb÷ž˝Ř¶Í®Ý{9vË­ rűoömöěÝÇđđ‡ˇZ­˛s×nFF6sěÖŰŘĽy űŕćc·°művvíŢĂíwĽ•ť»vłoß~vîŢĂî={¨Őę\Ľp˶9zÝő˛Rrď?ťz™ľŹ} {ßAě}©ňçhŢű-’™ijďz¦S! 
ěV¶ĽvŰ®Ş¸+!ü$‹$ǰ\‚(‡<)*iĄîBIŻşŢÔ3’$$ ě’‹a•Ń »°•Ô˛•îęz]ËhűéÉ”kIr—źľ-Ë*XĐBNľň2Ü/§Oťdßľ˛ňßm=©*©ŞW*šÍ}řAöí?P$ą†a`ë˛""z*kŻĽúßýöw8qü8;wďˇŻŻŻ«kŘâˇü7Ý|l – X2ŻŐ}ôJ֨߯Z¨*č ¨Ë˙jŐµŮf9¤Y.5i;-,t7VyČŁ8Îq{žż®%ĹFĄNYuÚ]lWŹt\(«?®+eˇÂXf28Ş‘v ¤$NH[Ýë»lđ\VŹ’\J6Zşf`UIjžEř~Śi— y§8ŽI㯜<Ĺ•+S¤ÔŞ˛ %9/˝|šááMdąTyăőł ŹŚ ľďqöĚiZ­ŤF_ńît]çÄńŘż˙@ńNެ‰Ăżú˝q×ůfh^ZhćC‚öÉěĹ—éŰ~#§_8CmÓnôŕś¬âj>Qó<‰µ›zŁŻHŞŐw\‹2ŠŇÁͲ¬ŔŮŞ¤\i‰*ˇmŰE…¸R© ÁňęÚJ¤5ĹşżŇS–&¤±×•A¬JČ(§Ü=pyť¦vµĄio± CtŁDÉ­&˘ËL—‡Ĺ ťüňeţţë,|ăřĎ>/“şÝŰY e §\EÓ­â÷ÇÁ\ŃY[>l(3‚0ްťîňßżĽŁÔ‹É˝–ëU’łR޶tL‚e×*[ű° MőX{e?×R™Q°7U{î™óŐżů¸ó˝ďçţűľËŁ?ı[oăŇč(ŐZŤ(ŠhµšX–ĹWżü%ö8HŘ…ťMMMâş.Ź=ňQňýľÇő7ÜH«Ő˘Ý–×´¦™››ŁZ­`j_űÚß255A„ěŰżźń±1ŞÉ1™šŁR®ŇžďŁë—GĎŃß(Ójűxť6®c266IšeÔ}LL/†)¶m˘i‚o÷ś9ó:Ź?ţ$GŻż‰N»M§˝@uřň—ľĚ»ßő6&''1­ŐZăŞů·ÚP‡Ś4 ăÇ­đú…K\Ľ|™{÷rýţČWďHyFA^›B’ꌪ”C7,†J şµM\ćşáA6•]’4Ăí±ÎKÓ”ÝyĚl©‘úyWyŁC‘—ÔäQ§‘^LőyŞŠ 20J|\M<ĐŘ{3#oĺüóbh‚ ™›ź¤óĐ߲űŘ{™‰71ź Ňżđ%×"¶Ź°0ßIJśBR«ÖÝĎm6›KD™UëNÓ4†G6÷^.—»Ŕřŧ‚G__!ö^.—Ůłwď’ `ąŰvÉóśÁ%íň0ŠĐ MĂĂKžmŰ·łwßľ‚×ţńÝŔ>r= Ć.ˇuĺ]‚ź'Y qŞ8n­ŔA€)ü5ˇ$–‘aů ĺr•4uéř-,-]Ç«ł9ĄěŚOHży·,OĎľďSZ§{wń¶JžĹ˛,jÝŔ±ü”˝ÚPq9Ĺ÷}&ĆÇ8rô:Îś:ĹÄÄ8'_y™©©I>üSăô©Wąéćcśző$ďóúŮ×8vËmĽôâ Ć®\á#ű8Ď=óc¦§§ůäÇ?Ä÷ż˙Ot:ďú‰wpřŕN&'&9tčŁ/1qů ~đŔ}ĚĎĎó‘Ź~ś™™iľűí{8|ô(~˙nąí6~đAęŤ:ďűŔOňüłĎpç{ß_aőűëőzäu‰Źę÷ĎÍÍvÔŠ(‘ĺĆ –!“Ţ$ 0-AŻÉrÉHÉóaÇ˝L­V'ŠJxaŔ(ó\dm´řŠ&äőIÖ¤9ďc•*R—1)†RrČĐRÂĽDh¸´#C°µ* ᡑŕ)yŢÂkůčV…j­ß÷ČcŹ’‘¬X ]`č1Aض‹˙žk÷w˙Č'>ő)2tĆ&ćťťcë¶­<ńäŹŮ±cG/łg˙.\8O˝Ţ`bbśoş™KŁůńÓOŃh4¸ti”›oľ…Ë—/qđĐa~řŘŁ»ĺVÂPZż664GWŐu}–fZg¤bóÎ]Űh…»ëŐ%ë5Ž">V1Đ„LÄJĄ«Ý ×JeşdË2Ź«Ĺá4M‹Šh‡Ť}»obäŕmśůŃ=ŘŽ‹ßś¦37Í?~€Íoe&ŢÄB>Hßüł”Ę&QéFćć[hşśß˝˘úŞzÖl6 Ňl–eĹý)ŘŠ­ ˇ9J"LUúT‚«*°ËŐT…pĄŃ»¨ĘĽZ‡Ş•˝Ţ°Ś”’‘' ´B’SĄŃ·ŘnĎÍ\[úK×bâxžv`Rr«č%W*ň ›%Ň—O0őżýgHbň(CÇ{îYÂS˘ďW ÇŚIÓyÚ-‡F_÷=wőűWYKI¦IČyQŻT$_c#âů°xŘVřTu@WäŘŤ^ßn·‹ żă8E‚l¦`•)#‘¤1ť–$Ý)EŐĺÚHţ˘»I“¤)Ł/0??O…<ńĂÇ9î <ŻC«ŐbÇŽťLMMđřcŹ01.cČ©WO˘ë:“Ś]ąÂË/žŕĉăĽňŇ‹Ś]ąĚ­oąť§~ô®ëňSů÷ď’{^§Íľ}{řŇ˙Š<‡]»÷đÄcďţýdiÂÉ“Żň®wßÉ©“ŻP*™ěÜą—_>IŁ^çÖ·ÜĘ?xjµĆM·ÜĆsĎě’C«Ő*Č8*ńëmS+™2\{eBTËŞ78ž}í %§Äüü|áTvöµ3´[-ćçćht+@‘¤éşÎÓO=Áž˝űŠÖŤa<ţčĂěŮ»Oęv«ŠU)„VÖŕ{÷Ś]F&YłI籇pŽ^OëÁ@ß'>i Ľ0ĂqdĄ*MS‚(‘^ëkĽ>Á"¦QUŇÜr•\Xdi(ÉkUŇ€$Ó)ą˛ ¦ÚŤIn¬ĐâČ,Ó’EŮőVÓ5\kôžâćçç‹Ó÷Ů×ÎđŇK/259ÉÖmŰ9÷ĆYŢńďćńGaüĘöíŰĎ#=Čěě,?÷‹źatô"I’păŤ7ńâ‰ă´šMŢőîwóŁÇăÜą |ęSźä™gžĺúëňĆąQ^8~‚©©)nĽń._ľÄôÔ$şarĎ7żÉ–­'ýҋǙźźÇ÷=víŢC˙ŔŹ=ň0ĄR‰}Ý*ŕňßďşn1{“µF/žWvI©ĄAÉ–ZŠi¦6ŃĺŇK˛Ĺ§áőਢT ż !µm}e•ŐČ“/Ń ‹’S&Ă"ŠáţvŤńŞÎ»˙0eđŐ ›÷ @żßoęYWrĆ´ěR?’ *ËńŕBhÄç.ŃúÎ}´zŚüĘćĐ0iÉ!đ;8N™o~ăëŘ%‡-Űwńĺ/}ĐČr¸|ů ó M^~ů&'§yéÄ ŚŹŤ155Ĺî={™™žć…çź# CöîÝǙӧxůĹ´ZM^{í gĎśaËÖ­üĎżúK.Ť^äáG{S•Ű/üá/ÝufóÎ}×qrj†˛i˛«QcŔ±Ń{‹®ëT4AÝ2‹DmŁCUôU ŞV«…|VoGańஇĂ(îŢ—ĆĐ®#¦IŘ™gËcĚŤ_`řđŰ0Lt‡f¶/(áĄŇ~Űóc±#—J;aË’°˘ÔßéÎĐ2ü0ŁTZÄĆ_+ěmôâE‡řţßăŔLMMb—JX¶ĹĄŃQlŰćźú^ŞÓa}Ë^*›iN^äČO|ݶ ™Ç­EY’ŕű~ˇ0;;[Čߨ˙¦†J,<ĎŁ\.S­Vń}ż¨ őžú§¦&ń<Ź×Îśf˙LŚŹwˇ‚…….]efzšˇM›™žf``c·ŢĆŮ3gźťăö·ľŤ“ŻĽ„®xžÇăŹ=Âöm;8{ö ĂĂ#ĽđüslŮş•Ł×]Ďěě,GŻ»k˙!’Ń $ăăFDc—I;ňÔbîÚŤ(™D™Vh)Ş\ĄÚG…xA«Ë¶\‹M™ă)YŢ"čřŁBÉm>aÔ¦d®L’č‘rQ­OÂ&Q–ˇ™Şő|żCupĚ”$Ó !ň­PUĹ9 C\ץŃh ו“:<©ßśçyaźśç9##›I‰źš™™áŐW^f``€©ÉIžîYěîĽ9ţüóĚĎĎa[¶™aŰĎźçĚéÓTëhú(ş±Ô`hh9cŽń‰ Ž?ÁćÍ›Áő7ÜČčĹ‹ěŮłӴصk7NŮĹ)98x˙÷+_ć>ňG+2´äá¤ŃhĐn·7Tyč=´ĹiZHĹq,ŮĄkJw ®“f tZĺjQyJâÖ{zŽ®…DALŘ”ËUĚJťVSă7¶Ăkľt*zt‚ěęßbę9¦G^(…ĂÁĹë´0D -H…FűÁÇýż˙ŠĚ[Ś'­ď}‡ľĎ˙ÎŃ=D~Âć-[ř…Ďü­V‹j­Á;Ţő^榥ÔŕŮł9z=ăSX–Éío}{ńüöîÝÇÁĂG8uň.]şÄáĂG¤|`«ĹÂü|ŃÂĽéرµČZĎŞý;đrÍâgí-ţűň9!„(bе U9‹˘¨°¸U„«ő´’U <ĽJv„X¦QÄáʦťěďˇ59Ęuďúiônµ ”JÎć"+LëJq¸·Š«äú\×-â°ŞXw:ťâďöŞŚ¨ ŻúÍJYE]§’úŤT÷Pé‹«ÍÔ÷ý«żËWq– ěRĄ¨úľ·ŘóZXz|•¦čňa99>a'"Ó\ô‹çIĆŻPyŰ;)˝‘<ŠĐđ_9AžÄxĎ1n·ŁâĐń[ŘzڦA'˛ĐDŢŐżz¬ÔQSťÍ•†r‘RP!óóóضM˝^' Ă ét«ď·„Oä¤ÂíĘ$xAk]Ë$R B_Zh×jµB-a#ŁZ«1<2L«ŐäŘ-·„óçŢ`hÓ&*ĺ š¦łix0”ójÓ¦a Ăŕâ…ó4úúäÄńزeĎ=ó4ťv!¤Ş®eÔŞ•˘Q©T¸ďľh4ęÔëu<ŻC{LŚ_ć‹ő—Üú–Ű˝x˲9věVÚ톽8Šçűx^‡ůąy‚ ä=iűî˝MÓ¸óÎwE!wĽíí|ĺËiXÜůŢ÷273Ťe[8ŽĂĐĐ 7Ýx=˙řÍďČŠ}ÉĆĐ—ÎżrĄAÇ´ý%=)°ăÝ·E†N_Ť;n˝™WĎśĺmÇˇĺ ´›¦]¦Ńhŕy^˘kFAZSýŞç/„(4€ÇˇŻŻŻ°CWݧĺsI4ßřJV?¸ˇüf†ŞÚ*ą-EŠ Ă°ŔB^ËĐ4ť8Ž L• bjôV@:yž×mŤdŽ“¤ 
ľ0–ńŁ„±ŐŘA‘A‘‚>YWˇĐ—˝Ç |ĺÎŇ_·gĺߍhÇA,#ă{>ž ›&fŹ˙‹—4~jŽ`Ňď4M>TI®I!QPäÄĘ7ň]\L3O¤äp»ó"‚Ŕg8öÖUöĹ612J_áFČU%p¦0Uł‰ßjúŐ˙ě|,L Ă@Ó-/†ČG–B‚8G.oĄ;€ÇOÖ˘­ň]‚‡#ćaˇ4°ľľžÎoYGFˇp”|T…=VžĂlÜú ‰n˛ëş›dwÂTËRҡÍĺň)ä˛Óé¤×’ť‡‘>kZ“7vĎct8)¦(r|óň-LŚ091Śaš,­6ą9»ĚGĆ©•óh} ęłoR-[Ú?BGtz.÷MîE’er¦®m¬?†¦„1gßşAĄh12X&Q¸€‘z)×’„i㬠+ FÎÂq MĆ2dE!ôdENćá8ń”Źă8q%#&p;DˇOxxˇÄŤ FW)Ýv ţ}…©@qéMâĂO!Í_Dúß˙ă‘‘Q=Ę׾ňeŽ»źľő<…BO|ňÓĽţÚ«4›M:í6'§čŮ6ËËK<ńî'ůň—ľČ“O˝‡±={xéŰ/ Ş*GŹÝĎ«/E‘ůá„ ŢäŘýlŇPmVÓ4ÓITL.Ífó{Ž·Ý)$IJä…ťďíĚ"˛!&áۉG+ý6ýNŽĘt˝ ±p]7•Nş[*`j °‹* l¨F÷żˇóϧ`2/®ĂÉuřżŽE´›«äµíR`q,á†rböł€×é’“{› ,D$DŚ0˝GIJî?”,ry+­†aJÝË3ČúŐűľźV…“ÝńÝŹď Ë0_]]%ŽcjµZÚšÜéX±›×4m›xşpmşÝý8€ fŹ·ôd‘WĚTü>›`o˝v íŠě湉şP(¤UŞőőőÜh6Ş;]»ho ńýjµşaŘĐŻěŠI61şĽ~–˘ReČK-ă8Ńóěuščą˛¬ú.ŰEŃó€D×¶ b CŽâ€µžÄĺ ŢD– M†` hjU>2©ľ­ňE·ŰµmŰNÁĂÂ8áNóˇčzduĂ=ĎK c°Q°ď ;¦łÇ‹¤·Z­˘Şjęůý QiĄBĆLTŮvFnź·*‰hJHN v‡ĂH˘ín8©Aş}ífţ!’Án·{W˘ôÖkC60óžý®Ę?‡Qf8ۆß<Đ#đ] ©ĂÖźŚcč*‘”Çě›ÄACv·¸en<'ÇMć­Ż‡ę˛–'źß°PÎýć˙n‘Ý„A€ÝëR*U¶Ťź{y‡â=´Űíď|'I­ŰÍĂ’´a@q§u}§ă…‘čˉa'ł×ž5ŔĐ4ŤV«µăsŰÚ}–e95ČÚi®ŘVIÚnŰMPdY¦\.ÓX_O¸#QDą\NçZń˙řˇGŁ»BÍÂ4Ěôw{Ý6v§•ß ‹( śQ$ť´(˘ŐuĐ55ö ŽYpt®©ű ·m-ţţ#§ÂžŐSH˛BŁtµąľÎ›çÎráÂyLÓä/ţĂźńÄ»źJ„…űU©×^}…Ŕ÷yřÄ#,-.2{k†Soś?w–ŔxďűžăÜŮłH$ ľď34<Ěß~áoh4Ö¸ďŘqÔ8Â˙Ú—č~ýK„ŤÚľ”~âS”Ţőrż‚ ^Ŕ÷;â8¦Ůl¦‹H»–íĂV«•şĄ„a¶MÂH¦ă¨JŃ—ë­"?TÓÝ~§ÓˇZ­;kßéüžç%>ëů<Őju“ĂĎíBWBt%$"â(˘€*A,öąJ^’ě´w:oŚ©†q·ç€Z ź/â{^ŕ`(ަ„>Án~–šˇĆmzí’š8]mrĺÚ"Wu§g ¤ ĂŘ$Ýł«Vˇ´Yú§R©Đl6‰˘(•ż»Ýd…‹eѦŠă8Ĺsß.áĚ’ÎDXF±l¦ňaŰĆP#ĽPIqÝâÚŰí6•J%MXďFÖ„ŤäTŕľlŰN+#YŮŁŰ˝qíY¨ŚpÓŐa‹™=FÓ4›s¬É·XŤgXo/2¦NQ.T’Vs©JÎ*Ňi6đ=UÓ1´*B˘GÓ|”($BÇő$nQ{G$¶~órNđwh2ĹqśÂž˛2BwJš6[đÄ)ű.ŮPŘ€Ţéřz˝žV3żß‰­84›M*•J:g5ľo7lť+•ʦyPtŇÄĽ—ӷʦnŞŐ ćŰYZß.şÝ.˝^ﶡťBpü(Ćó şˇĘľÜ_€ÓýŚ$+ÄQŔNÜI‚ĽĹ-ě®nX¨ą*®ÓE  ŐßÔM“$‰śąç™Ó|˘¸E·eŁę 3—Î ˘#´›Ř ٨Tj© Dşw/ď0Š"VVV€D­`ë¦ekl…í‰wĹOß©şľÓń Q şŮ—Âs=Č$pbě®ŻŻ§…9QŘ:†v*Ŕ†S«€uZ–µÉ%n[ľýXŘËçóéşcŰvß7N1űâzçe¦}…YéKť*#˝ †Ťd«™/Đi5đÝŠŞŁ[˘0DV¤ ŔÔ<BrŘ=—9yř‘Řô¸R~8ůC Ę?đÜgśÄ÷ČÔáĂXů<ďłw|‚ůąYňy‹Ń±1V–—9}ę öîGQUkkhšF­VçŇĄ·8vě8µzŤĽeńô{ŢËo˙ëĹGěÇ©  D˝ßýmě/~˘°ąŽ‡tľňEÔJóđ}IŐjK›ěűYŮ1ѶŰIH§Č¶©D8ë„Ĺr"űÔźd·ÂD[-Ű"ĽŮ‘äxž·#Tbk$LdâDđ˙‚-q¦ _Y†ĎÖŕD1¦Űó‰$í¶ň3’ŞCěá8.‘¤'.kˇĆVźöťŻT%" }$ĹLÉóܵzž}š¦ĄäQ ľ—g(Žş›µZm#v«7{öجS™P{˝^şQ×!¬błç–€(膖UDRŚT>ĚîˇiJDËDńƱßĺ8ÝnwŽňN÷.Žf[vD•u7c_TĽDlĐDő-žçq˝wGJVoGę°Ďô"´ČDSś¨™·PT×± ĂYV˘MÓű×2ßSYцᝀďÚ!Ţ®‰Ă/ţâ/~ĆóiŐK±ÁĽŽűűâÜ@ÚŤy»ó řX’)©®´$%ŇI1˛MŮĘöV—·»ťŘPu(69DítĽ®„¨r„hhFžßžU¸lĂĎĂ©6<\‚÷Őć{‚"…;Χ’”üVş¸žŹ¤šhF!ŮGá&ěíÎמ$ÚQčâşž$·bó»Űą@<ďďđí¬eŮN–€ÜŮĚhó\$şRş®§$Č»™@‰ă}ĎIߡ˘šxˇ„e%°…^wťĽćˇĘŃ6â"$sžP%ą—yTä ťN'-†d]âv;ţ¬Dłc…Śd6lŰćjp†Orh°ă:h‰¦&.±†™C3L|×Á÷d-1ЉŁĂĚ.~3NަZżă5ţ CÝż˙ŐZť}řGřÎK/284ĚľýxóÜâ8ćŕÁ©ÄEÉĚńŔC308H„LěŰÇ‘űŽ‘ĎĺQ…÷=÷Â0äĐ‘Łi«ëŕä$S‡_8‹ýü×řą_ ň<‚FňŹ’ůĎü·¬ýţď żëi:’ś’ÖîĄĺ~Ż!´€8ŽłÉ·Z`tv[Ó4ÓJިŔ›°ź°™Ť,Î#Ză‚4v/IF–l!*k[¦žŻáĘËçPd™˙ă`)LpmA$ă“L ů]ŰĆ—sJ€qńqUŽQuź jbwL4#ŹlTéyJdc(·OPăb9—bY1—ËaY¦i¦»Ň;=I’řł?ů#:í6ŐZŤýčÇÉĺrT«UŤׯ]M¬|e™Z­ž’b˛mwˇ4I›GLPŮ6ŐWżü·ĚĚÜ$źĎó©źţéńŢD·ŰˇŮX!€XOŰ YRöľ3 ßă›%Qß­é^ćبć …”ŕ"`?wЬ¶¶€»&°ţ]"KŞ]__Dz¬ÔHh7íţ­óŕVňmÔ/=_Űv\–,äűţ¦5 [E»ŰůES´Ňw‚J@B@˘÷š34N?â{”ţ<,I’¬Łk*®ČyLŐC“¤j šŁ).~čc{&†™Ő ëőĐ%Mľsˇ ŚĚţ3°m;Őů-‹›Sw A˘ő<ŹV«•v¤v;~…Ş—e']úť"[I-±A»[(r„©(’GŻă€’OťŐz˝Š”Ěo[%1E!67bÝĎvăn7ŹfÇĽ㢊}Żă’ąxmmmÓźE±GD†Ě9WńŮ b"V¤›¬ł 4ĆŮ“›ÄĘ[ȲB©:€çą´××?IlĂ0‚5;dUľëuý C}ćŮçRą©Ź|ěÇRm·w=ńdZî®ÖjéňŘăOěřCőÍ’0€eYśxô1NźzÇ‚ ŢÄ4“äza~žgž} ósŚOěăÔÚgĎśÂó|~ęS?MąRauu•gŢű>ľü·_dmu•Ż~ĺo‰Ł“St»ľôĹżáÄ#Źńď~ďwĺ#ű8®ă°¸°Ŕ'úg»u‹K—.ň;˙ößđÜ>Čô•ËÜšąÉ}đY^}ő5şÝ.‡ŹŢĎ?ô#é8*•kôz=|×Ţ´¤‰*qö°Űń“e)g˙űÖä Z­¦IΝƟ®ë›ĺ;Y8:ŽĂ\0M…¬°Š÷É‚qL˘ˇ•śżé5Ó[»ź¨;~Ś©ęž¦€Çůđ<í{+KőN ±ů߲č,muKşSw)`SYlöťŽĎÎIY·°R©”Ş~ě&ŃŰm ÚÖsď´YŰM»_Ü» 2 ,ŻH:vşö¬"‰HBÄ-˛ě†U/Ά! 
Ë]‚P'o1uwŰú``É{ ś.FśČĺi€íĺpeYŐ)™&=ŰĆ \YÇ”ÝŰ&«šˇĘÉZéK9t#G›8AMÚŮĄ2Ž%źV÷EÇJ`ú…ëˇ(4Ýîd7 bMŢݎß\.—bkER$z÷2ţŐ>®8Š"<ĎŰWŮ)Ś~5ŔTCś ,@Ó4ś°DĎďn«ÚŠ*wŁŃŘôŰY§łťćáťr€ěłµl’|§ńŻ(J*ű':Ś;:˝6Íx‹ VTAŤŤŤą¸\»ÓÂé9)á^–e¬R•ąŮŔÇR–č LIď 8ÂíB†ŤE/› 9ŽĂÂü<óósT*n\ż¶«t‡ĆÚ*ăűŘ·?•˙’ú‰áęţ?‹ó4˙ňOđgn$Ĺ1DK¸¨4 öŢt^ďrĂýÉě»çŢâk/˝ĘÉ7Î"IRZeĐ‚V«Ĺą+שT*†Á­Ĺ•TG 2ńg®Ţě·”mÚív:A*ŠBŰîĹ0żŇ Űshµ;›çV·—˛IŰí6Ž“hŚ Fçnî]´3ÄĄ\.§XśXR0tEI®/$ş}}Č8îk‡Z´NŻŰ@V5JĄ"ş*ÓóU:Až ş˝@łˇFµxŤDĆÇ0 Ő*v#Ě´ÔŁX"–74…SâHfaj6›A@š Â?űěßüq˙ďßBľpĄ˙rąNĎáäK/âşËKK´ZMn^żÎ}ÇŽq˙ńxö}ďçÄŁŹńĄ/|‚Uŕ[Ď“ĆÚ?ôˇ3sóźüôĎŕôz,ĚĎłş˛ÂÜÜ-4Mç=ď}–W^>™*śzăuÚ­ž—´~oŢĽÁęę úÇ˙ž‘‘Qž˙Ć× †Ó§Ţ ŃX'—·xęé÷đ7ź˙ťN›ÁA>üŁáä‹ßfúĘe~ꓟŕůç_ŕÖĚ-~ţçŽW^~yðˇ× Ý\K0ąĄjJŇďŕNŚîfł™Ž!ë”…oÜź+IGaŔ’%†‰D¤ěďrH66xČ;HĹCóCOja+Mle[ZO¤´ú ŘĘĘ ¶mł´´”TáÔ Ú,No†¦:t×oŕ?ŐŘÚImÝfł™lvJĄô]ě&D˘Öl6SlőíŽídáN)މˇďű”Ëĺt.ú^„ ˝­®·xőĚyľ{ö<^ćÜběv»,¬®§•Ľf§Kt—ąpąŃJżqí"Ńs}Űîˇôť6ł] I’ű돰jŻT*i!äN!É ®Â_µÍ翼̟a™7·”Ę™őKVPTSOJ§ç¦nŠ"C: ş]ÝĚS(” †®ŻÓ 6ϧ›Î/ #ˇ.~݉ď{(z_®`ć6ťr/RÉĺ·Z„ô§P{š‘Ť­ăVTJłăOŚßŰŤMP.˲čőz»>>fÚć8vŻG«ŐîoŇĘ)Ć5»Ö˲„˘ôń*€ičFž0ŚčŮ:­5dYÁ´j(ýÂ$IH˛L>oá8.˛˛=鍢déş^zí’,R:§ RÉŘ‘űżńí‰9 P(¤Ľ’­!ćâl—S’¤k+" CÝU†ĽŚ¸‡(x~‘\X$—É…E ż@$Ź••šÍ&kkk,//c&ő–2Ťç^§«–ďú.~Сüżđ ź=wî …B“/ľH&Ąü·Îż‰eY,ÎĎSŕó˙ń/ĂDăÍłg1 ·Ţ:ĎŕŔ çΞFQTVWW8{ć4Qł°0H膑Hox˝“/@cN¦sňŰ<Śsúu!˙©ź#6“©ëzŠ»-á™˝[<Ôť"—Ëq~ú:Ź?xŚząČüĘ*ŻźżŚ¦k,Ż­sîň5J…ośżČÍąEjŐ «ëM ]ă•3čŘŞŞpćâ47ç"N_śFSU.]żĹŇj÷ â8 IDAT xéÔ9dEáë'_C’%‚ äŤ —¸µ¸D×v¨W+\›]ŕ+/˝JÎ4XYor~úŐR‘0HŞ »•ť!>Çq]ľĽEA!o÷u!Q@đ#… ’Éĺň¸ž©řhr@¸ôĽÝČ“7u|?Ŕ d4T)`§Ë¤¤’«É>®ëĆ2şaÄ:^ŁĘ!n /$,ÖN»MâłůǢ(JÔ Îťbéłż‚{îţÜ-‚×pN~ E–(?ň.TMăäK/222ÂŇâ"űösmú ·fnrü‡8}ęu†‡‡Y^^F’$FÇĆŘżźfłÉ‰GĺŐ—_¦Ýn!I2ÓW.łÖXŁX(2=}…^ŻÇČČ(ő.ĽyŽZ˝ÎĘň2V±Ŕ­™\×axx„C‡Źpń­ ŚíŮďűL:Ěää‹ ś|ńŰěŰżźŮŮ[¨ŠĘá#÷1??ÇÚÚ*Q i ssó„tm‡w?ův§…©şhr„톸®źú„ )°ŰŮ?fßżŔ+ŠB±XL Qîdălj,›dËżuü‰Ĺ+»Ń‰´¨~5›Í ¦i‹ňU–µ´”%ÚĘ*eŤŽ˛FK]¦©.*>Ců=´ ödťľ/s!|„¶ôÎĹw‰x»Ű_ýŐ_ýLˇPĐ6$”’ďRČȉŞünI¨w;^´€a;>QĚĂAl˛ľ—óß)4MăĚĄiö Q+e‰é™yč' ×fQU…óW®áz‰"JĎő0uŤóKŘŽG©hqs~‰Ąµuęĺ$~áµÓLNěĺÖÂ2K« rşÎÍů%ü(âúěç§Ż3˙Cµ*şŞŇhwąrs–K7n1:TçęĚ<˛$GaĘ꿯aeMć•S6ž%őš(fiŐ§—î+3„1VÎDU•4y #9±ĺD/ ŃeCqń</L 9ş¦âyN¨K jŠÇÝHÎddbt%DÁĂq}bIE7ň‰yO&X˙Xµaý÷ko»!_(8š¦ĄĹńţłcj'|kvüeuŠłÚą…BV«•J]› ?[ŹżÝřÓ4Ť˙ýw˙бˇ1żűgßd¸^&g¨ü‡/˝Ěˇ{ąrc‰Kשńí×.óĘé+ś˝Ľ@˝R˘li|ţ[—yőěUN˝y•‘zžz1&đ}ţčo^!Ś`j˙Â0ä›/_ "fúĆs‹ F‡6 TUĺ>˙mľo?Żžťf­Ńb Väë'ĎS­©×ĘüűĎ?Ďp˝„Ú7zëę<‹+M†jĹô7 ĂH ]Ů< ËIlG3‹9ŇŞŤF%ÖĐGęĐÖ–ičs,«×i¨s4Ôy:Ú*ˇęa+MtLrš…ďvdYM˛ ś^Ť?J ˝ó;hʧ?őÉĎN_ľŚëş¬7®_C’$®_ż¨ŞĆčč37o0::ĆôôĆ'&xá[Ďck««AČéSŻăş.÷?ÎüÜv·‹,IŚŚŽ%ÉčŔ ᥠD«+(Ĺîôe´AĽéËä>ôQ”ÇßťaĆi‹SȵÜ5‚( \¨¤X•·C:8Ëł—§YZN…‚0"o\›ť'#J‹|.‡Ýsxŕđ$ŻŢŕÖÂ2=×ĺÉréÚM‚0b|t…•ŁuYćÚ­yü ŕ‰‡ŽqîŇU C'B M#ź3ąďĐÎĽu…É}{¬–yëę Žďáü•ëŚ ÔŘżg”ożv†zµL·ç0\ŻnJRď•t}MÂ0ĆĘçĐU%lĂń$UĐžÝCSbd)F‘ctŮ'ôĽ@"—·0t Ď—IÚr‚ěˇŕŃsP4 Ó UUÇ4MŮČo‘ׂ>ajóýhOă7ţr‡ŹRxęô‰”~äăÄNű_Ąđčă(C ŹŚR.—yćŮ÷±˙ŔA Cçń'ŢÍŘž˝ 19uÓ4yú™gÂGśC‡ŽP«×źŘ‡nĽű©§ĺŕä$,.ĚóÄ“OqüÁ‡Ř;1A±XäŮ÷żź|R©Ä#Ź=ÎäÔ÷Ź>ĘŃŁÇP5Ťýű·,ŠĄc{ö˘kO˝ç˝śšblĎ<ȉG#ŽŢ÷ěÓ|ç;Żňî§ŢËčCIĺ>hŁĘ11~¨Ç„A¸—6©XDDUUHśÝ®†aFGÚ6ţÄâľőüC&ěŁ H[ĎbQŞé#h‰»(’††AODHDHť1 ´µ?§’_ĹT։ـv4NY7‘±c-ĹPľăí&·żţëżţY–5Ń‚Übsáş.†a¤íÚ{%ídŹ$ ]×SŤçldťńI¦ëµČéůTéíâq…žéĚüł ‹x~rĎ­ŽÍůéë8ž‡ç'B÷­N—ńŃ!ľýÚi:vÇ€ĺŐ¦ˇsöŇ4AbĺóäM+7gŞWą>;ĎÂę¦iđćĺkČ€¦Ş I´:]t]c­Ů¤k»”Jß9}žj™0ą6;Ź"+ś˝<ÍŃűRC‹¬|ŘÖ"Ëĺ1ůĽĘ÷82eač2EKe˝ĺ3\h¬÷hw}‚ bn~ŤőVŹRÁHż5$ą˙Ű>š!IбC4É'ŽŚ"iرNřN0ĐąC¨óós(ނި©FçÂÂŞ˘°¸ş††Ž„äLťázŤ˘•Űô›‡&pĐĎ{Wé2YAQtMI%G„đ˙VrU t=U S;]] Ńhă÷lň©¦aĎ‘cI–SÓ‡­ˇČ1EĂĹ }\['g•P”„0ĺ:]L5$ŽĄm}«— –Đźű ĆácÄž‡6>A÷ĺ‰=—îw^D>t•j•j_Ę`rępúďűÜôĎ©ĂGđý€ˇááľ-nŽ“SčşÎČč(´Ň?üŁchx8Ýl•JeJĄrZť8rôľTQ¸E5›M>˛i=08ČŔ`b4P,–čv»äryrµĺJĎłůÄO}šz8Á;5WRů8YŠÉk>OO'7AËş$Ý-„.­ŔÔŠęËV˛Ăíđc"˛8Č|>żŤ("IËËËé‚%"@J¶Q=´ö§Ňßő$;)ÜK%­BŘ8‰žÓ1ä¶}Ť7y€ öhmjJŹ™ L#|çWî%îńNěpń<łďÂ4Í{¤fŹ/‹)äe+ö’oAéŻb\ßâ"‘2áÜO­0Ž«{í¦ é$€ű lYś˝r­ŻĹŞ0>2Äęz‹ŮĹe ]c¸^MݎŮjqp|/ş¦ă‡!őJ 
­ŹłĚĆ@µL†şÁÔľ˝śżrŤ‡ŽL%¸ĆžCѲ(ärčşJ&†>%Ë"ŽbÖ[mrÇ&÷§k@Ď»ńXU’ˇěyyCar"ĎŤŮŽ$Ńućç›ř~ŔsĎĺÔŮ,­ŘÜšmส*łb®Ýc˝iS+\ľşÂřX‰bAgayŽ}Ct»Ͷ•×80QĆv"–Vl†ňĚĚ·8<9Ŕkg1 ËŇyýĚ,ýˇŁĽôĆMĘ•"˛$#+ 3~˝eóČń!¬śĚµ™&ŁŁ5nÎ6čÚ¶íqôĐ0W®­ĐëyěŻó­“—ůá<€,IŚ'ŐJ1/Ý-¶Ž?ˇ´´´$†Fbăľ›ăłÄÁÄVZ!gętm‡7/Ď0:´±.$Ú4Rš A€"+Ľqa†vÇć?űčăČš…$KĽđĘ›Ř=‡÷?ţ(qéĆ"šŞĐî:,,7¬Pݍ”-&ö ˘*Ňöń“Ś©ĚLŠě}îNśŔA)J @ńĆ= Ëíťľi‘ódçá,DM¦WWWS̬®ëéď¤.‡Z%Đ©ů{ú'ŽY—“j0*%łJĽü—–Ž$-c¨‹ÜjkÜĐ>ĆŐ¦˘8ĚE–ë[lPň“źNŰA°˙ŕä¦*€Ů?ół˙Řp–ÎXŞľkS5);,/-¦5Ş PúŐ˙™Öżřçł3äźyŽÁ_ú ˝~ZśWícˇűrEšÁ!Ůa,I7hşË ş [{©V«Űl(ďwvâč!¬|Ňę}üřQV›-ž|ř8ŞŞpka™GŽĆq=LCçľűšŘC©gzfŽÇŹ!gš”Š  ­ŻžýÄ÷Ą;Ńw=x +gref–§N<€•Ďa»Ď<öŠ$±ÚlóÁ§ź`fa‘ÇŽE‘elÇáO>ĆÜŇ ŐŇf÷%ń\łĘ©>Ꝫ'’BÎ4%‰ővß÷SFŻHÄ»J’xRŤHK÷R2] ŃâN×Ő˘X®ŕą.¶ă"Ë:¦r{˛®D¨˛KH˛‚ď…ČZŰ“ ňvLnă^˘Ö—żŔČăO˛üŻ˙7Ş?ý7Će»Ýď¬0}ĺ2‡ŹĄVHǨ¨>4ÖÖ¨ôő„Óߎă´Í%Čw°‘ŕ‰Dw§‰Ŕ•Ë—xŕ79üܸ~ŤJĄşí|â8E–Pă6Ý–Ťf°Š5ŽŻĄx5oÓ4‘uW¬\áužµľ^°…žé&wźĚřŮJRŰY¤ Kd-\›Í&Š˘¤.âş…  _˘"$0ڞ+Ć!Dŕ„N|]Ă —Đý+śŽ>HW.Óőa%ĚłO[ç°ľĘZcĆ/áÄ·Ç˙§©k7IęÖwńv¨"ŚăDźy+ńJQ”tŽ˘óŘ´@‚Ëń«”›ĂŚë‡ď¨°Sd«KcCuJVb 3\«pőÖ<ŐR‘(ŠXk¶Ř;2ŢO®'Ç÷`:Ăő*o\¸Śˇ©9p]Ő⼑ŕ ''ö3 Şĺ"šŞbęÓkëŚÔ¨ńÖµĆG‡(ä ]çđ}ś}ë2Cµ*őJ‰ůĺŕ>.^»ÉĐ@uS˛•…Ý,޸÷‰1…Żź´™ľa#)äŢóxŽß9}}‰uşÝ.ó Mň9cGÇxé•iŢ÷řaNťťE’"?±ŹŻżp‰‰˝uöŚ8{~žĽg?ßZť™ů±¬Ńlą\ąľĆŃ©:s‹4Marťó——1t…k×Ů7>@ ¸~€ë\ĽşÎž‹ő¶GÇ^F–%nÍ®3P·¸>ł†ŞĘě­ É0:TFŠC–VşŘ7>QiĽŇŁxnÂáË4MÖÖÖîyü ¶dLÇě©ssn%±§íż‡=#5^={•N×á#ă„~Ź(ňyꑣś»|“™Ĺ.KŘ˙Ď=q”—^żÄÜr›‰‘Wo®p˙áqŞ•ŻÍ3ąívCS©• ”ËĺÔE\»ah=8JÇvŇ1/÷!®ăŕxĎż|ŽÁz…'šBV–yëęͶÍÓŹĂżN:°M$Kř[[[KU/˛cWĚĹŮyF‘Őô;݆cé˙ß čŇą¸ŤNß^ŕBü42kˇĹ¸ÖfźÖ¤®ô¸é—iGú¶kýA‡÷ďFŕZDé{·Ö·"!0 cWŇŠ˘ ˝ő&Ëżő›”ţ×ß+ŃÍZŠß ß6k¬H·čŃÚô[…¸Ę¨˛»}\BjJHÎëUql¶šy»c7.5’$ĄBáÂé$ŽătpJ’´ëęKöüYËâÁ’‚Şé”‹V"é‘qz šp„FéCްôťŰLQ,áF˛š@§‡ăůčJ„ˇx;2smߤTI0“Íő5" 3ןäâpsb,/ÎÓřďţ ±ă0üO˙‹˙ňˇüńO,ÎŃ=ůŐüKXţ8őążäă?ţ Ţşpžőő­f“‰‰ý\żvEU¸qă:ď}ö9fnŢ Z­177 $ÄR±Dّȧ˝ď—/]BÓ4t]CŅöíŰĎââyËÂĐ &ćä‹/ŕyS‡ăy.ółs Ňnµ8pp’«ÓW0MłĎÖ989ÉâÂO˝ë~â(Ŕ%Bą@±óbKB›(–Ą/P‰űă§\.oŞŞn;1‚…lwL`‘¤ 'ź{¦i˛˛˛’¶łĘ#bś uAĆ˙M`qŰv+é5ľD¨ âFUÜ8t_k,…şQŇA’‰PmĆŐpŮŻŃ ·3Pń©ĂoŁGÄqlÇqśó;Wб!şwYlż—ĹxĘS˘Ä®Ô#Ŕ#QÉ čJ â$'ĽÜŤxU.—SńzQ@ ° >†aČ­ĹeňąQa;.ž÷–í÷îŕúŐľĆéNk€¦itS×i·Ű›ä­„ĄÝh»Šă7ćQ± Ó™w9qŽŁebIĆ4 *Ą<†ŐšŞ06R¦ZÎ!K5‹jµČŕ@‰Ĺĺ&ş®pčŔµß{t˛ĆňZ—ŹÖ¨Wr Ôň ]Ť± ,«Ŕčp™ZĹ¤Ó +1RSA’™Ú_Ćq*Ő ăC,ż×b jŕyIGml¤ĆŢ˝54M˘^Íc;>ŁĂ%JbÁŔĘëT+yr¦ĆřŢ =Ů$9@Q-yÄťĆo.—KÇž0ßh4éřëÓÝB^!ŮŔşJ±X@‘%âxłFŢÔ1 5µ˘×ŐD'8’ôTŤĂ04LĂÄ0rAŚă“JÖI@­ZIÇŽ"K}hâöyX"îw=dE¦T*ačZędŞ© †®˘kÉüřVΠ^-ÇŃ=碧Şjj‡.Ʊčľhš–n"ņD|+®ë&:ýĹ:Zçňňu ę"ye–ލűŐ:”d?Vđb'VY,`ŻÖ&/4#ăSPžxüŃĎ~÷•—ąrů2GŹĂs]ľňĄ/ňđ#ŹR©T8{ćŻ÷Un\ż†Ýíň…żţµjŤ/ý˙ُ̽%Éućů3ßÝď[F[­YĹUTQEAKC­é¦űa€¦źf€yˇ^řĐ|×1Ŕ`^{0-µ4l‰”D‰l’Ĺ*˛¶¬%÷=3ö»úîfóŕ×<s¶Ěu»,-/“&)÷îŢa0čóć7‡»wn#„ŕô™s_=ν»w0M‹‹—.Ť*ð´ĽĚh4âÓŹŻqţ… ‡ ÓäÂĹ‹Ľňęk•4ďâŇÍf“łçÎóáűďń'ň˘@…h–,EÁ¨żI–ĺ¦UqťjŞ$Ë2q]ďP9_$č!Ă0*µ¦´Ů5«×ëaYÖ‘şá5.LËfęę~Ż5őÎAkW)…c&˝ďaú­’,ńQü%îŞY/äʤc¤ś´GFF(be±Ux˘ŕ”=ÄЎtă™ç n˙üĎ˙ü;I’Ř@µ Ő;žç1í â8®x^ga÷t¦G—vëăS=4±é°‚O‹HŚ(Č čpś‹DĹ«Ům¦Çkĺ©YYRĂ4Ůî—Yü0Iy÷Úg\˝sŹn«E3‡1žëPČ2đE®ÝĽe4› „i2…Ř–I!%QśŇétřÁOŢáěńzĂ1¶Uf°Faň˘ Ď2ĚÉ{pP€>Ëô!ˇÓ„q¤¸rNb`Ů.͆‡! 
IJřžmŘv™UlŰ@II¸Ë©]”Ęń­ś…–Ŕ6 ť˛ôë9 ”Äs&U6SáŰY– L?č¶=%ÎBŰÂŤŔĄ»°„ď9¤I„%Bš>,´Ęk‰’ß÷pË6h‰i–I*C€ďY´š>­ćŢL·ô»ŻĄ_g­?]™­«BjR-ý|Đú­›Ţ×u <ĎxĂ({IeâĹ &˝$1j~X'ˤ”UvkQ•ěš<ß÷+¶ýßš{~ż{Ń”^ZŔeިbý,ëŤĂ‡ůâz˛Á3Đ˙GLݞp‹÷’/sOž%U-#ĺ¤=¤m¦$Ę"Vé’*kü&ôĄ÷˙‹f3ó˙ŕŰß=yę4Aź.^âço˙Ś‹—.ó?ř>żýć7ŮÜÜäô™3†iĐév ÇcÚí6ív‡«oĽÁăGŮÜŘŕK_ů*†iĐj¶˘3gĎU¦3µúˇ×x=HŐXŘ+§j`â0ŰW/`ăî?˝¸u¶R×,ş’›ˇ€§›;ľÇ'ë\˝rǶ“?ż|ö?ř點ÇG7ď°ľ˝Ă…3§xĽ±É`4¦Ýl˛ŐëóáŤ[[첾Ýă潇\opűÖ[o}°g šźř(ô[:HŐ™ÔzEI7ňhXÝęť®¦9ŽóL˘ÁÁ§Ĺ#±Ă1użż_E ¨2dł*I\żM8ć“Ű÷±-‹Çk›\˝r ×±q•ŚĆ„IÂ{źŢbF$iĆ퇏yşąÍR§Íţţ?łŐ°´°ŔŹńĂqTŠŤím†ă1Ű˝!źŢąŹaš\żű€źĽmÂ{jňö×8±ĽX©&Î+ÉnĆDĘ»Çň‚˘Ý pÜ Ľ~ŰŞű™†âˇ˛I’¦M&M˛4Ç4T©2™[J` cęrL!+ú/)lšA€Â”˛šfşř~y`Éă¶Q‡)Ň ĎŇ’áÇuq—B–° ýSş*„¨* »ß±KÝ©ĹĘďqöVkâw†k7îŕąW_ĽŔ§·ďńđé:—ĎťfĹĽ|á¶µ» ő‹©Iü§Ż]gwöŔ,„@vI%6Ů\4]Ő~/–Ć?ŐK„ÓĽzPy°7ČµŚ˝t'–Q`©„a”㸶e"‹Ś0•(abNČÇ“ÂzFŻ[;×É“ő­›ë•Á­i,´›ŚŁ†ďŃn6x´¶ÁÝGOyőňÂ(ćĚńŢ˙ě†0¸tö4ď|ô)÷ź¬ńć—^ă“ŰwůĘ+W°&‡B}؇Ý.]—ŕëMɲń=Ç2«ný3uÓ~x27}Đ¬Ź—Ę +JC¨==bŇˇŠ„8•8®Źď–×8 c C`‰rţĄDŮŢ9B€*q× đýro·lÓ3Ňéńz©y¶mÓëőö”Ţb{©Ż_]UÔÜŮZŘć Q=š% Ńh‚Qňô¦‡ď—‡÷,îc›»űX%¬‘§Ăc™Ű¨˘‡aÇótôźŐ9iµÚęôęĎé†üéÄěĆł„¬´OÖ’éZöř¨I†ŔI±WÁФĎČü ײĎúu…`(]zŇŁc$¬Z!ˇ˛K‡žôh™)ÇÍ1Ńo0Ŕ‡zH!DĹ©şł;ś›cVłh‡<+şźé@Fs´ęL¦fôsiŰvŧÇë—łĐé1ú…Şß˝éG—ÜęłÓÝĽ:c§ic´‚“i–Ę4i–óËOn`çOźŕÉĆ&ŻN8g݇ľ÷:·bý$»űŔ¬ňyűĺˢË=ľďĎd´Đe& ćźőŰőńł¸…]+Ç5 DÍąJ%ČhŇh¶‰Ó¬l1`FHYX1afŁfĐ€í11áÁÉmaZŘć®|â÷ţćŻ9uć ŽăňđÁ}†úg˙†f«Ĺ˙őţĽřŇËŮÜ”v§Ăh8âáű•ÂŘ8 9~ü8žďó替Ăß˙Ý÷8sö,˝ťŰŰ[DaČÉS§ŤFlom±¸´ÄkW_ç_ţůźř­oĽÉŮsłçLcśT‘Ř)In"Ť jöI’„"ŢÚăPăĚ&©ôËţřďi, iś«‹ř WÉjŐŹiÓ• -ˇ©ç°ÎĚ1Ť*ŽŃşôč~VďĆOÓ´ÂĘ;vŚťťť*Ă4ďuňĄ´˛á«Ľ–¨ßăˇ˙ďů0\¦8l]M#ĺ%g‹ ëÉ"‘˛«?K”Éőt‰ä×\{^¶„ŤŤŤđúŚÎŕhrý(Šäť6˝6tvI3ĆĚczł,Š˘ę ŤGdé|‰ŠfłYa ‹˘Ř×Mb\`ŇZŞďMTIJiQA–Ą{Ćč§ ô0QrŽ·;d! úe)»Ů$Ž’8"JRŢ˙ěŁqČ—^ľĚť‡Ořúk/îë‡ëťĺu¶´W& ËeˇÝ (ňꨩ(§1ô^ĄßŮYż}»Î,? e6Ak™L–PYä„a (vD’— ™Rŕ7[8¶˛ ?ą6ŇjµH’„Á`€eY;v¬R1ś×ę{«bŹ‚×~¦›Ô}+%—€ŤŞŞ5č÷pŤqu8PJ0ÎlŠIâĹJnĐvo`¸mŇ8'Q𺯒˛j¦›eív»bPĐď©nţ®Ďa§Ó©ÖĂa÷®ńň:¨ÁX__GJůŚôîafĐ˙5¶«ÂD©śp$¸ëţ[>»‡Ž·„䲽MËLą›uŘČBrÉޡm$ÜĘŘ*tmż3ßzë­ďÎóÁ$IŞrŻÖ©ź\M™.@‰ýÔäÍő,ĐA¦I‰÷+“ÔxĄ3 yžWÁl=Q/łfú÷5žh—WďŮ{Đ÷lF%±¨KŤş,î86«K]V—<—c‹Ý}ŻŁ~ďőrŚî0®î]–M+đź)i,źęĐó¦iA˘1«Ź÷<Ż˘Ý©Ę/r·L–eó™T&ŤVYňI˘!*äŕ Ë"J’Ý2Ř~Öjµh·š`YضSf=…^°±±ÎňĘ1 rÇqxůŐ×ô{¬=}Ę«W_çé“ÇÜżw—?ţ“EG|ăÍo2ŤůŇWľÂéÓgŮÚÜŕÔé3ô{=‚Ąĺe’$áµ×®NŞi–±şşŠe–äÚÓ5Îťaß9ËóßÎ0 …e* %p˝F•MČ łl$3JXB\Ó˛¤¤1F L ËóqÄqď†{’ Ů}FŠZT´Ó×kSŻźşŇ™çyĎđö3}¸Ó:MÓŞ‰LsM&&R7)%Móvú†;© Ś78ČÇéYb9_@š*“ťÂcÉŚXµĆڤËH:ôĄÇОb…ô Źüsdp-ˇŽqř˘2·ł¬.ň ń¸ót¦Ă^ź4öt°1ŻéőTu†űÁ\ÔşěŞţíY°—y¬ž­ŇŞý‚]îµ-«ÄQNŢ—˛á®ÄkZ¦ÉR·Ĺĺs§q›S«+»őŚßž.k˝gŘL›fĂÇ2źÝęČzŢ÷ý}E<ęď°†JL—é ]MĄĽn.MP`:í’ ¨Č G=e˘Ĺ‚8)(&¸Ě˛LÚÍß*‹Çő‘Ş\óľ¶mW8Ýn·[AƎҸ§+b‡®1®A¶Ę,t†gçئB¨Ś\Z8Ny¨N'8cí‡seć»ŮFe6ŁO0E‚ĺ8¬÷n˘¬e­Ągü°ľW˝ONăësčűeĐwPŁYý^ę IzŹY\\$ Ă v”$RŠfq [=ư&MŁ}˙Źąť ź#É ě>¶ś¶‡ `P¸lKßČ9iI•UŃýşLlll¨y6¸şé‰ÓeHÍM7kę Śă¸ ¶„x–·sćʼnR­c?¬;o÷S*ëv»•$č,iŔĂĆĎó€gC@Öł\uÓYWý™yyőXťIugoÜ(©ż|w—ÇqÖř: żvşG!`׸SĂ0ަ”é{ÔĎ Iňx Ç”¤…I9e”ĺ•0NĘŘVíŇ€iÓ,ĄTŇ0p˝Rň1MRĆŁAŕłąąQv8ç~ ”b4â>Ýî;ŰŰŘŽŤm;ôz;ř^YMX[_cii™ŤŤőJY/‰ă =™ÁÖćÇVŹÓď÷h5[–E† GC–––év»39fa/WpVn٤Gq<ÄqË€(#‹\–ď“L nbąÂ@f L„QnBi”’YWqŰWHłŚ0 ŃŚ ú™ďÇ ”Ş2š%ă(ë/MSlŰfii‰ŤŤŤŠ€}^SJa)ŤŃÂĘą*Fń7¬?ăVÔű»´y"碳/rîd]¶ ꆑqĹŮB×Ó%BytÇÚ±2.c®ŤZDsÜĎ›ą]__…s§7t%Hgŕ5×ő~1]=Ó> v™5ÂŢÂn@1íCë%)ĺLĄ2 “©g°ęW§ß:ŞŇ™Î„醷Y GťNI߸łłóLó“fŃ•˝yg];P°Ą6±ŹvĂ'Ëfď@ĹS®|ÚíöÜ\Óő,ެJ¤6Ó4čvK®ëŃ`ĎŚČĄÁ8óp=ßµ‰˘8ťô¨g Ós†!Â(Ům„–×ýű.ÉŻŻŻWâ°Kł•¦i•9l-.,,ě顂˲Tí ě {Ňďç6ŤöĘä ×ǶKşÂ(a¨…I’k_ü!w±!d™@˛ ˛(!/ă/ľ†ć§Ö‡ĹĹĹŞJ»_ü˘”bii©:´$˘3kl łhĐl6Y__ŻŢÓyM)…+vÂ˙+č‚RČ,dťăCţ§éŃřĂ §ě!§¬!ky»Y8g÷X¶Bîg]ÖňĆ©¬ý­mĺłnGÁÜÉ‘e™šV×ŘóZŮD˙żţďé2YÇŐF«Ř:‰Ľ'Ą¬H—őďNĂ´óĐô]Ů,!]şĐ‚ -M'˘ˇ GY`:—RVZ°aš8Öx]ć:j€­ťł†:ŚĆ!JX,t(ÄaăÇ©JŘŃEí7ľľ1Ő7Z<@ÁÎöŽ™brO©G)…ăřžKš”Í(U9×iHI} ”%G/0„AGČÎŐqś c¨ťŢČë4ýwę%Í™og¤…Iˇě™bĎĚÁ”$mĂI+I-ZÝh?›‰íŻ™”˛bŘЇ%íg 6eüĽĎAwç§iZ™:ąqX€ńyÄttąŮlVTOʰé´JĆíííC÷€:Gő~đAżŻ÷áYĎN7R…aLű8VAš[Äąµ›( 8¶M…dą, Ş&ÔQ›Ăťťť=×.̲F*EG(YĚ =ô\čąé÷űŰŠq*ú:Ń>4—Ćž¤‰öĺł`W{čë Ił–dŔîV‡ 
™lŁPdŇ#h´Ş˝Lfcüřźń›”ěIŮ”mÚÂöwă")‰’"¸ŠíŻV0Żihجy÷}żâ{ÖxÚĂ'µéJ†vhŽę٬Sä4G˙7ö¤Ň+€pó0ř÷|8ę2_-ĺY;aŤ8c÷é·łrepÎîsÜń(kń(o)ŔőŤ‚Ż´ú„Ňäýa{îŕÖü˙Ű˙řÝ–˝đ ĺ„‚­~Č;Ýa0ŠY].›T~ńń=Ž-u… ·¬ÓđLî?ŢäGď|Âť‡¬®,âű.?˙ŕ6W.śÁ˛L˘(bmłĎ;Ýa»7ćřr)%op›ëwźňdŁÇąS+(Ąč c®ÝxĩՅŠZE©ýUbŕŮĆ+MA#Äłt%Ťźnś™§L»ŤV:»qţ¬ńőr˙,°ůaż­ˇ¦ăxîž2Řaău7«eµĆâÍcu¨„Îj~^M†!iš‘K“L–ݸőń˛ČI˛ Ëv ü Ý‘,˙έ©ŻM7 Eľ§Ł×!ź(b•»ÝĆ*&˙_ŕuyGJą‡WT—z3ŻÜ eYU†Ą¨ĎA˝«ZăŰĺř´0püîd]ڱ‰Ę,#'ŽS˛\Ńl4ń[KH™0HBďŰäÎeRű 'ă§ 1ěűnŰ9F~źdĽ×:Ź1Ů쇣ŐeÚzWîĽëO)µçÓí#5‘)I#˙9ŽŮGBqźˇń5®çWÎh\×ꥱSv9Ď;ŇŁWř,q[‡žĆ5$W›CĽŤT»Ř/>ľËíűë|týŹÖËLęO~ń;Ă$Í«€ćîŁMľ÷Ă÷ޓƗjÎÝýLűkÝ@»¸¸Xób{ëă*ĄyLű"MĚ ąŃÁxŻ×#MKÁ] ™w|’¤DIJŕ•YôŁo@ůŽF#šÍf…ťź÷ޡôe˝^×uét:ŐVĎ„ćŇxĆ JNÓńhŔ8*ýhłQŞFZŽWő¶ě7‡(˛„ńh@!%ŤFËń*ľi=PW[›†ĹmooWÉfłÉ–ĺP(LŮŤ2a•Ä1R>›„©Ďa–eŐ¦…A!K†ÇkVâ?Ž×(ÔŽUV_‡c=h|‹$ř3ďë(ke4(ś3Äţ·_%ęŤyéOLŢ&–zTÍ›ŽY¦çPgmëŐHýąńxĚ`0Ř×UśA•é?˛/ÎŻa=„a#„ĚBB®p'ż@8'kßď¶ źOÓ%|‘󲻉+ fmemNZ#ÎŮ}Ś9ň·'ť†™ó4ńŘ<*LB!‰Ä€BĺŐe€»ĐxóË—Yě”/şRŠ˙ňżŕ‡?űxO°pűţ˙ôÓ †äyVĺştűµ«řôÖ#­m㻥´mVü·w?ĺ潵ɷ>Ůâ•K§ą~÷iů'“‰›§ńK/˝ 4¶t^ÓAŠvpÝn÷HN㯴Ĺ‘‚T`ŹÇcŤF•Á8tĽaxB”eǢ(7HÍnqĐoę†=(;xűý>žçŃívgľX}W˝s[ZóŽWEÎx4d–x×ŔuÓŚ$+8ě„'ČĽÄßJ)i¶Úöî‹çůbíTvvv*ýfłÍ§·ńËŹ>ὯGľťŃrS‚š4qýĘôćşłłCž4ÚKf?Л̨”-®™Te#ż‹ëKŇÍź˛ł˝ěvźˇ@ÚÇ ?e\&ôQ2G8—ÂDë;K>ľC»ŐÜłöuůsż,ÖôŻŐjUëOg ´jS†‡ŞŹM?_—śä§ţâäĆ ˘Ř§ü1wâçĂaÍü-˛wŇ.+fČ‹Îř4]&R6/»›tŚC`NFA¦ nEÁv]‡Y&bÖÄFń`ĎA3+;_»z?ů˝«4üÝuý“_^ççܢ»k1Š6·űX¦n2-˙•ç9EQ°Đn2Çś=ą\UÇĆaLˇrâ??»ý/˝|ž[Ö«5PÝő• IDATL*#óŘtP9ŻÔ¦}‘Ôtď0Ó4čč26Ěź¨HÓ´‚ŃŐ‡šaâ96¦aT4Šó\»N2č„HŻ×«č3ʞi UUNÓ¸Íuď€Ě>išÓj6i5’¬`8>ü €<‰ŹF%\«ŮÂőlI3ýĚ2ĄÁ ‚,¦I»Ó»ËßţÓŰĽ÷Ń'¤YFžişiĄ2öĚuLćP'\ší%RZdĘŰĺCʇXĆTĐ( „J ä/ń/¬€0ý™~¸°O1ö˙”At–d8@ɰ0˝łdăĎČÇwé´[ĎÄB\ם ·Ô{ć´Ö{±~Űív%ĐsÔŔÖW°Ó÷1śI•&Ź ă€÷M&óÇ;‡ŮPş|š.ŠWÜMšFĘĂĽĹý¬ÍŞ5ćĽÝ;0Ŕ5…bĹI‰ĄÉŤđč{„Đe·rbý~ÓP†`gńŁw?c»_.n!˙Ý|…ß˙Ć+{°v§VyýĄóĺ /.ËĹ÷o°ŐŃît¸ýpŻ]˝Hśd<Ý(Ą>-ÓŕŐËg8}b‰N§Ăõr1öcnÜ}˛§Y`ŢS°ćRŚă¸Â;é@ď(š~9ćupş¤˘+´ćvUYgîLě$ ¨3ÚÁÍŻ”ÂsllË$Žă*ÓŮť‰ť5VĂO¦ńÔő ¶†wĚsíŽSó5U[«Ő:ŇřŇą¦Bd–iĐ𽲑J^&@–D5Ě\I×2>xŕĤ”چFú!wď? ŰnóĆkŻĆżüäq¶YĐtĘ 7pŇ=ÂPR«rČx¸]=ĄYî)×AI$®˛|{U¤Äâ2° ðâFÔŐ”şsÍÝËŚÝ?a^AŮg éM7ţ÷y­Ë±Jqq<3ń˝’öJÁćöq´›=»zĺ,/_:…Q“Ş:µşČ×®^,;ă'2Aůnv»]ßáwľú"źnqëţú3-xůâ)ľüĘšŤ&Ł(ĺŃÓmî>ŢäĆÝ'HYfš¦łţ™çy‹Ŕp8¤Ńh)©?§«qÚŹ6^+=é±u~Ô= Š"úý’>ěŮwpöOsÉNö€y®˝ÜęďŃŐ0˝Í`łŰ T{~&sŤG•}qBVHlËÄ2M”°őĂB’$‰Ł˲A•=éuŘxŤ=Ë ý“ë7QRqüŘ y–QH ·sn‚cxVľ‡o][–Ś ĘŢ€V»ě›Ăל‚ţ(A’[XÉg8CÉĹj•IuçŮŠŞ0Č˝—ŮÄpÔ%çÁh‚ŠqägĄŢú>äýj5lⰊ´Ö 7yžăş.–eUBGi‚´Ť7ú–ż¨IÖ\5˙›QkîŢ‚ym,m>M–É”Á‹Î fÄ“ĽĹ˝¬Ă1+äg˙W*ÁVfsëMdu3˙—·ţĂwO¨‹{¸u`°ĽŘćÓ[ŹX]Yŕ™UňĽ„ ,/41 ×±i7=„a0E F!ÝvŔâb)ńt}›4Ë8ąşD#đ¸~ç Ýv‹gŹa© 7 CÚ˝aÄ×߸̫—Nˇ”*Uòtî—Ră˝”RŐ&¬1…ôT˝pŤ…ŃxÚýĆë¶Ţ5«q@ë¨3ĐóŘ,,Őł84eŰ´[eđ;M9RQöLŔęÓX.í§qau ś»B‡aŕęĎ Ë˛ C7űÚg[x®K‘g ‡#„iŃ |P<ř!/˛atÚ­ ˘‘ĺ°(dšĐ‡ŐÍ1‹É‹Ł›Ŕ2l#e8Né ÇĽxé"ťv‹Ľ(č B=Ýŕı„QŠQBa›Ë(%0„˘ádX†˘×ßĹ"ŰŽO–Ms¦ˇH “$7đâźáµ\’QFâ~„±gë ={°€ÂÂkťÄ¶]d˛F1xĂv†íHľGÔ{€Ăo.U‡źy±|šKQ“„릻#ŃÍ(I3ůg7GA:Ú$ ţ”-µĘť8ř•ÉĺĆRńcÖE3b§đŮ(4ŚŚ“öT™3éi2eĚÍŽ0mĎ‹ąýźßúźľsR]˛MvUś”R()‘Rńöű·ŘÜrĺ…†ŕńzŹ gŹa™&QÄ™‹ŚÂk×rűÁÍŔăř±%nÜ}Ě/?ľÍŁ'[Ľ|é k[Ţ~˙:ă(ĺĄ 'i6<î=ŢŕłŰŹůäć^şp’‡O·9}|‰o}íEÖ¶ú¬®tj~ů_!DŐČ9÷¨$Ö•Îćý®:¶–RZÝts]ש)ÔfŃö۰ëG5×űlL¬ ÓiaM`^u±”Ăî]WU¦«KőkŻKbvíš“\7Đ>Ďţg‚n§ŤeôClŰ)ű" IQîC>žë ŚÉsČrň|Ň15Ľ¬„‰=˙ď›iVđÎű×xóë_ĺŘň2®ëňÁ'7yű—ńňĺ0Ť‰h!qĚňßR ”ŘfoçŘfAZxŢn€ź¤ĹÄ—żĺ62ŹiČźc:.ăä$…svĎ>>-'˝ë‡m ű$Aű,†!(ÂŹ0ÔÂضÄH>#ÜYĂmŁŮŢeQ8 ¦şŢĂł¸¸Xí±GńĹ&Ářo°}19¤$Ł>aăĎxR,ń0ůŐpŃćl>‘qÚ’a˛ž7H•ÉI{H`děţĚ}`'wŽ„ł­›xĽńPélÁĚL&W3ÖU=M9Łł:ŔŇ“¦ŞsÖ»Ť ĹČĽbşhĺĚôw qô®Řúx ę0Ö%†yş#µVąn4×ôKô +„i>WvĂîw¬C4ôł×˘ľ°^¶XW}ťťbúčőŁ»v÷›SťiBĐëőv7Saâ>ŽeĹ i–—ŔX¦V«UÁ1”Rĺ÷(Qe.â8BĄBšmH'E*AZĄ|°!Ë,č Fă1išđčé:ź|v“/_}•÷>şĆĺ gąxf™†gOJRŠŠŁV(¤ä˘MŁY®‹h´ţDÇ<‰GäąÄČžĐ6úů—‘Ω}ç .¤ YFĘ.ř‚bçżmŻaXŰÓ¶(úw±‚cdQéľ„Ń}¬öľômÓ–ç9yžł°°€ëşlllŕ8NµA6^)E3˙®ĽŽá–ÁN6Ţfd}“ÄąÄűŁüWŻ^㋌ËÎ6–ÜJJ‡&ô4÷˛kyó Vfˇ;Ř"ć“ŰkJpçޤRx®ËęĘa8ć÷ľö2Ƚ嶤°ń›Ëĺ)}°kŚQJg&~x¨ř>}źpg‡(řĂąćAPÄw1’wQEB:NŐ”ŐĆŽ>Â3·°+ĺßE1ąóÎŇo#…·/7µRŞÂh6 şÝ.˝^Ź0 ń<ŻŞŽÄKޔ“ ŇÁôË€Łv—(šżĹť¨ÁÝř‹ĂwfŽ(¸ělÓ7˛ú…Çy»ĎŠ5ćAÖćqŢúB~çW©P¦Mgćöóú}ÜĎ'ińť¸ĐôKu›~_Kć“´úţýLżp8â, 
ÇyM]®o­Ô¤…f%7¦ďí¨j—Óă÷8iNłŃŔsöçTźuď¶mWďŕA*‘Óż˝ż­R»\×ußwĐřY÷~Đ*Ŕ¶,‚ Ć JĘ=~X©’=G3$(ĄŞďQ,ŰĹuťňĐ”ÄŘć.xݤb’ h—ěqŹŻßćÉÚfE+ćą.ŁńożůUd‘˛Đ|ö°Ë­vIe ×±MI’›+Ŕ÷'s8Ú¦ĂĂ4SÉ%r÷ňÁČ.O~Ç“C@…ď˘â¤Ł1‰:‰0ŔĘîc‹ě扉ŽČÝ×q–) F¬żş/ît:4›M666ČóĽ tă‡VJŃ(>ĹÍŢ­zňh‡!_&÷_á“që×Ö8k 8c8nŤXËÜĎ:´Í„Ëö6#ép#[üÂ\±˝˝­ž'9‹ß´.±¸źŢsýĺŞs3Nkť×;ö=Ď{&ý®]M%˘9K§9QşŹz&ěy¸uĆVcŃŽĘQŘl61MsO 8ďoVyížcí‘ŮťwĽŢŕ’$9*gÖŘéů×4R‰4^g‘µ`ì¬K50-‡Ŕ÷QJĹ ˛ČË“édýéß™™˝,Ű+ťk–A>Ć·vÄri Íç\ @”4:k;÷­ł±µÍ믾̻ď}Ŕo}ů*Ç—Ę2—’’Ü\pO&dŃä;(gÍ1©łĚyÓßŮD7Öźˇëş“ŇŻ‚ô&$ź ń82Hě×—Ľc1˝˛H)â…X@4®’űŻUŤRj.W‹5dYĆÖÖV•á›őîL›Í€`ôWŘÍ%@ ł!ŁäIă÷I‡_;sIě~‘f ÉE{‡Žs/벑śť8Ú“LÂç…Hü:D´źŇ>Ožú®‹ČÔ)“ę¦Ç»înŻ…ë¦9n5Ĺ–¦WŞ'´Öňć:3y_Ń´¦śuŕ=ě9čń¦iVx˙’Óă§«!G r=ż0LľSU­ć­4ękŻ_÷Qđ“ÓÄŁ,öË@B /Ó‡§}«ÇĂ÷Ëě~”¤ Pň™$ìµ 8ž_©Ç™2ĵŠĘ$ąŤß*ŁAGŚ0L© ÂTđ˙|ď8sę$ďÓ 8sb•‹g–0EY™CIâÜ$h›$úxĆpňű0J )Ş÷E3Ţx—Hś1EI7Ž#G¨áFIŻUD;ÄIÄ~ dŠźĽŤcÇn™…¤q‚ážĆ^ µÎĄb¤Q7ňBY‰Y^^f82 ŞŘ©^ˇ™Ă(ĄpŮ$ţ+(×F‘ôfçČo˛•»|8:ďěç53Çśszô'\¸žČ¸âl“~˛é"I5¤ÎcÚ1h.R z†Ů%ˇYă5ŽHoI’TĄ$ëĎÂŢŚA˝Ą„R»JVG 2źW©¬^z9L!hżg łČGša`Ú.ÝVP)Ś=ŹŇ™Ţ„ĎlóŚ×ţŽĂËgŤ×ëÇ4Íą˛.Őx¶ăř%.I3šŤ ÂÚö=B€ĺřX–MšĆ2Â5sRéĐl—e˝aĎ ÷†; +,®ß}ĘVoŔŇâ"BŔŮS§řÇý+ ĽúŇ‹Uó żM`íĄD S‰Qe×´ŞÎQÖO§Ó)ŐÍĆŰ&8VÉ5}@€RŃIÚ@u~źtx7ů®aŘ ¶äoËĺ*ó^of1 cĎ{Ąß©iŠąýʬ‚ŚVř7XA—ű·¶ń.ÝĹëM”áđá¨Ĺö@ýőě€zĐuI^řX BÉČ˙6‡źH䯷qVŰ’qŃŢa¬l®§Kx"犳E¦L®§‹ÄęóAÖÄúúş:, są®[5 5‹Ď*ťi•ťy”Ú•şK’Ó4ź[N÷¨e2E öîvÚUWětÉ~›ĆręL{O¬N†á\÷®1ĘoűşÁ›_˙[ŰĽíc:íY–óżűMâ(B¦;Řćn€‘I0µi4ʲťÎř٨›®äyNogTNł˝€cň1Ȳ[Ąl/ŠBu ·QXÇQÖFľů€Ü>Cî\Ú“M×PĂ0°‡q Żąßőh¨M™Eă€kí°ľé±ńpŤţÖ6çżţoń–Îó0vą~q×ç1Ĺ kÄi{Ŕfp7ďpŇqŇň4or?ë<÷ő=op»˝˝6 ˙y ÚčîéŁ8Ó’ęišV"'ó0Ő(Ąöpd×Ďs`>J5Ͷmü ŔŇźGÖ\Ű‘ö€I`ÖnřU`şźJÖA÷¬•őďëLęQL@P6ń=Ďţ§ćôőĎŰR F–ýŽm"ËÄ%ŽUV%úŰś`kĄ‡í–Ô{Y<Ć  ł°‚‚~o›Ŕš5—‰ÉÚöá8cF|őŤ«ü×ü!'WW Ł/ż~ßózxĆú# 3›¬0«‘>Ě;‡J©=Yîhđ·±BŁ ˛[P¬Ę@IŠ\WI˘"ßÁĘďa2&ä"™si”Đ(ͰŁ×ź>4ę*ĚA<öúŹÇäYB;ţ{l>ţĺSžŢ˝Ă©ËŻłúú˙@ˇ n† üŠšČ济‘pÉŮ&S7Ň%„PĽ4‘M˙4Y&ú®ůÖ[o}wZ]ć' †ňô˘Í›zç¬é«tŔ˛¬Š6ă°WźXꝡ:Đ;*3B]©l˙®ŘŇÇáÇożËÝ0 Vł,c×;ŰçUšŃ™łi…ž™s ,ÇŁáądŮ®žµJôAcž{× :k~T…žş¶úÎÎN…cÓ8ˇyź»–_ĚóĽR›Ţ(‰e;(Ԥ̕‘çűŻÇ,p&%0Ç̡HHR…ßhŕ{E! 
Ç=\ë w@bgO”R´ď~đ1Až—Ť=/ś;KŤ1EŠfj’Jç6¦éěaőĐVW:;hýč,CÉ̡°l—W÷yŮţâ/ţâ;qŰł–ćőš×Jźz”&0˝hq۶«Ţy|®Âh‰Ö:KĘQÄ ôuh_˘łi¬nŢ˝Ç{^ŁŮh`bĎřŁú˘ů÷°lZ !JnYÔK)«lř<ĎNď_zÔĘbó^»®ÂiE˲Ýżf™ÎÔë,ëÜJťJ’Ą †ĺ`Z&–id9Q?Óř[çŰ9–)±ŤśŤë8äé۬‰ŇH8·h4š8ŽS1ÁLď…˝‡ú@¨TI;§ Ó˛‡1¶Ă˝Ć"ńđ!EÜÇ$D ßÁă"[çIüo€s Ăé`šÖ¤)TXc<Ď%ÉŕÎýű„QT®ďIĺd?«łëxńϰxJ”ݰv˙1E^ śUÜĆ‘ÝĺFôůaXź×eŃ—KfÄ1kLżđX/,™+VČPşdĎ Q0ßzë­ďÂ.í¦ 9Ь¦€ŤFDQTItőĺŇ”+çĄĺ÷ v„ŘĄśŃŔzý=ÚŃ5ČÔ‹[7Óěç vú˘8fg§ĎńŐš5˘đş$íQ7)M›S—NÜăŕL›Fŕa™Ć3ŇÂű;-EĽßłó<ŻÂŘŐl!ćŁ˙ҸiÝđ7M›6Ďú©C4§ßQÖŹÎ»ŽŤa’´ĽŢŔ÷čr{íQ:Ô:¶Ë4t€ěcYI^ 1I3Y~N›Ä’ă+‹~§ë[|ë›ßŔ¶mÇĄŔ&I ňBä6J•ňŚÎĐÖçdÖú© č5­1‚ú¦iZÁL˘(˘ Ŕë\&ł/gFtĂTŘÁ"Ž9ÄN?ĹŠ?ĂNoŕ¦ŕËĎ0Ś”ťâ2†aq÷ác˘8&ËsV——zŐµ*ÂŤ†Ő<Ă źłůč>çżú§ř‹X˝üen„MzůłČż)K‡DY·Ć´Í„Çy›HZś˛‡xFNŻđŽěüź7¸}ë­·ľ#„°u€TǢÎ[ŇTS:c:Ó`Ú˙Ě’C=,HŐż­łÎÚŹé SJy$9]Má¨ý`QłďCîÜ{H0 Ë2Ž[v)ëXó<‡é=`ßĂľ0q=ßÝőzü´ŐĐŁYż-„¨šhë÷®ý¨®ćtíő˝b0ÇńÁ׾ŹiźŻi¦ôxÝĚ}Řx˲h5ˬg–KLCŕ86yˇńŮSЉfQA „Ë”X"#Ď%ľß Š()÷rËx–>¬n†P"§ÝjđŕńBś:qś—._*n$i!Ęľ‡Tš¦S%g†Ăářă<{a}ÍçyŽas‰Îţf…Ť\!á aä`e÷±ĽŽŕuěč¤ëbŚ™ÜĂţ˙¨{ďϲěľďs_~żü«ÔUŐqfz¦{zzŇrf¸aWKš2W ˛)ŮaČp€`´ “ AÓ¦EČ (˘)’’E›6-‰&).7ÇI»»§{¦§cuuĺđË/żë?Ţďľzőë żęŤ<@3Uu_ĽďÜsĎ9ßď7¸ŚĽ†ďűta"xőŤ·˛ö]§r¨GŢbćßĂ ^CŻś˘ł±ąx»ńfi’晋ĽÓ«<0Ýá·Ú"©ÓJ]şĎ¬Ń§“:¬Ć¦Ś“ş÷ŔnÜÂýĽ~ ttP&Ͷí<)î| ÷2ĺXÔůU×qś\sČ)oZV•ĐFŹ3îlżëuP*˘ëë›[,,Ţăă?ôaęŐĘ}Af1{Ô ě8ç]ď@‚eŮ”]{_ÝňqtµP=žK}ŹvÓtlŢ^ۤ¦‰|aň†÷}»ŰgÂÉ(EPÚX¨¶’k[-6űfë5ŢŮjŁ puť›ťa*©;Ą†˝ř÷şvuoűížŐďű˝.ď%)%7Ł3JRrçjń}˘ RB,J¸Ą¬çŻŰiXv™05‰˘,€=(Č5tpJuÎ=ú(–eĺÓ4±ě©Ô‰â„Ňđž*›î—ÉW<ě8c…V4uI’p§sŤµhŠĚÚuJŤSxĆ9Ľ°Dę-#Ň>†SÁ´mLKC7M4Ă€ÄC$=:ľÉí{›LMNđě“)•ĆŘhČ”rđ5ZmŘ\ë±vç퍋78˙ťŐŔâöwPńk\H“NjsĚč3e XMŞtS›yŁGE‹h§Î‘Äż™ŕvČĆU%fßMî3§*8 ‚ §fTëŁTÓÔü3 ˲öQk€:wńĹëP›Řq j|ŃÇkšF§?`»ŐâÉ çyčÔÉüÚŠk™ĘÄş®›S*Ť»¨Š˘ pâ8Î@­¦MµěŔ0k·×xő ĂČ+5{U"Up«üęgnßă‘fťK«Řę ¨ExqB+Š€=Ěâ©Mm‘^SJIß÷ůĚ­»<{ę$ĺKw9Ó¨¤’——×8UŰaQ}·qç`pŐžT¬$*Ľ×óS\îš S¶[m$‚Ją„a™L|ˇĽdE÷ůÔ(Ő±śLŐÍó=R ¦]Ć‹tdš ‹űąĘ‹fę073ÁäÔÍfǶs1(Ë.áG‚0Ę8ÉU’Aµô‹ßa’$÷}oB|­RLžçq˝ó6IR+OáTçńôsř}‚et-Ĺp«Řކ-Ö±-ŰŃ0,‹H6Đ5ÁÝ5Ź(ĘćüS/Ž‘M×)ÇßŕŢ˝„^7ĺöĺ7Ř^oqüÉĺŘcϱä[Üű.·#ŚZ‚ĆVęRŃBŽ›]Ňd1Ş1ˇ{Ě=:©MxÄwWp«L=¸b™T—ŁV•HUYFŤWÁ!ě°Śäůâ®ô†a`:.oˇÄżÝvůTÇĺ϶LfâÝĚâ}|3A横T*´:˘áNÚ¶mĘ®‹±ŹZÓX™ŘÎ ;ď T*!…Aٵ‚CY ö{*ŔW=”j7úŇŇ*řÎ5>öČiţđť÷‘C6V IDAT8?3Ĺçn.' ż÷Ö,ËĆ‹bŽÍď.!LMÓxńö]n·ÚĚת|qa‰vŃ #ü(bÂĐů×o_e#ŚxţÔ ţáW^ĺXµL?Ž1u_n´:Ľz÷ÇJoŻońîV‹cŐ ëŹÍ^ żŰęńćę:«}Ź{˝>§ęŐü}«ŤŚ™oMTBąäćÎŐÖÂ]Y[ČŞ]ĘŔ]ľ×ÇŐú„„Qš…agúäiš˘‰˝3A¬ă”†TD˝"ÉZd$;ÁëşyOâhÖ}śůŁ6Š ^©1A˙n»·Á˝ô}fŚ“H$7ËČPPs'°+óřúŁřr–(H‚qĆeüdšŔxYy’[‹[ź›ccs‹ůcÓ»6{ů)3Ú/'˝BŞO±pő*ťµeÎ|đo0÷‘OÄ:ďőËßĺv„ý,f&µ¬4¶ž”hĄ.sFŹšŇJmRĆ»öoUp »łx‡UbŠŐłâď‹ţă(U,eŠSW±ضŤiš\ň-®†. ‰ÍBâd›Aż{€mݵä¨Ő´ŃńJ Ő´,nÝY`cs‹‰F׹źÎh4Ńń —ű˛Đ20˛M#ĎÚ4^řŞ]n´˘Ą¤XUpűËźý ŹL6yg}“ůZ…ŻÝ^äÝŤ-¶Łßyă2ŹMcčo¬lđŢv›ł3Ó|ńĆ^Z¸Ç±j™ëŰ^YZc®^ĺ‹·îňdłĆ¦çóż|ůe~âńGy{m˙ďÝśjÖ©»W6[T+eţôÝëô}„ŕł·)Y‹ť]ăÝV‡vśňĘŇ 7[ŢZŰd¦R˘4đ“ ŔĐżĄ¤ILDhşNąä"4ť8I±´čľ$@ ]ĘĺLŃď·°Ä€0JĐMÍ(áÇŮš®k{×T$¦őF×qčw·#ÄN»ŁeYX–•gµcő(~‡ę{“UrAm*T6xµ{Źm±ĚŚyŠ-Ť ¨–&0«áéŹâGeR µ!ńHŁ( CTTi‡ L§ÎĆÖOž?‡=¬B”¨“RRŠŻ`i«řQ…믿ĘÉóO2qáŻ`•ę$fŤË˝Ú)~}»-E°ťşX"á¸Ń%–‹qťŞ2kôóJ۸¶gp«l4@*Şl©FxU"Ý HT .‹»×Łô@%IBz$‰D&˙†ŕ§fáĎ×ágçáŐ|Č9|rŽ™GéëT“;Š"®Ý¸Ĺű7oqgńół3LONŚ… >(qŘąĄ”Q‚e[8–™ÜăzĹR§r˘Ĺ,ĚŤV‡ő—×6č†!sŐ*~óµ…{4›G'|áö"ŰaēǦyqq™íŹ-$o­n0])łíĽtw‰c•2–¦1_)qm»ĂVßcŞěňţv›ÓÍ:_şµŔdąÄÝľÇçnÜćxĄL”JţěÚMÚ~@+YíXx´€Ąv—0•LU+|uá>>›čEÝřbiIYšÄaĐ4Ę%— ?ŤÚ™XTpÝlyŰşD`é)" łr–á L?ŇHÓdW˙–”‚důM’„$h Őr[[[čz†N4MslŞ×0e›Đďbkđđ·ćˇ˘‘šcMćV«…mŰcé…ďAJɇ_xǶ9wöažĽđxţóqÇw»Ý]Zë*@9Ě4ÝŔµÍÜA+­ôqÍŇ ŮĄ×ŮDÓÄ.v 1,_Îh­–»}^[ZÁŇ5аmlC'Ib‚(¤Z*aé:q’0W)c:7·ZlôŘşľ ? 
<;?Ă?yńüđ©yŇ4 JCřřçysi••^S×xzv†“µ*qšŇ 2‡?QrhŘU]ĂÔ4jµZľIQ¦˙=M¦„ţ€V»”PŞÔ´~b3LÜRvśAż»›n0ő”Şĺc$ŰD~ÓvV“^ä%™3S=?†7čaj;×áŮ&ŞÓéĐjµňLy˝^ĎçßaVĚôű}Z­V.%Şhó¶뤤ś1/˛ޡKĆ<‘Ó«Ľ›ľĚŐökxÁ€z˝> –wćŻišl·ÚĽđěÓó4ożňeÄŮŘôíďŐĚQ-:WÂ):©ĹcÖ&ŽąNb‹Ç­ l1~ tTǧiJ»ÝĆ÷}ŞŐjÎŽPôĂŤW}íqŮxxÝ5ĽAvž_y>1 ?w Âńď[KŽzŞméÉ ç0ujĄÂÓO<~$iŕ0 ŮŢŢ2e0EŁ5ÎX4Ç2цϳ^ŻçT–cŤ'%lá{=*• µZ-§sTfë:?yţ,źżµŔćŔăúÖ6%ÓŔúă( Ń„@3L2UIŐ4)›©”|őÎ"5Ç&,‚j\žäĄ;w©:–ˇS/ą´˘?I9Q«r¬ěň•;‹hBP¶L~řĚI,]gË÷‰ŇS×p4AÉĐ™(»aŹ©mŰąS¬9{Zšŕő»tş= ÓŔrëř˛N¤R€ža8’$AĆ˝][IŰH¨Y¶‰ÂŰ©Š˝Č&Nwţ2ÁÉÁčiÔÍŮn\3F¤Ů{ďt:ů|©V«G[‡‡÷«ľ·R©”Ót9 oyp‡’¨Ń4ʱŢ"Ä#%ˇ%VąšľÄíÖ{řˇGŁŃÄ)Ő‘z ©•‘Â+këY ®ë<ůřů”WśżQíňĂČ7~—ŰwŕŤ/}ťGž~–W>ýE&.ľ@0ű›ˇő=…yŘĎR‹qŤ;QťicŔłĹŤ°I'µyÔÚbJŚ•wÖý×ý“ŔػՋU,sŹK˙¤^LđtXSőH č˙fŐa҆óex·ý>1ż?člŻk€Ł—ɲ>ßě8sú$ýŢǶŽÜ´?šÁ í5^’·¬aLÉŹ‹ŞŐ„¤dFčšÄŇł,®nş9ËEš$hB0á8ś›hŇ(9śm6¸ŰéqŞQĺüÔ›Ź3ő/śçĄĹen6x|f’†ëŕę:5ę,vzĚTJ<>5Á„cS6 lËäÉ™I&\—Ç&TL]hô“”§ćfXď÷Rň'ç04ť¶ďó}s3Üítq ǧš4l› 'űWł-¦km‘”r_˛ú˘é"Ĺd€Ą–ťQĎk¸ŽHĂÎ>Ô_ k`ë1Q'Ó.‘`ăG)†YÂq˛~ľ$hc˛şČDĘ.Y5ďŠýä*t3Ǩ"‘:ŽjËă•Ţ"®(cę&·“ËČ=:\ѧ•¬áűUŁ‘gň“$áí«ď"†×r|v–˝¨ÔŠ0Ó4©mÂÖ.żüů©źfz~–[ďopěă?Ćíöi˘ď°PĂ7kÁvâb”ăf—ťĄ¸Ę1c@S÷iĄÉ- šąýä'?ů«Qşňźżb·Ń4m—$óaă‹ŔaµA:Ě˙ZŠmfŔMüË•hŔ‡đ^.wáoÎĘ#ebä: űęő:Í&›››ś<>ŹëşGbé)–švś5D7-*®ťoTËŽ’*>l pÍŰH0D–Mş…=ě Ť˘,h-ŮŹ6jśn68;Ń „ĚVĘ<:Ń`Ëóą83ÉéZ…Dhlű>{ä4eCgұi:µa{ĆĂÍ:'ަ\CÓ©Ůç¦&xzfŠşmsaŞÉŰë[<5{Śăµ שּׁńüńcţ_30mîÇł Ä_"ë§ţđ[Ö#˘–H7»ÄčôÓąŇĹĆƆ<*?kQĂ[±<şĚaÄ݆–R˛Âő¦¦&?6ĂńŮc‡gI©zŽYްµ™đŤO˙gžů8KŰëřY–S‡>ËďUHN ĹÖă2ë‰ËYk›T Ţ '÷-Ť=(ĎmŻ×ضí…Ł»¨ő "2Bě’öóšT¬1lăŃ ‡ż1ĹqL[?|š\Ňu Úâu(?¦>{­%š¦áů÷VVY[ßŕÉ ç©U+ÄCU‘kýA”ÎTŰÜ~<ĂBÓi62’Ę€«ńăpĹkBR¶vzţXÇpšřľ‡iŘN¶ŚnRöň©ŽăäÝnwWegÜőOUQUrDŹŇ.Ą®C™|9ČŠóÉŹ-R˝„ë¸H$i’y[¸{ȢŹZ*^d ő¬ŹZ#&ĚüŢú.*Ç~hďSA*ňű¦išsĹßwÝÁ†Q‘Ó4©×ë@Ä۬R·&y?|[{?4t,,is‚Ç©W2?˙ö•«,.-S­TxęÂů] Ľ˝LJ‰eHJť˙źăĽň©OóČĹ ÄÚ<-ScsęGéF•Źń˝lU-äQk“Áőp‚cFźi}Ŕí¨ÎjĽ?gşX__—RĘm}sr‡¬[ 6ÔÁ`đ@Jg{w—­CŰy±a˘ăVŹeNnĐÇŇÂ(%ÁÂ-•ó`Ç÷ýśg\“rGĺăľ SÚť.·îRr]Î>t]ŰípŠăżĄ"šIĄr°nąÚ(•ŠŠďŻÔôB‹jmŻ»†c¦ř‘†ÔĘ”†ďŔ÷}^]^ăщuËduŕs¬”•{ş©dұéd&5Mc=Jxd˛y(ůyQmEęR¤iQ·­±¦˘CM’„Á`«Tp8:^.,jNĄ Ń›X¶CFX†FÇDˇ‡­ůčâđą%Ť íÄřţCzŘF‚’~h‘R’Bě(ŚĘ îlhµňqRîčČA@§ÓÉżŁ Řđ—Y’׉Än'=/Î2cťdv)‹ĚkšĆÍ;wŮjµh685?w(˝”’˛ľĹŇĄ?'ÎŕaâĐŕŐ×îQ~ćynĎ’|ŹČĆ5Ě=NÚ©ÍJ\áŚŮBɵp’Az˙Ć囑ßŐ4ÍÝĂ8X%Iő9ŞMâ`08t“»ź {wŁţ#‘—“ÖŁlSĄ‰ÓvĘce#'?ŞŇÓčuěµ–hşÎ˝ĺU®ĽwŤg.>ç{ś:>źűůbţ7ł)LÄ. ˇa»%Ş%'˙ŢFŹ;ş†Ś©¶ă.?8•Ľ~ K ńc ËÉJóęŮ-{>ź§§'Xój–‰Łk¤¶ˇëŔĎSÇq†Áj§Ë„ˇ¸Ů—2ń)ŞBÚ¶M;I™°Ě±¤G“ *ëyŘ{ťO^db—'3itMŕűdâaiŃ}ŕß˝,L4‚ġŃl')ŢŔCKz8fö¬ăTŁśĺSí=ŠŰY˝µŢź×^˛ÄJ1RUÖ3ÇJeqÝ붆ÎiíM3K"¨XB× ^{űŰŰ-f¦§xěá3‡n¤”TąNwí]3ÓhÄĽńµ÷XZŮŕÔňs\oźůžc©9Ş•´GÍ-t‘r#ś`bČ‹{7ޱW÷Ľ?ý—~é—>Y ĆŹWäŔUŁ ł"mÖa6ZfË–8Â1“Ľ¬JA,Ę8NÖĘ Z"@1B†ô©Üáf,NŇqŻCµ\Ëdiš˛˛¶Î§żđe;ű0%סYŻí‰ ~ž×˘ŤR°ĹI‚fXT\+˙Řö»vőţTĐSluHĄ–ن—K ]7‘ń]ËJ=ş–rm˝ŤiZخÿřú›|ěôq ÷ĎżČÇ>…fŮüň_|źeÝËŢ×»[mŢßnóúŇ Ű˘R.QvKÜŘnQ±Lb +žŹ©ë,őĚOL¤)××6° ťßű*ŹĎL±ŘîPÖ5VÂŮZ•–çł„Ôl뾏»V«ĺĎT)ŕAŁ™Ő{/>wMdĽŠęGAbQ®ÔŃ5 żß"űHaă¸%"iéżzuQjR*׆šf ™ÝL’¦ÄéxŤď M=Ęď¬@Bś*MYń[Q@ş"ţDeŠj:Śč#ÉĆž6ź`3^ćvr‰v˛A"c´Ŕáú­ŰÜ˝wŹçź~r,C´o|ŕÓďtčl¶°gž'~dš»ţÂä/o¦ hýÔb MćŚ%-âNÔ ®Ě ůGůżY@ٸ"EöőţG[­´UŞöŇDšő+˙VĘ Ť~¦îň+™O79ďt™Ô=âĐĂ S ĂĚ[nŠëČa¶×Z˘®C•a_yí ~ŕC/°±ąÉńąŮű2śj-Qăt-R ťśßT·¨”]4Áľ-Pj P­ŁŔeÁĐ ˙>J¶[%ôűŘF‚©'¤qŔZ?¤KŽ5ęüö7ŢćŮŮę–Éoży…MĎç'çůŤ—^#Ij¦ÁňŔǶLţ÷×/sĚuřúҵ*“•2«˝ý8ˇdšl![AH?I@7¨Vyo}“8Šxme›ŰmÂ(fľ^c-ŚI¤ÄŇ4ntzTm‹Q’2ÂNőě0Đ´ ëźUAk’ 4«ŽăŘ$qDż×Â4,§‚$iŠ®%öX ÂPl I A$¤„‰qhI^Ęű)H8,Žă]T‹Š*MYqC¤2ß D?Q™¦’L@,đÄÎĽ±)sĘ:Ď­ŕ2kÉ±ŚĄĎÚ˝6‹KK›™ćůgźÎĎżź–Rb Ż˙+ŰÜ»q‡Ů“ÓTý«pnŽ…ŢcÄߤŚí÷‚e\¸M=`ư’TH¤Ćqł:éýL)÷ńÜî¨*'Yä%-–?”c(*ťIą7Żß~¦‚ˬ\[Á!Mct! 
RĄ‰¦i úlÝßiWHu˘DĎÇ !†$úűs3dETëĆV‹KWŢĺ‡?ň!`™ćž”3ꌎ?ŠRL±8Š"LŰŵ- ]»/¸Ůoü^Î9‰Ł]]*@CK4M‚OßZăNŰăó7ďP1 ^ą·ÂO^|)%/.ÜŁV.łŘé˛ÚíqĽZáëK+|ţć])Yé Ä1©”üŵ›Ľ·ľÉFňââ —×71„ŕ·^}DzX÷^^XäťŐ5Ö>/.,qnj’W—yőŢ :’×WÖřÂť%¶Ă ÇŢĹ‹«˛Ű@έ¬žŰîůł›ŰS×d c Ómć2]f‹‹HĽ0BÓ-śˇsM÷áVĚśrFýfÚF“>A$1-t‡4I‘cĘě Ň#t]Ď\µ`Žnl”€ĆhŹ[q“Vv+L•f±Ł Q‰€ 1‹«UŘNW±t‹›Żoł±ŢÂužşpǶđý,TňkŽÎ_)%yŤĐďróŇ%zŰÔçgŁ7€ĆyÖÇŕ{jćAÍ—ÝÔfĆĐĐ}nGMĘZÄśŃ»Źžć› n‹>ř ĹŔQyŘíżÓ4ĄV«=Ň™â7­ŐjH)˘8Ł^CR·‘K-› ĐDö¶%?yÖO1+¨žĚQ*Sk‰çů|ú‹_ćÂůÇxďú ]gîŘĚľmKŁJgGY‹T ­řM·ŚĐôűö »×…-‰‰©ËB[‚Ťe;wѵěRđ[_żBÇóŮň|^[Zăů“ó4,“÷¶ZĽżµÍ3'Źóů·9Y«đÖę—V7Ä Ż..óôÜ /ß]âő{Ë«Vů?/żÇ›«L”ţěý[Ľ˛¸Ś©ë|ćÖ]&Ę./߹˧®ßBA'yse [×xmq™Ë뛬úwŰ]®oµy|˛ąë^ (ěn‡8ŚBT×2*/€0µ)U˛ Ҡߦlú¤q€¦ŘŽ‹nş±2Aß'‹ë'&ĺJ!^ŻqŹ8Ířqcl˘řŕŕ¸hjŢ)HUë]±Ş]´Q®ve*ŮU)U™pŽe~Xf~xR§Ş5ą—\ǧ‡›+&ÓSÔŞŐŚ6 ąËěąYPá:—ľú%N/|ŕŢxű>}’’{8˛·¤>(·˘$CĂ– †e‹U:+—+„qVvÔ\dciĂţMˇńĄ…u~쑇Řô|›šŕÖÖ6ß?;şÎÝ^ź~±ÖëS·-¦Ë.ť `Ĺ<;wڞ•©zýÇŹ>Ä[«ë,wűĚVJ4lÍĐůOź8ÇĄŐ ţęąGXěôhy5ËĘ˝IÂGNÎsimCÓřëça¦\BĘ”µţ€§ŽĎQł­\¦µÝ/ŕ/fżlŰÎhł1ő4Ó.—.Ąaźqŕµ2 ^˛^0KËäxý0Ųmtł„g·»ZdR‡R9SÇóúŮ1t ,-&‰ĂˇsuŃt}‡X~ŚyŁ@gŞD äĘ7Ę©ł¶éÔ"S/7™4ć°Ł*kéć4†´ĄĎÉňYnß]¤Q«29ŃDâľ ©şL’Ű”¬]ýËw™}ä"qbO?Šţřiî´Ď? \â÷˛…R§ťÚLč>şÇb\ĂŃâűčiľUT`ŁYČ"Wx‘@~4‹T«‚ÔqdlGÇÇqLř¦‰ăV †ä÷č.Ą!ĺ]äµ0őťo‹2Ô{Ń˙¨VÚQ~l+VĂ––ąřřyî..ńÁçžefjr¬Í~Q©ě¨k‘‚(ŽI¤ ZrĐ4‘oüŽ’¨(zA€Ě‚»MÓ‘‰‡>ěŕYDô#řţł\ßjaé:1GłV单5žhňą·9?ÝÄŇ4Öű}&K.SĺAśđ}óÇЇČMîv:<>=ÉtµJÍ4ŃüČهy«ÍTÉĄăůô˘gf§i86÷†YÚ S5Ň4ĄY™dŇà ˬËʢ΄>G;]§Éßxő ÷–VđG‡íŁ> ¸Y(›Ő×™:ów®\Ć´lžúé˙‘–«˝‡ţŇ·†ŤZŠ 5üž0»tS‹Nb3ov1„¤ťÚ¨•v_ž[!$+B“žbZNNŇ|Xqt÷z9U!$e+ÂŇ"˘Č'ŚRĘ•–Ú9őÚXzd–j„‰1rŚý•ÎÔhś2™”’ůąY,Ëbr˘IšJĘCŕŮ8¶W&ő )Ć]¦›TKY¬Őj勌Çlˇl§UÂÁv*řˇÄ˛ěŚłRS÷!©Ř˙îÝ;<7?Ăr·Ďů‰aÓ‰ťšŕD˝Š’şmńÎÚ&A’ňÁ“sĽľĽĆ±j‰‡›uÖĎÎÍps»ĹTÉĄlę<ܬłây<6=…EÜŮÜfĹL—\˘4ĄlYšFĹ2ůĘÝ% MăN«C/8ŰČĘk*c«Šňł{=w• U;_Ͱđ„$8%ĹežFĄ&$¦Hâ0&ËÜč~$$D‰ČłWžçaČ~η(DÖ桑âůŮ{˛ĐH“řŔ6‡˘9Ž“;2µIS%@ő;ĄH´ź!Íę$š„ALĂžÂ.'ŹťĘ7Lb~¬ IDAT“ChqĽšżĹ Â`ĺ5–o\áâ‡^`ůĆU&N?‡ţđ Ö)Úasź«ůöZ’¤ôú›=Z­>őzé[~ŽHęl'.u=`VďłśTŃ…dNďáIOšßrž[MĂ'|ĐĚŚ)„8<;şÉ=ŞLŮ ŇÇ",§„í”±lgX=ëaé^î‡)Ń-ܨҙâd·š¦ćđܱ&†ŕąďá ŰvĆ 2G3©c+•iŽë`[FžÉ}I÷ťĄFŠIĆhz–iҤźűŹšmńŐ…u®nlóC§Ź$)'+%â  ĆüČcsemťÎϢ ÁB«C?Ž9Q­Đ &K%,]ĂĐOĎNs§ŐÁ #žś™Ä˛LlËâ‘© ¶ý€ŞiđÎę:Q’đˇ“󼵺ÎDÉĺĂ'çů‹·ą±Ý˘îŘ\ßÜĆ5tž;}"§đTřŘ?(úŹbËX8¬DҡTÎTŇ‚ÁvždPfh)¦ Ă€Dę”Ü ‘´ăť–±¬˝,ó[~j–ŤŁF„ÄÝÄ´]Ň4ř×ÇqśWÔFMUUuä g0ú4MŁYť¤.§DŞf=5™tg9}ü7ď,đCza×±ö‹#˘(¤uăs,^żAk} ¤Ćń‹?BŰpĎ;M?ţÖűŔq-IR’$EJĐÖŻ?˛IíÔA’9ŁG ¶—9ł‹-Úi&›.ÖÖÖäž=u¦‚8HŁ™ď|U9xÜ KTŇ^ĚĘF?q*Ŕl⺥śC/ű8F6‰űˇ5V?ŤRQÍď ™~PnYµZ-óŚ‚ĆF‘ëă<‡±Ć Űq©–ťűš×•´ßQŔ†–bj1n©ŠeY´Ű- 9Ŕ2˛űH:9ç˘ú  4MSz˝^®’¤…4VMńĘ (‘µ‰h6›ąî¶ďű„aHš¦9ń·ú;ő7Jb9Ę Ú¶ť;€ííí±ËŚIÖâ1jPŮÜAg=d~l’ęeŰ!ڞŚ@c¨’ÖmoP2ď/Sv›T*СŔ°ltÓ" |Ň8:ĐąQĚ›››Y 1Ěž[F»‡™Z”,ˢÝéňň7^çŁ?đ!tMËKq‡ŤwÍŐKĚťwŻ0{ća¦ç縱ŕ#ź»ŔJçÄ{€¬ľvéň"®kE –ĄsňÄ–őíé5ÓEĘĂf+ËŕF5Ę" aÜ|ülĺ<ůúúú¸OÓĐRĘV6żÂXCuJĂ$C§Ó›a\f•˝Î ŕE:†]Ď9u;í- ,#«†x‘I”ěź±/‚˝Š łĂľ`7J˝=˛*ꯍŤ€h¦CŁšU’¶¶2Ôűľ łCL ŃDŠi—óŤŞďő1d/ŻD¦R0]e•¦iůĄXSTUŇóĽü*_Z'©gäÁXµZÍ3čív;˙ŰŃń{ý·ňťJޞMĂ84ŚŮq®»#Më8ΠŐGOZ‚ÇR)đbÝ*c™&~FÜR5_Oe¸˝«’ Ćő›TJZĆS.4â084ŮPí*ęŻbĆZ­wAŞ:j 3 č ”ÜŤzŤ…ĹEĘîÁҸ*žqµ_ţ?ţ>ÇĎ]ŕ‘‹çąüŇ›”źř(ť{íďű®€Č¤„ËWî¶m`š:ç›ű¶śKÇŚ§Ś­Ô¦—Zś0şl§7ĂúŻýÚŻ}rt*€’ąT ›ň°/¦Űi‘D!ĄrelĐÂAeŠÝ'BbŔOĹswÚŰM§T®&^"ÇĹTA’*÷Ž :S=«Ez”˘”âQ5ŰGËdŞA}t“ Ś Ľ÷ë–Ź 8)škFşDÉ0 źÉŃJÍĆ R4Rüx¨ý=tvʱ©]bŻ×Ë™Ô")ĄĚßq°+°-r*Tżşv•EW˙ŠČçâĆCÍ™â9ĆÍ Wěm:¬T2Çś=GS“l‡?×ÔąĽ9 éZčBđ~; bYtŁKk4,]3H4ťmĎ'‰cÖú:aBĂ)¨Ź%:ápˇBHÉkK+¬úĚŐ븮ł§F¸4ŤÍ8e˛\âÝŐu.­®cč:–Ló~\}¨)ŻĎŹ2˙Ôü1 ;‹÷¸ł°HET+ĺńĆoĽĹöę]Î=˙aâ`ŔĺW/1óŃŹ±ĚĆßť¬-@ŻđĐ™iâ$ˇQ+ źĎ·§="ăÂu0eĚśŐg;vHŃ™7{üćoüăĘÜţĘŻüĘŻ¦ijîšdß®ňĂabćý‰ÝnwWuŘŘ«L~PR†žćĹD¸ĄZTąn)Lü &N5˘äŕŤD±ĺ&Š˘ĽW ě{RćŐ_^ĚbU*•Đ×ß@±šˇüĘž‰ÍȨţL=—Y-ŢÇQ[> MR±#H9*«+4†tFYŮß´CçbŇĹÔ#|Ď'NĹž łý¬^×u=ĎÄ)Çŕšq†Ě"Nv€?ľďaŇE' ď…X–»#wz„ťűh™LZę8ĘöëĄ-s)ŽĽŁ‚Ćö-“iY»cí]v.–ÜřAýĽşyvRl‘2L‡N{ˇé”+5biE÷gí˙î’đG+[|­ Sž·'"xÔ$(˝q™Xă h¨Ťěd,…CŔËÁ¦–ąkĹQ»ó®˛6 Ťůć»Üëz\<6ĹßýÔ‹<6٤jŰüňgżÎ3słüΛďňô±I>săĎÎNsŁÝăŤĺU^^\âáć$^€k\oytB 
Cwx{cC×):·;=®llah·6·(™:7{Şn™~Ą)bE|öÎ=ţĂű·řř#§ůťWßŕ©Ů&]s¤E@ÎŽŠHWý»ßxëq’P«Vyćâ…¬—ýďWK»¬\ţ4Qł|ăçź{–č±˙‚›ţI‚¸cl4ż]f™:Ë+mlÓŔ0uŞ•Ł©oŤcR’—Ú~BW¸ˇ1oe}·4řçżů(¸ýµ_űµ_µ,Ë,~ÇÚđŰUŐ3Ýn 7‰Äm’Č'ĘĂ@aĽ¶ŻNe¸_R]Ą'8Fś š¤R ŤZ.µz-‚`€Đ JĺI*ŽŘUýŔŁý¸Ş×·hJ¶]±BďCľDÚ}żń;÷!Đ-'lŘ c˘ĆŹăagŁ˘ @¦V…n·“e3í ý@ĹŮąG­V«±DüÁâ:_îú kLČ„¤ĐĎ{Řś÷ýAÎ2Łüó^Ď~Ô¤”9HČ«ăĘÉ[z‚©§¤2{©¬´¦› üMüŇç^ăçfąĽŃĺ·_ź>=Ë{[˙ěµkĚ×ü«·ßçëKË<\+Q˛~÷ŤKśťlňŻŢĽÂtĄĆťNź)×f€”&Aj¤cţŃ{7ůÚÝ{Ľ´°ČłłÓ8Ą‰Ěć¤@ź~/NřÝK漣 Çć}ĺUş=NŐ«ě0‹AgGećR„!×nŢâÄüµj…ją4VaÉMŢűÚźpę“l--đĚ_ůqĽ3?ÉrŇÄŹ&řnz…Ď©TLSg˘YF׿=ë‚”¦’Í>ô¤Í¬ĺQ5–ă “ş‡ţ‹żř‹źTŔ5Éł]d¦ŰI7'ů­śĘD‰]Öq>Ř›vKĘ”4ŮAĺDěqn‘}$]/!Šâ#3(Ű t†ańúŔd!qXJmnű‚R §÷OŘŃfńQ*o ŐRÖpnůhbżM†ě’”M¤‰ĐLt9@'Ŕ÷4ĂľOéLóZßçg^y—żXkłčGüţÝ ¤ađÁZ)ĎÂě×S=šuQΰxíęoBuuËU)Nµ7ضť—GKOrÚ™05qËŤaż`uŁa;\ZŰBJÁĂÍ—×¶SIÂb§OͶ8^-ńC'śť˘Zi°>đř­żÁ…©&yč4đö»l)HŤĎ޼Ǧ˛ÜíÓ #jÔŘňľ˛pŹvđřP ţúĆ^óĄ…%îvśiÔq5ÁsgNńňÝe^ťâęú&·¶;H$Ç+Ąü˝«{W^¨–Ž‚Ü4•´»]f¦¦ă„’cÓ¨ŐvUSö›żV÷‹ä8I˘±róť°ÉĆÄóôc“ďf` 0đBú˝€0Śéő&'żuTdI"ó€ ¦©aYBtS›HęĚ™=b4ţŮoţŁ nţçţWĄ”fń=h˘Ŕđ1`´ő,Ó'SIoµŚó•©DèďŹă׌v)ő‰Eąše”˝®fÔ‚iV ±,;oW8JpYd·)&Ôµ+ú%]×÷í//VĂŔ¶ťŃ0nm·8wöašŤĆá•ăř/!Ě*o~á38®Ťś˙ď'“*ßm¦šÍÍ>ë]‚  ĄŇÁĂGµ$•ř~BĄC_¬k&ťÔaRPŃ"Ö“r¶" ‘ˇ@[­BęŤ&zífNĎóH©‚§ť±š¦ĺ‡e‘Ô‹ë÷űt:ťĚ1U'ń—0±ŽĺTöwŽ?úĐţË3Çř˝»Ü“|ěv»ôű}*•JľHŞ]­jľßë~ÔG¬Ô~ęő:®ëîzvEÝrµMÍÇq¨×ëyReÓµaS»ąS~ÔR˸†ĎŢźöŔă^·Ď;}ę¶‹&4ľt{™‹Ç&iş6Ű^@ŠĹ+ËŰąŽüŹ?zŠ–?ŕŢö2•hšÎ3ÇŹÓt]NÔŞĚU+Üëě Ůź™›á'Ď=ÂŤ­wZf+elmJOb¦k5,· B ‡ ĚOĚńçćý­Öžs!Z­ý~¦Šćş.ŤF&*±źm·Ű\ż}‡k7nrúäqÎś<‘'MÓűćŹ2-ŢÂŃ×Ńd‡“g>ň‰¦~îG‰©řNYĄěĐéfŮŔąŮú·äI"éö"‚0AÓ•’ăčŚľŠµ¤Ěőp‚˛/kł—żőLŰefejËÉľ+ßó0EX'wŤU>`ô;:čĽęý !h }żéH E'Á"Ś˘ ´KD eFŹÔív±m›z˝ž#ă”™hÄööv^ę+ŞđŁWę|ôrťďŐä#ŻŔűŢţóLů›^ŻG·Ű˝ĎŚó €|Ľeg]–aäe÷ĂĆK)s?X*•rV—üüR@Á+ß© m¤¸ú€ żIÔjµ<"ë/ţ×iZ&ż÷ü9ţđűĎó_=4Ëż^ÜäzîZöú†Ą””JĄáZş[QÝű`0 ÝncYVľŚ>;uM°S¦W›ív»Ťďű»®†­5FŚ;Ż·”ýŢ÷z”­‹.^o›?ązťŤĎß^!J>5Çço-#ɲÝI*15p “©j¶Ńë‡!%··ńÂí0Áµ\~ęüŁ\Yߤb™řqLŐ¶ňë15Ť’iĐ #4 ăůĚ—]>uý6h:B7¨•Ë؆NG4,“żóÜSÄiĘŐÍ!ől[[[y&ĽT*Ńl6ó¶Ç˝LÓ4^yýM^{ű?ôˇňüłOaFţ6›M,ë~Žw=Ľ…Ą­óČă ~ä?űkT&O°ĽáÝç—ľ[fš:Q”Đnpěo îAJđü„ţ «dٶNąd i"żď~jr5&”:łzo‡-!϶%!ačă8%Jĺ¬d•$ ţ`§/&‘/2sú­ŃR»"q§e@©'Čx@0Ü˝ Ý%‘F~Śâą‚ŘČ).Š­j÷Şh“ĆÉ ÂNĐ´ť\ó ţ»Óŕčđá&x±äIű~)ľ˝îŁČŤ8š gĽfXTJvî.“nE 6U&2˨KZ–™0D˙L DQ@fýP*@ú{Woó‘©:‹~Ŕ7Zś);Ľ¸ŮĺbÍĺ±áÄ-fba§WY•jĆAő«ld1 ,ĄÜĹĄ¨$žGÇy›†*łĆQŚ.b4^lS©fÇéw·±‡`D„ĆívźżuńĎÍNqłÝg®^ă§NP1 Ž•m.L×Yh÷ůŇÂ:OĚÍ1W«Đë÷™p==Ă+ŰL•\‚(浥U&Ę%šlňúŇ*óŐ27j¤Ţ\Ý`©ŰçŁgŽłÚĐ CćkšŽĂ”k3_˛qÝQśŇń}ŞUycuť7–×řŕńYšöţ»ßýZmFéRJŽÍĚpîě#LMNĐéö°Lóľă¨~ňťď¬ÁK8.M' Zć“T'™0CĽTĂ;Lôť0!ž‘¤)ë]¦§Ş„ŇM‰ç'„QŠméضŽij‡.ž4é¦6˙ćźüŻ”ą-ňÜFQDřX–ŤăVH¤™¨Ľ~k—/ôc3ÔżŁŁ˛#¤IHúŘN Ë©ŕŕ…đŮ/żČÝĹ%&›UĘönĹČ8ŐvJ¬c¶Jíej|E|˝ońHYаŕ&ˇ¦ĂI+ĺ6^Yd÷8*ĎŻ”’(JÉA×µśúé(ăŐ\eҳ矎TĐ,:"őójĄˇKtáy>B3¨TŞůćţWŻŢćŁÓ îx>_ŢhsŞlóâf‡§j.Źâ7¬ÖĎ"€n?ŕSŃŹFQtßłS-jŔžtĹJ@‘[X¦Q^=“¤QĎű„Ó Ťˇek?NX÷ţöĹłl ša2S*ó•ŰKüŤ‹gYî xvnŠ×–·ůÚâ&Ç›uę¶ĹĺĺţŰď;ÇW–¨Z6?ŕv«C"4\Ó$ę¶E79]Żrm»Ťź&l<š®ĂćŔن|ěˇSü_—®ňłOߢâÂ-iQ&ż[üe٠ѵ/2¨Ögň@±×mci>Qj§ZžąÝű˘–sµĘđś©"Ř(SşÝ.zÚÍQ‰ôCëŔó+ŇĺŁĘŕŢţń=‡ź™Ďě•.TuÉßžěď©5}Đý«Ü8RšMöj­Žet;ۤIŚĺTĆ?zţ˘ś˛7ča9QiN%cožâ(k"Ďüüµ—®đPŮáÉz‰Vśpҵůź/Ýćź_<ÉśýweJ M3™Ýq39@.%¬ŔSŔ}ňłÝ»rÄAŕú}śR ×ÍtŰÓ`k7˘Věe©ÔńcĂĘřq}Ď#‰ű8¦$ŐłlX†$ţćóV"Ńč…6//­ó}sÓBĂ´llË ŁK*˘–Ő‡©äßľwź~ô!Cş3?I„aĆ—+äŢ@‘ýl?IĄŕÖh4RćYžýžˇ:ަi :K”ş˙/Vu¤ÄëőąeýçTLť†«‘JXhÜń:ńw'Č•RňŢű«Äq‚®k™óâ;W’WÉť‡±F‚Ť[®qká.oĽ}Ó4ůÁ.Ň(©–#Aw?,厌íAśčűŮź,ĆgJP5ŕóđS“1OęťCąmGŻCµ*A0V5LJ‰SŞP-»9«‹BËő>ŠľČ÷}Ҩ‹c$HĚ6{ű´$„˛D­ž4ěk—y˘VⱪK'N8ĺÚüO—oó/ž8É3{řáâ`YBdÔqă®cB°Ažů… ĚxSĐčúç úz¤Ôę“hšF§˝…«ď<ĎX ˘\]ŇŹ5Ľ8ŁţBÓ ˘>®ĆÍvDĄŇd®VĄßë„j–FJú‘A*uţź«×ůč铜šh’Kí˙qçň#IVťńß˝qoĆ+ł2«Ě 7b4š00˛…‘lX±eeYňĘ;ł`cya˙,‰˰b‡,#,`…¬°d!ĽŕŐĚŔ0žič̌ŚÇ}°Ľ‘YŐUYYÍŚ8۬¨›yăÜsľóťď<k$žµsTťe¬q$yP®Ź4Ą1|çţoř§W>ĐóŻ‘”MKg ÂvĚëç=w’ř _ÜëËçç¤+Č#„87V=ô˘\Ö x1Ž˘ü9SńżDÉśeY(ĘŁO1ŽU? tă‡W"? 
PV ''%Őş÷a/˝x3µ„¦µÄP.\4‰G¬V+6ĽŽ,™îp^`ĺY>śÓ }R–KL„† *ʧ»ÖÜ^ť8" ň%u˝Ć¶‰64F ťčűÖB ›ëĐYëo‰śĎÜO+*Xř»gáÓĆ av]‰ęâ÷Ř 2wUΙ¤YNž&´mCWť0RŽÎJZ7"Íú@˙&ňkaýól“ÉuŐóć‚ő’U‚H:›őSMćó9?XŐüŰ/ŢD„CLŔ‡& _yé=°UĄ­đÜ‹˘¸ŃĽű ˙łëPMT¤pý@†xLś$,Yą:!‰®—o3NĐZMśEőş’­byJ­wţöÉąĺ""NÓAfËŮîr&”¨(b:íKéĂ^­kÔh3N˛kÁŽŕöÓ78×đěĂűp•FđąűŘ쟬ţ!#ů{DăÚÚó?ćă8`žĎŢ• ^]Eüt>"RWgÖď¤ŐMG]w4Ťa˝nůó÷Ţľ2H팣iJ ´’HÉÁ{sź=mp{zzZYkSŘĘpő#ÇÇäăé€Ŕö‡\ŕ€†ľĺ> !’ŽńŽô—uÂŻ_¦ířËŹü¦©E}ÜEmö—Cg}š¦TUuđűűťőŚź”Š÷fpÖÁţńÝđ73?H8Ţ$Y†-ཫľ‡Ň#¦G}sYą:a$[jéś4Ínt»cŞŞÄ› ťL{Ő›č<’Ú n¤·Ťbęşć»ŹÎřןľÁîoţĘ4áË/î÷Ă! ČřeC?ö™÷žăăăÁŹXk‡†ŢC®U‘$ËÇŞÇ1M]c/‚ ŻĄv#_i­ĄYŻTÂx6ĺCF;˙Łę4Ć) cČŁžV#ŁPőđÔu…·—kÜ–ĆIÉ,KŹÇIg{ĐÍëZŕđä ¶<ĺŇ„$+Đf%ďşç¨µ&ĎbÜŻg‰)r_ü=żhďńl\óľ¬eG”ťçG§9Íź2f­ăWŻ=äέ1Mk¸{g˛WąĆ{Ogz©¸¦±h%ź* ˝hjž$<ɆÓXÍxĂ›YW±¨Xk¤ĘÎf$Ëu¶Ë'M’„ŮlFÓ4çV-Ďo–ÖjŇIżv±Zŕ]‹Ž'Ä“[=‚iÚkoz·Ś‚Ë4M÷"ČŢ{žź(>ű,kß„đťw´­4o6ă!Z(Ő,—K´ÖC¦ö„6˘ÔŚ6<ĺuUm´guäP˛aątčMGîM´űő Őę1RgL7uëú‘´žŢ)X'‰¤#Ëű .č?~|ó/÷ŢĹ^ýŤó|ôxĚžöZ‡JZóů|Č>Ă!wU€.ŘŠ¸ěŠ„!m)ĺPrÜwď©6ýÜ-°Zo%ÄPÔ]G¬ž§»kJz”l©×§ÔÄM{ŤŢ¶mń®! Y÷@{É/ĽĄ. ¤R$IŠ÷1M˝Ţh+ö‹/DÄ×-řUŐńňQÁ?<{‹gb=dňÖtčQŚJR¬iéÚĂ4MĂ3;;;4˝ˇ\Ć[ľĘ„¸ö QßGLîĐÔ–yüAĽé“˘…Ńüxˇ™–†ĆŔoß,9žŤécZIŇ$˘íz~ˇŇřŁ×e¶X¬™ź•ıFEO:ǶµtĆ3Ň˝ÓĎłčm hß ô/[?lůŃFş©X‚­ Ę8:šHСŤcOy•ÚőĂB*ÎV-­íĄçš¶ďČ'3šzÍş+÷jÚgY–1›ÍŞB˝ś{ľůîo€˝4‚gT¨ˇ¸,Ë錇 4¨Ó\ęĂeÄ(N6Ő®-Z¤đdÚ`Ý’łł†4=ü>v­,KÚ¦"IÇČdëËŘyŚťí+GŁČBŁőhUţ6Uüó˝»|ńµßoüpÎçßżßCTavóú}üg° ëxÝ˝ëČ“ŤÖÔU‹PăA5§ij®Ű=Eç׬‹ˇ2ňÉńV§ľZ‘ě¶ÖIŚŤđxňÍßŔŰŽ˛čP:&Ëň uŻŢHDвŢIDAT‚mÖ‚Nk^­ w Ľě=ͦYąZ,zešQi†i›ľ˘vŕ祥&¨ ýshnÔ€żzřK¦Qâď Ť»Ăëţ=t^đFťň»&Ṹćnęy|˛¦hwn%XןŞJ‰žŁúůß`Q$™Lź¬Vý>{÷sç9çéŚ#Š$mk‰"A$%yööi“‹˘(|UUÄĘ’ju·Řж f}2dXu§h¬ÚÉB÷gÁ—Ůnݶ5“x«ăhťŔĘ)ůxÜoŠę„XYŚ46!ÍĆOŤ`ZN/ZV`­e>ź_úyŕP9«0ŚâP ™kžçÔuÝ—ądÄ(Éć›®×îôÜháÚ(¬“¤şŁ5edY~p™m‹AŮ%ßŢ8%¶-ńlK›‡sţăÁś˙~¸ŕżţęîĺéŔ˝lý[·n!Ąd±X \ŰËPüÝY OŞ;ŁQ< ™°ŘäŮęş:„Żş_€ĘŽ™NgĂáĘ;ëŞ`$ktt=úSu1Gł;ýsp޶mh×+beŇS61öş!"@¤FäY†µeY±Dđéźý?˙·Ú–ć>r”óŐ˝Ź´Ý"CŢ{„”ŚóĽ/•­kĽí7”čľ}űö€žäö÷gTüIňˇ\WRŘh’Wřu=ćőő“r[Ćöü¨“yŤsÄ’<ÓëPJ˘"AÓZŚőhŐ Ć8TÔw˝OíxŤé儤8ç7Mýá‘& ç=ŃćówÂ2źz˙GKŤÖ Śś>«.“hŰ‹ü·5J‡r÷ˇże°8ŽĎ]Éž64źĘ˙üÖ÷ľßG_ůĎÝÎhŚb”ŚßxއX(U]ÚŐju)Đš’…,—[BÄCW>0ř‘§ˇl]†dKť0ťä<«ĹŁsĹ@‡‹#őšdSM»˛wÁ0‰:'č|Ęt:ë§,VKF˛E AŐmůÓÓé­ő9*CđďoťňíG+ľń±—¸+ű ů޵“$öOŘÔöˇŘ:˛X'qľ—[ C Ú¶Ą( ň<ôß÷ťĂćbś@Ćw)ątő’D۽Â+`Ôs(VKlW’j‡žÖF¬»ýd<'ÄŁ¦m¨ë^)ä{EËç^{Ŕ[uG*źĽ;ăKĽ‡ßhÚBŘ7Š$í§†U î°1ŇÁÂţ <ʲ¤,Ëߡtů ň‰a×sňĽ)^äŐ*ŁŰ9ĐÇrŮ2kNç JI&cŤ”˘W BÇumRÄ 8đ«\k­¸}+eŻÔ‡1}/CÝX´–Ăđv[ŞŕůČ=ŹwbOIEND®B`‚python-pot-0.9.3+dfsg/docs/source/_static/images/logo.png000066400000000000000000000103451455713015700233670ustar00rootroot00000000000000‰PNG  IHDR´ťÜૠ9tEXtSoftwareMatplotlib version3.5.1, https://matplotlib.org/Řaň˝ pHYsgźŇRRIDATxśíÝżoĎÇń'ţÎrŕ›@’oďʦăGM"h0Ĺ·C2H&Jű•ř?PZP˘ŕ‚.…]}Ą|h>‚Ň4Iťäű5‘ťâBĐͦػďŮŽőygvvöůVZé†enŢŻ»±agö\QH’Ôv3Mw@’¤:8ˇI’˛ŕ„&IĘ‚š$) Nh’¤,8ˇI’˛ŕ„&IĘ‚š$) Nh’¤,8ˇI’˛ŕ„&IĘ‚š$) Nh’¤,8ˇI’˛ŕ„&IĘ‚š$) Nh’¤,Ě5őź;wîĎÇŔ-ŕđxücQ˙ŐTżbr ŇŁ&Ö}2ǧ=’«UQQ`x|Šďźg@/vßî1jbÝź\ŽTkunŘą(Îť;·ü–r6çjo{ý‹,ĎαtČÎ`Ŕî§OŁćŻ»EQü1Z#ř˙c7€ ŕ °loFÍłÔÄČĄuźĚď†öH9˱'´çŔßţŮĚ ˙ÉÍĹĹoÚüňń#żůĂďů߯_ˇ™'Ń:ÇS`.˙ üőwšüř5đ?Ď‹˘ř»xÝëžHą´îřÝĐ*Éf9Ú„vîÜąËŔżó˙đWżún`G~ůř‘Ç˙ů_—@.?‰-€řľ„‘ßđřUá˙)—Ö}żZ%é,ÇĽËq_ëő&ŕćâ"«˝”ý[‰Đ·XV€¸Îä 0|ýŔ<ĺŘ)Śą´î“ůÝĐIg9ć„v `˝ßŻÔřţ¸Ý•@ýiÂđ˝<¬ŘüŃčäví=ŃHŚ\Z÷ÉünhʤłsB»°<[mĄŔҸÝů@ýiÂđ˝Tý^ť\Đ•bäŇşOćwC{$ťĺÚ€ýŁĂJŤĆí>ęO†ďeŻbó÷Ł“ú˘RŚ\Z÷ÉünhʤłsB{ °3TjĽ=nWuäÚ`ř^¶*61:yU{O4#—Ö}2żÚ#é,§~—ă`ŘŹŇÉđ.»Ŕ|ŠwuQ¤\Z÷ ünh•¤łśú:´ěÖâŚĆ Ĺ5]#—Ö}2żÚ#ĺ,7şSČZŻÇzżĎŇěG‡lĽ;ąŔť˘(>ýŕr­ôí*űë”w ]¦ü÷ćŔŰQó,Ç 51riÝ'ó»ˇ=’ÎrC{€=#±=Ŕn1jbÝź\ŽTkő7´ă†»4oR®Oh~—ć8é‰Që>™ăÓ©Őޱ M’¤:ů€OIRśĐ$IYpB“$eÁ M’”'4IRŞmoŔđvĎÇ”‹óżÝ3'9ŽmŽď)´¶ŽY[űÝEÉŐŞˇyĎIlA^GŽc›ă{rĚňęwŹTkŐčÖWW{ Üë_dyvŽýŁCvvOnos·( ±^Á·ŰŃÜ6(ź[´Gą;ö›QóVŚ­y9˝¶ćŔZ·GĘK}sâ-ŕI´¶ŰS`#Ĺ C§e^¦ŇĘXëVI6c©?>ć+đđ'±É€ŔLŠŹt†y™J+s`­[%éŚĹĽËq_ëő&ŕćâ"«˝”ý[‰Đ·¶[fĘ]Ż'Śáë×ć)k’*órzm͵nʤ3sB»°ŢďWj|ÜîJ ţäd8F+64:ą]{Oęc^NŻ­9°Öí‘tĆbNh–g«­X·;¨?9ŽQŐĎ÷ĺŃÉ…}©‹y9˝¶ćŔZ·GŇ‹9ˇ}Ř?:¬Ôř`Üîs ţäd8F{›żť|Đ—ş—Ókk¬u{$ť±Úk€ťÁ 
Răíq»Ş#×eĂ1ÚŞŘüĹčäUí=©Źy9˝¶ćŔZ·GŇKý.Ç/Ŕ*°Ą“íu ŘćSĽóhće*­Ěµn•¤3–ú:´$ÖČ´ÁhlS\2-órzm͵nŹ”3ÖčN!k˝ëý>Kłs˛=đîänwŠ˘řôËéoWď_§Ľé2ĺżcżŢŽš·blÍËéµ5Öş=’ÎXC{€=#±=Ŕr8rŰß“c–Wż»x¤Z«¨żˇ7ÜĄy“r}Bó»4g$DZÍń=…ÖÖ1kkż»(µZ56ˇI’T'đ)IĘ‚š$) Nh’¤,8ˇI’˛ŕ„&IĘ‚š$) Őž×ŔpýÂcĘŐ捯_ČIęc›z˙şĚÚśNń˛gĐĐ óç$¶Â<‡#ő±M˝]>¬Móăe Î~4ş—ăŐŢ÷úYžťc˙čťÁ€Ý“űµÝ-ŠâŹŃ:ŘbßîŻvŘ |ßĺăŢŚšG[kź®Ôł“šY¶őH}·ý-ŕI´¶ŰS`#ŰÁÚ'.éě¤&P–­A RÚWŕ%ŕO"“-€™$źQdíS–tvR(ËÖ &1ďrÜć×z˝‰!¸ą¸ČjŻe˙V"ô­íV€™ň1“> _ż0OY“¬}şRĎNjBdŮÔ$ć„v `˝ßŻÔřţ¸Ý•@ýÉÉpŚVlţhtr»öž|źµOWęŮIM,[šÄśĐ.,ĎV[)°4nw>Pr2ŁŞß˙—G'ôĺ{¬}şRĎNjBdŮÔ$ć„ö`˙č°Răq»Ďú““áíUlţ~tň!@_ľÇÚ§+őě¤&D–­AMbNhŻvJŤ·ÇíŞVąË†c´U±ů‹ŃÉ«Ú{ň}Ö>]©g'5!˛l j’ú]Ž_€U`?J'Űë° ̧x—”µOZŇŮIM ,[š¤ľ͵ŤĆ6Őu,Ö>]©g'5!˛l ęŃčN!k˝ëý>Kłs˛=đîä ű;EQ|úÁĺtĚ·; \§Ľkę2ĺżążŢŽšG[kź®Ôł“šY¶5‰˝×ĺ"Âg¸_YçĆ6őţuů°6ÍŹ—58űő7´ă†;JoR®ĄpGéĄ>¶©÷ŻË¬Íé„/k0˝Ć&4I’ęä>%IYpB“$eÁ M’”'4IRśĐ$IY¨¶etĂ[SS.$ěä­©]®ľď6°6§bĽ¬Á4´ ń9^<ŘŐ1čęűnĂamš/kpöŁŃ­Ż®ö¸×żČňěűG‡ě ěžÜ2ćnQ?zly+}»ĹÍ `ňYH{”;nż5Ďf ¬}şşšÉi…Ȳ5¨Gę›oO˘u0ާŔF×6!µöIëd&§(ËÖ ©?>ć+đČĺ'‘ŕ0ÓĄÇDXű¤u2“Ó ”ekP“w9nók˝ŢÄÜ\\dµ×˛+úË 0Sî¤=)´ _ż0O9vmfíÓŐŐLN+D–­AMbNh·ÖűýJŤďŹŰ] Ôź& ßËĂŠÍŤNn×Ţ“¸¬}şşšÉi…Ȳ5¨IĚ íŔňlµ•Kăvçő§ Ă÷Rő{úňčäB€ľÄdíÓŐŐLN+D–­AMbNhöŹ+5>·ű¨?Mľ—˝ŠÍߏN>čKLÖ>]]Íä´BdŮÔ$ć„ö`g0¨Ôx{Ü®j•Ű`ř^¶*61:yU{Oâ˛öéęj&§"ËÖ &©ßĺřXöŁt2ĽKŔ.0ߥ»™¬}Ň:™Éiʲ5¨Ięëв[k1®­7±öéęj&§"ËÖ Ťî˛Öë±Ţďł4;ÇÁŃ!ŰďN®°żSŧ\®•ľÝŕ:ĺÝM—)˙müđvÔ<›1°öéęj&§"ËÖ &±÷Ú˘\DřŚďWÖŐ1čęűnĂamš/kpö#ęohÇ w”ޤ\KŃÉĄ»:]}ßm`mN'ÄxYé56ˇI’T'đ)IĘ‚š$) Nh’¤,8ˇI’˛ŕ„&IĘ‚š$) ŐžŔp­ĹcĘ•ńµ­µu]5Ëşžžc^1¶ngĐĐ űçÔĽ>Ôu=š=¬«c–âbŚ­ŰŮŹF÷rĽÚ[ŕ^˙"Ëłsě˛3°{r´»EQüń´×…Ŕĺóö(ËđćÔ×UłBĺ%g~ ‘KëVŹÔwŰßžT¸ôS`ĂťŞó0/9ółX \Z·¤ţ<´ŻŔK`ŇO" Ŕ`Ćg ĺ#`^rćg!°@ą´n5‰y—ă&0żÖëM ŔÍĹEV{=(ű·ň“ë®”í®39 _ż0?ěŹŇ*/9ół^\Z·šÄśĐn¬÷ű•ß·»ň“¦Ă×VěĆŁŃÉíŠ@Í•—śůY/D.­[MbNh–g«­X·;˙“¦Ă׫~Ź]>Ń%+T^rćg!Ľą´n5‰9ˇ}Ř?:¬Ôř`ÜîóOš_߫؍÷'úŁd…ĘKÎü,„"—Ö­&1'´×;AĄĆŰăv?«ňđő­ŠÝx1:yUń¨ˇň’3? á…ČĄu«Ięw9~Vý —ľěóŢ!”Ź€yÉ™ź…ŔĺŇşŐ$őuh•ÖZŚ®ëŽĽ„ĘKÎü,„"—Ö­Ťî˛Öë±Ţďł4;ÇÁŃ!ŰďN®°żSŧ\î‡×-o}Hůź§ď)E{ęëŞYˇň’3? 
á…ČĄu«Iě˝¶(>#Ě^޵_ףŮĂş:f)!ĆŘşťýúÚqĂĄ7)×RÔ˝Ű~í×Uł¬ëé9fá…cë6˝Ć&4I’ęä>%IYpB“$eÁ M’”'4IRśĐ$IY¨¶etĂ[SS.$¬ű¶ýÚŻ«n‘3šŹµ4gĐĐ‚Äç„YX]űu=şqÄČŹÍçQKóqöŁŃ­Ż®ö¸×żČňěűG‡ě ěžÜ2ćnQ?zlůŻ 7€ Ęç íQîbýćÔ×U7„Ę夿̶WĽŹz¤ľ9ńđ¤ÂĄźně©iĚĺqf4ňb>júăcľ/I?‰,€˝ Ó ËăĚh&ĺĹ|Ô$ć]Ž›ŔüZŻ717Yíő ěßĘO®»R¶»Îä 0|ýŔü°?R¨\gFó"/ćŁ&1'´[ëý~ĄĆ÷Çí®ü¤éđő‡»ńhtr»âPŢBĺň83šŹy15‰9ˇ]Xž­¶R`iÜîüOš_ŻúýrůDÔyˇryśÍGĽŹšÄśĐ>ěVj|0n÷ů'M‡ŻďUěĆűýQç…Ęĺqf4!ňb>jsB{ °3TjĽ=n÷ł*_ߪ؍Ł“W˙€ň*—Ç™Ń|„Č‹ů¨Ięw9~Vý —ľěóŢ!¤Ó ËăĚh&ĺĹ|Ô$ÚohEQĽgřŁĹoţđ{~ůřń»íFë7†ţ©(Š+Šâż'˙Ęź~dů5eÁżg´†ăO×5 ™K3šˇ@y15it§µ^Źő~źĄŮ9ŽŮ xwr…ýť˘(>ýŕr?ĽnyűëCĘ˙<ĺďí©Ż«n•ËI‡mŻy15‰˝×ĺ"Âg„Ů˱öëztă‘3šĎ˘–ćăěGÔßĐŽî(˝Ią–˘îÝökż®ş!F~Ěh>BÔŇ|Lݱ M’¤:ů€OIRśĐ$IYpB“$eÁ M’”'4IRśĐ$IYpB“$eÁ M’”'4IRśĐ$IYpB“$eÁ M’”'4IRśĐ$IYpB“$eÁ M’”…˙ăİTcľÇéIEND®B`‚python-pot-0.9.3+dfsg/docs/source/_static/images/logo.svg000066400000000000000000000234731455713015700234100ustar00rootroot00000000000000 2022-03-30T17:25:32.476826 image/svg+xml Matplotlib v3.5.1, https://matplotlib.org/ python-pot-0.9.3+dfsg/docs/source/_static/images/logo_3ia.jpg000066400000000000000000000607051455713015700241240ustar00rootroot00000000000000˙Ř˙ŕJFIF˙ŰC    #%$""!&+7/&)4)!"0A149;>>>%.DIC;˙ŰC  ;("(;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;˙Ŕ¤"˙Ä ˙ĵ}!1AQa"q2‘ˇ#B±ÁRŃđ$3br‚ %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz„…†‡‰Š’“”•–—™š˘Ł¤Ą¦§¨©Ş˛ł´µ¶·¸ąşÂĂÄĹĆÇČÉĘŇÓÔŐÖ×ŘŮÚáâăäĺćçčéęńňóôőö÷řůú˙Ä ˙ĵw!1AQaq"2B‘ˇ±Á #3RđbrŃ $4á%ń&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz‚„…†‡‰Š’“”•–—™š˘Ł¤Ą¦§¨©Ş˛ł´µ¶·¸ąşÂĂÄĹĆÇČÉĘŇÓÔŐÖ×ŘŮÚâăäĺćçčéęňóôőö÷řůú˙Ú ?öj(˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€8}âi·óŘY؇’(ŇJÜgŘń®^çÇľ"ąČ‹ ‘ŚEüNMRńOüŤ:—ý|5eWŻNŤ5ě|˝|UiM®m ořJüA˙A{źűęĄĆ~"·bWS‘óÚEV¨¬J+_gÇ?·ŞľÓűÎďLřťpŽTłI<ÉĘGŕzţuÝéš­–Żj.lgYSľ:©ô#±Ż «ú6łwˇß­ÝŁŕŽ ůd_C\őp±’Ľtg~1©jšŻÄ÷:CҪ隌¶ť őąĚs.@îp}Á«GˇŻ1¦ť™ô ©+ŁČĄń׉ijÄܧŻŇ™˙ ç‰?č ?ďĘ…aO˙˙ľßÎŁŻkŮSţT|“ÄVţw÷łŇ|â=WZÔnˇÔ.DÉ!”yj¸9Ça]ÍyźÂ˙ů Ţ˙׸˙Đ«Ó+ĚÄĄŤ#čp2”č''vQEsťˇUµ Ř´ë ď'8Ž.}ýŞÍp?őť±AŁÄÜżďfǧđŹĎźŔV”ˇĎ5 EeF“™łŕŻÉŻŘL.™~Ő ‡pSĘ˙‡á]-xż„µŹě]~ ݱ§Ę›ýÓßđ85ěŕädr+\M>Iéł9đÝZZî…˘Š+ď (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€<ZŃő-SĹz’ŘŮM?úKĘż(úž•b‡:ü»L‹o#ťňäŹČő|b–şţ·4’Hó[M·)6îyd˙ 5¨€1Mk1î•ţbą­CLľŇ®<‹ëgĎ#pᾇˇŻx¬Y[Ţx^đĚt ćFçřXŹOĆ®–*NIHË—SPrłGŤŃEčžčż ď™íďl’#e•=łÁţB»ĂŇĽßár1ÔoäÇĘ!POą?ýjôšň1)*¬ú|˝·‡W<{Ăq/ú$˙|˙Ë&őúS>Ăy˙>“˙ߦ˙ öĂ®éuK0G_߯řŃý»Ł˙ĐVË˙ük§ëRţS‡ű6źüüţľó†řgo<:µé–#‚čF~ozôŠ­m¨ŘŢ»%­ä˛Ś‘…Y®:łsź3V=L5%JšŠw (˘˛:®n#´¶–âfŰH]‰ěxn«¨É«j—ŇçtÎH݇ŕ1^ń'Xű6ť—bKŁşLvAţ'ůW™×§„§hó>§Ďću঩­—ćëţÖµĽ?ČٸµýÔ™ę@ű§ńČ×WEŕ}gű'ĬŤ.żu' ?Â?çZâ)óĂÍř*ŢʲľĎCŘ(˘ŠńϨ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ (˘€ *˝ýőľ›e-ĺÓě†!–lgşV$~=đô˛,iu#;ŞÉ?…\a)+¤g:´ŕí)$ttQEA QEQEQEQEQEQEQEQEQEQEQEQEQEQEU;ýVĂKo®Ł·yĆqÖ®WńOýN›ţôźČV´ §5sâjş4ś×C ¸ńLJmŐŹöŠČGđĆŚÄý8Ĺq*ńĽšä&ĘÎ&‚ĚśącóIŽ™ôÉŃ^”0ĐąŕVÇÖ«W˘ň (ë^‰á$f-OShĺ<4PŁP{#ô­*TŤ5vaB„ëK–&Ç4GŇ4!$é¶âěůŽUÂ?.éOCKHzń§'993ę©STŕ ş8˙H—ýöţtĚT“˙ÇÄżď·ó¨ëÝ>=ť·Â˙ů ^˙׸˙Đ«Ó+Ěţ˙Č^÷ţ˝Çţ…^™^N+ř¬ú\»ýÝ|šî±FŇ;E±=€§W%ńYűŠ,blMzvśB˝ůđ?XÂrQGUjŞ•77Đó˝UmkZ¸ľ9ŘíÁěţ}ë:Š’$ą¸ŽŢ!şI\"ŹRN{i(«#&ç+˝ŮĄmřłA_ę«of†H•Ńç'Ł~żÎ±(Ś”•Đç 8Ët{?„µŹí­ ÝłÖłµĎ±ČŘ‚ôlç q÷OóŤz˝yéňOČúlokE7şŃ…QXEPUu JĎJµk›ŮÖÇv<“čsMŐu;}#Nšúĺ±C8XöÜ׌ëzÝć»|×WoÇHăĺŚzń®ŠGw±Ă‹Ć* ËY3ŻÔţ'¶ňš]’í‰._ř˙ÄřŤó‹¸ĐgřaZfŕÍO]EśmjzK űßîŽ˙Ęşëo†ZTcý"îćcŽpB ţUÖŢž–ýO6+_ŢMĄ÷Ä?ń˙–­ëő«ŁGÚß]ŚqXŻ«ŰKÜ÷Š+?Af}Á™‹1·BI9'Z‹Vv:Ł.h§Ü(˘ŠEQ@Q@Q@Q@Q@aqń#ZŠâXÖ <#•FěŢŻAŃo$Ôtk;É‚‰'‰]‚Ś ‘ÚĽFóţ?n?ë«3^Íáoůôßú÷Oĺ]řšqŚŠ šDlÁî˘ô u?‰Ďé]z~Ňzěp㱍?wv{r$Ń$±°du ¤wĄ>ą‡ZĎŰ´f°•ł5™ŔĎt=?.Gĺ]}eRqgMŞ­55Ô(˘ŠP˘Š(˘Š(˘Š(˘Š(˘Š(˘Š(˘Š(˘Š(˘Š(˘Š(˘Š(®'â>ť{€ł´šŕŁ>ď) 
mŕuĹvÔUÓ›„ą‘ŤzJµ7Ôđ›Ť#S´®4ű¨îđ°ţ•Nľ‚®kÄţ±Ö-%šŢ‚ůT˛HĎŁzçÖ»ˇŚMÚHň+eŽ1ĽĎ"­mÄ—ţ¸m!x ýä ~V˙ďY8#‚0GQEv¸©+3É„ĺ sEŮžíĄjvÚĆźí«nŽAĐőSÜqVĎC^iđĎShu9ôĆoÝΞbF1ü«ŇĎC^=j~Îv>« _ŰRRęx˙ńń/űíüę:’řř—ýöţu{'Ę=ÎŰáü…ďëÜčUé•ć ˙ä/{˙^ă˙BŻLŻ'üV}.]ţîľb“ŔâŢ*Ö?¶őéîUł .÷GÄäţ5čľ:Ö˛´Ž6Ä÷şLuřŹĺüëČ«ŁOG6qfu®Ő%ęÂşż‡z_Ű|Aö·\Çf»˙ŕg…ţ§đ®R˝OŔŁOŇĽ<Ť5í˛OrŢl€Ě Ř}?ťo“Ť7n§šťe}–ˇńKűf†·¨ą’ÍňqýĂÁţ†Ľ˛˝Ęć˙I»µ–ŢkűFŽT(ĂÎ^AőŻ˝¶űěÖŰÖO)ʇSŔt ÖXI>^WĐß2¦•ER=H‘Ú7WF*Ęr¤v5íŢŐ—ZŃ-ďF7˛í”z8ŕ˙Źă^!]źĂ}gěšśšd­®†čóŮÇřŹä*ńTůˇ~¨Ď/­ěęň˝™éôQEy'Ň…Q@kń3Uiu4´oÝÂľdŐŹOČ:ÉđW‡×]Őɸ\ÚŰňŹďËř˙JŻă _j%‰8—hĎ WođÎOÍ8űŇÜOĐ W©'ě¨+=ýců¶żäv ŞŠT*¨Ŕ`KEĺźBQEOSŐl´‹Ss}:Ĺŕg’ÇĐć¬\O­Ľ—6ŘâRĚ}Ż×őËŤ{R{ąŘÁÄQç×°úú×E >ŃůXĚRĂÇMŮ×_|Pměş~ś6öyŰ˙eăU`řź©+ţţÂÖEôBĘRišĂ»ŤFŐnµÚŇ7HŐräzśô­[Ď…öŤűüÉ  @e'ß5Ňţ­Ęyëëó\ëô6t/iz㬵µÉéżĹţéčkˇŻÔtëÍý­nĹ4g ×ŃŻRđG_\Ňš;–ÝwjBČľ;7ůô¬«ĐQ\đŘéÁă%R^ΧÄCŻxî U’Áě$™‘U·¬€‘źJň—mîÍÓq&˝c]đ-®»Ş=ü·łDUAäň.Çeë´‘]_gËîďÔáĚ=·?ď6ÖÇy¦üH‚ÇM¶´m2W0D¨XJ8ĎJéü3âüKöŹ.ŃíüŤąÜáłś˙…szgĂ‹;í2Úíő …i˘W*޸®—Ă~ĂhňneźĎŰťŕ c>źZç­ě,ůw;°ß[ćŹ?Ăňěn×3ŻxëLѤkxÁĽą_Ľ‘ź•O»J‹Çž!}M[[WŰuwuDî~˝…y·˘ÝkúŠŮŰ`n’Fč‹ęhŁB.<óŘ1xÉĆ~Ę—ÄtrüOŐóŤ˘'ŁncůäV¦—ń6ÚiDz•™¶ăÍŤ·(úŽżÎ¬ĂđĎFX•ežîGć`áAü1\—Šü!/‡J\E)žÎFÚŚ2Cţ5¬VŁĺHç›ÇQ\ňwG­Ă4w¤ĐȲFă*Ęr§×›|7×$ŠńôiśĄáýÖ>ŁźÂ˝&¸ęÓtĺĘzję˝54QEdtH'€+ŹÖľ"ŘŘLÖö›Ůáś6ŘÁö=ę/Úô–v‘éVÎVK•Ý+ČNüOň®DŃ/5ëáif `eäoşÔ×mqçžÇ“‹ĆMTöTw:DřźŞ rö6Ť~čÜçźé]O‡üo§ë’-ł+ZÝ‘ąČ÷Oô¬Čľéâ&Ô.R>ň…PÓšćĽIŕëßmĽ†c=¨a‰TmhĎl˙Ť_.§»—>6‚瞨Ŕ˝âúăţş·ó5ěŢ˙‘_M˙ŻtţUâŚĚěÎĚYä’y&˝ŻÂßň+éżőîźĘ«đ#<Żř˛ôýLCáĚzŽŁq{.«(yä.@qžÝkŻ·‚;[h­â]±Äz1RŃ\2©)«6{4čS¦ŰŠŐ…QY›˙Ť5źěĘѶ..?u¨'©üJňŤ7KşŐgxm{ÇHŮô·ăÍgűS_hclÁgׂßÄ>? ę>i?eŇeÔd\IvŘL˙qÄçňéC÷yşłŔ©ţŮŠä[#đľ°t]vŢčśBÇ˰zţ\½¨ŔAˇâŢ+Ňż˛$†¦¸’ 9ČÇză§ËĚą¶=ZŢÓŮżgąÎ˙ÂҶ˙ Tß÷ô…đ´­żč7ýýáO˙…]c˙A+źűĺkĹ~ÓĽ9eĄüŇÜĘŘŽ&UÁ©8®ŘÇ'eúžUIă©ĹĘMYzđ´­żč7ýýá]N«ľą¦­ńł{Tv>Xv°őŻ%đމ&˝¬Eh2"<Î?…_ÄôŻj†(ŕ…!‰BG…Ug…8{±Z›ŕj×­yMčax›Ĺ‘xj[t’Íî<đÄp¸Ć?ĆŁđߌ˘ńě¶±Ů<8üÍĚŕç1ú×;ńOţ>´ß÷$ţkUţČrďţ˝żöaMR‡°çęKÄŐXĎe}?ŕťTőmEtť.âýŁ2q@pO5r±|a˙"žŁ˙\ż¨®H$ä“=˛q§).‰VźíîŻ ¶\Şf‘c edă=+·ŻŇ?ä5a˙_1˙čB˝Ţş14ăM®S‹^ĄhÉÍě`ř›Ĺ1řh[-ăí±µÂíĆ?ĆŁđß‹ŕń \lÖÂÝC1wsůt¬ŠsLúÉ˙˛× o¨\ÚZ\ŰBű#şP˛ŕr@9ĆkZxxÎ’}NjřÚ”±?etůî±ń.8'ht›e¸ qçĘHSô’?*čĽ)¬Ükş*ŢÜÇHdeÄ`ő5çú€ő-bÝ.Ą‘,íß•.¤łPľźZôO č§@ŇE‰¸óđěŰömëí“Q^4cXîm„ž&ĄNzź ­EWęQ@Q@'JZFű§é@™^ĎPłÔ#ó,}QÇ×Ҭ׀¬˛ÁpŇC#ĆáŽ×Úµ­Ľaâ ]»59\/A.ů×t°oěłČ†k·¸öŠŁ¬j–ú>›-ĺË…TS´wvěy{x˙Äl¤}˛1‘ÔBżáX—Ú•ö§(–úę[† ¶qôÁĘţó\Ňľâw+»vvęÄ“řŇQEz'‚tß˙ }žĎöóôÚkŘOC^gđĎMiµYő_ÝŰDZO«7˙[ůצ†ĽĽ[NˇôylZˇwŐž?ü|Kţű:ޤźţ>%˙}żťG^ˇóŻs¶ř_˙!{ßú÷úzeyźÂ˙ů Ţ˙׸˙Đ«Żń†łýŤ M*6'›÷Púä÷üMyxąV˛>‹5O Ěú\óŻk?Úţ —ËlÁmű¨ý>ńüOňĎŃEzQŠŚRGĎÔ›©7'Ô)0=*ý®‰Şß@'µÓ®'’¤dŠ›ţťwţź÷čŃĎÔjśÚşLĘŔôµ©˙λ˙@‹Ďűôh˙„g]˙ Eçýú4sǸý•Oĺf]Iň[\Gq m’&‡Đ‘QA ŚÔQTg±îş>ĄŻĄ[ßEŔ•2G÷[¸üęíyĎĂMc˸›G•ľY?{Ľ>đüąü z5xµ©ňMŁęđµ˝µ%.˝BŠ(¬Ž“ĆümnÖŢ-ľ 8‘„‹ô WYđÂő_K»±$oŠ_0fâ*?‰z3M:Ľ+“ňćŔţx?ţuĹčÔÚ«ěCrýŮcĎßSÔZőöÔ,·>użŞă–ߣ=Š©¦ęvzµšÝYL%Ť˝:©ô#±«uć4Ół>…5%tQMwX×s°Uő'ÎwÇ× oá+§VXĎĐžA^sá;Ô|Meo*†Ź~öSÜ(Î?JôźZ5߄[eǰ<ţ™Ż1đޢšWlď$8Ť}đOëšô°˙ÁvßSÁÇiŠ‹–Ú~g·ŃH¬C)HČ őĄŻ4÷ŽâuŠI¦Zß…dRůdú«ýE`|8¸hĽNbíšR>#ůV×ÄíN1kk¦##?śŕv3ő'ô¬Ź†Ö­7žă$źsŔţµéCýŮÜđjŮă×/tzĄx˙ëĄ˙}żť{ýx˙ëĄ˙}żťF yf»CçúÝá˙ů´˙úöOýVŤgxxçĂşv9˙FOýVŤqOâg­KřqôG’|B¸iüW,dśA †ďëPxcĹGĂIpĹnr2ćM¸·CëV~"Ú´(i¶án"WÔ´˙!V<aŁęŹui¨ÚĹ4ăüäŻB?OνKÇŘ+««=jŹÔ]ťŮsţśßôOűţřšĎ×¶kܱÎ&T‘,1]oĂ]F9ô9,7-´„íîUąóÍzUżÝŐĽŹ eŤ—6úť•Wľ´ŽţĆ{IT2L…>â¬U=[P‹KŇî/f`«3Üö‰Ż:7ş±îNĘ/›c™v3/]¤Šöż ČŻ¦˙׺*ńF%‰cÔśšöż ČŻ¦˙׺*ôqź<,Żř˛ô5¨˘ŠóO|+Ĺ:ŔŃ4î”âf\#ýłÓňëřVĹy_ÄMgíÚĘŘDنĚa±ÝĎ_ËůÖô)óÍ.‡&2·±¤ÚÝčŽkO˛›TÔ`ł‹&Iä źŻSüÍ{ť­´vv‘Z»c…(öĽ&Ýn•ÄÖ«0e<<@ä~"­ýŻ]˙źŤCţúzďŻEŐ¶¶×ĄG©Dą’Đáđ:ˇ˙ŹÖ¸˙ë?Řţ ‰¤lAqű©}zŔ˙ZĎ’}fhÚ9$żta‚¬\řUFF*ęU‡PFŞ…+S䓹±Ő•X«3č+ÁşĎöχá‘Ű3Áű©}Iâ1[ŐäĘ.2iźMNj¤—R9çŠÖŢK‰ś$Q©gcĐ^)â f]X–ńŘNČSű«Ř}Ćşďţ átKg뇹 ţKýOáY߼?ýٍťNá3ojß #‡“·ĺ×ň®ęT ęHńń•%¬¨Cçýy—|>4-|Őîă1ôô_Ăůćş (®IĘ\Ěö)ÓŤ8(GdyĎĹ?řúÓÜ“ů­Wřa˙!Ëżúö˙Ů…Xř§˙:oű’5Şż ]W_ąR@-lp=~a^‚˙vţ»ž$żäaóýP¬_ȧ¨˙×/ę+jąOz¤V~{=ĂÎĽ!UsÎĐrOéŹĆ¸i&ę+ľ&J4dßcĚôŹů X×ĚúŻwŻ 
[binary image data omitted: tail of preceding JPEG member]
python-pot-0.9.3+dfsg/docs/source/_static/images/logo_cnrs.jpg [JPEG image data omitted]
python-pot-0.9.3+dfsg/docs/source/_static/images/logo_dark.png [PNG image data omitted]
python-pot-0.9.3+dfsg/docs/source/_static/images/logo_dark.svg [SVG image, generated by Matplotlib v3.3.3 (https://matplotlib.org/) on 2022-03-17; path data omitted]
python-pot-0.9.3+dfsg/docs/source/_static/images/logo_hiparis.png [PNG image data omitted]
Ce˙V‰-Ţ#yţ`«jô7µÚV×TYX1Dh*Đ>°HËýÍ˙÷ÉŰץ ϵ°…—ŞňÂ= €fLkgčIy¶©”ŕ’çk1¤e5úą…jÔą{źÇ×ěččŔ¶• óU›öó|Ľ¶¨LbyÁh‹/=:1ąOTzÂHžod ů»-ÁFď—dž7Ľ'o_Ď[=7 t`iYm^’ćϵig‡´ hϡ*Ď6Učôđ;Aň|c&ů(-ÝŃŐl”×UŮ÷Ü"Żřš.®”@6i–ŢŻ6_ ;°xˇhŹ˝R…0b’ç[qŹ?µuÝueˇMoë+lâ¦wśţÝŰ\µ9­8÷hŘ"‹ÄFJň|ëJ[÷?kÜé÷ş:t]uEĹŤî4-[´?ÍÇĚŢć4ör5$m}Z¶g˘€Nťi- 0.’ç»W–¸çř˙iô.Ż«×Uwţhůäţăű¨$ÍÎÇ•Öěp&!îŐ3a Ógܱ0ôOňĽ “´Ü}ž?/žĽ}=’đ×UIžżIËEôcŃňÉ©@îăĂJóKÉsĽ`•—ćA$š7©íŘ 7SUčý“ĺĹ`ž– óą=Ěéře«L6ž@8ĄBďÂ; ěÄ|M˙žß măŢr˝î…ÉőÝvČÇÉ"ŃČĘdý â  ’çáMĘ÷—żÇ˛ŔíÇ|ĚŚCw~Mť'Űq{6BĆq#*í0V sćŚEÄě @HeÔ©0Ŕv层ĘŘvż›2Ž»*Giž–“ő*Óc:K& ˇúNŕÓ$Ď»˛WźĎgů{ťĺĎź-vŰúő4Ôëi"ŁßhálŐu=Jű‹ą˝Ő9Őçq­ŞĐ-üř@çĺ{dYěđ{2É‘*t"±(>AňĽď±hŹ.ŇżUéĆĄ›»–&őZDcTBĽK CL塽Ş*/ókÉrřh 3 €°ĘdíË|ś ŔÇĘDn~ç}ž–Itâ)­bź @<’çŁ1©ĎëWů;/]€~µWúZŻŁßRő?ŤQú-ÂIJ CŰJRü]˝ˇ¬’ć×V˝ÁW˝€.îĺçÂđiµý"Ů'2˘’€1Kľw&Éó1*ßűaÝ+}•Lż–_CĄkč Ł6Źp’č°[«¤xůüăÖ_/ňCx!<đ ±ÁK@&’ _u™–“Ú,ÇS>xĆ‘Ç&Iň|ěĘűÖ4-[Ľ—yüż“éią•Ş‚·Ď_;“·˛H~""DŮĘH6ăö  $Ä˙úŕď«"‡Í±÷9@?$ľ ¶r˙1©BŹČB1€ jňüM˛`Ťý“LŻż‘’L/]dŻĆýŹźUµůź ·ĚŁś¨:wµŞ’‹UEř]/răĐĆË٤ľśЇ’\˘¬NŘUčqY(Ęu1äăű´LśO|őÜÁ<ʉJ ĐË fŐ: €ľ e⪗I€ ąđ.ÖY~ÎÍlóĐÉsÖh’Ţo÷^>ćiŮő·tÁ ‘TŻ×D9ľKËÄąÖěÜ×"ŇÜŽ:˝x)Ý*­‰Ź…ŕÓJűŢ’„M’číձ̹P´Aňś-Ň­6ç5©ľÚF÷·úYţzë[ÇÖß˙Şşüżią`đ•±óH'+@›ňRw"Ýšć{ý…ýľHz\'ů9w© `÷$Ď١UE÷đÁo˛|¬ę©~ţU˙÷íżW“ôo»őonýą~÷lŘŻ‘NV€zÁč^Y(u* ź¦ =4Uč´jĆDňśĆß÷f‚ŹW®"ťď˙řĘčŔ™toZ;ŽđyBV©BźŔnÔ±†ä9Ŕf\E;á˙ ˝űInă8ÜIef§ň&Đ LťŔĐBkSĄ<¨Ť¶˘¶ŢP:ˇ¸DŻ˝|3'0˛QyţNđ}hÎŔ¤$R¦gÝ3ĎS…(±#hôôż·ßntJźŕĚÂͱC ×:;€{ÔW]Ě•D±ýśŤÁ{ xĐą_J{Ăč”îą"ĐćđYčĺšÉBŘ‹óps4í“}ůôŕŮ4¸÷`L&őÉ#ÜŁÎBżPĹ’…ĐŁŐü"žru¤$:3_ÍQ®J{Óč”L&"Ŕř,löNK:@OęŁŰÍ/şőK‰oZ€R'9“Ő3%0:“úîńÝź?/V,”D±Î°%ÇŽ§‰Ůçî=čÎŐjnRäéXč”Ęa}÷sząŽl¶$ř›ć…"čÔĽÔ7.€@qę#¶ÜO0^ÓU_ ŰŕdˇĎf1€­ćq]É€n˝/őŤ  P"Gl [`3YčĺšĘBčÔŠ S—ßýůóe©o^€ š0űôŕŮD1ÜOzńdˇtljVÝzWň›@ (ź<›ŮçTl¨ŘLzą¦őĂ°Źą÷T)0pEĐ™«Őë˘ä €@ida°łĐmŞřYčĹ;Sť(€ÎĚWó«’?€:Ũł/LpX‹ÁóŰ‘,ôrMęS¸hץ"čĚ»Ň?€:%qT/_z®ľ­ÎB_*‰b9…  }WŠ «ůGństŠđéÁłĂŐS%ŔdćlGşľ€2ĐşńnB€RČ>ŕ>2ó6řîĎźçAzŃ}ݧĎđ…*‚Ć~S­[Ô§_O€ě}zđl˛úc¦$¸GĚĚ›*€ŤdˇÜ×­^'Šř¡"hć»?ľŽq0߸‡:%}Ŕ&˛Đ6…^ţĽH:@«ćŠ 5ˡdźGčd­^ š) 6®ú 8›ÉB/WśÉBhĎ;E`žqtrdY° '–l ˝üľN:@k}běm,HwYĎ3C€Ü=WliöéÁł‰bŘH° \1xîÚúŞk0oŤe©^í  ­OžÍVL”;…°,ôâťŘ0F\ŤĂXúÄ«ŐÇJ ±Ĺî>_@ g2+ŘŐĚѶ[‘…n®@¸˘/B•‰€yĹ5t˛ôéÁłi}ŔîbđüD1|›,ôâą¶ Ý~1?ĽP;ąböy$€@®dTĐÔsE°YčćLÜxl.ŘĹˡ~0t˛óéÁłx×ÚTIĐĐdŐ—ĚŔ·ÉB/ެž;ĐNżďCŹAô+Ą°Ń›U»9Řą„:9zˇH$+`;ďAŃÎŚÚD´ë»?ľ\ýq¬$ľiąz˝ň@ +ő=~3%@˘…>U ÍL»’Mőw0î1Ż"hßwţ||,1@ ^Ö§v –:ą™)Z" `záKşţ€ĎűÇY9W_YÔŤM€l|zđě 8ľ€öLÝ °•$…^v7U íúîĎźăQîJŕ/qÎ0Šk.ĐČÉlő:P ´ČĆ,€ dˇ‚»ĐşE—ŠŕÚ›ŐÜa9†*€@N9hŰěÓgŰ‘,ô˛®ú»™b E˙Tđ×&łÇA`Q_o1 čdˇ^ě™( :`Ŕ˛ĐÁ]č´É58đy)ŚŮhŽn_@ ‚t%fˇ»"`3Yče›ČBčFD?ÖO#5šŁŰ×ĐŘ»OžMÝít'ĎOŔ·ÉBYčÝő“1=f˘ ˘c2ŞŁŰ×ĐČEşö\lEzŮbú©bč† :02±­{:Ć.€Ŕ^}zđl˛úcŞ$čcm¶ }^¸ş ÓľR‹§őü`tĐŘ7Ůçčsň" ˝l®.©ŻĹŁg‚čŔÄ{Ďcýđ˙đý°ÇI^\Ř™)‰˘ĹAÔ/«×Ą˘`b›őŁv«h1 }:ć Ŕ6b–ÉŞ˝ŚYč6•+fˇżkĆ@Oýĺ媭ŤAôŹő|`(â˝ç§c.töIVDŮ^®Ro#sQ,”+Š`Ł8Î{ˇż+Ö: ýTQt§˘?¬ç‡J€eé˝ç·9€˝¨łĎ_(‰b˝3f˘;ť(]lĎž:ĹH€ý9 ˛YJH˝Q ŚY}ř\IË.€í¸ ˝lqľĺ~€~ćë úBi{Y'ŽŚž:űb!§\v!µ÷Š X3y›ÉB×ç1j6ĽC~sőŠAôąŇ ôfŐ†iżjčôîÓgłŐ%Q,‹¨ţĘB_*‰bÍŔVdˇ—Ďćešpĺ 4ź+'÷e™ŻÚ®SĹpC€}x®еtŚ|Ɔ’rĹ{aeVlPgˇĎ•DŃdˇôßž®ţ8V@.ëŤ?Ü"€@Ż>=x6]ý1UĹ,„ĎÍA±bđ|¦ŚGâ\ô«> ůQp’ Ż(őX1|M€ľ˝PE›+¸!+Oź0’ţn©ż+Ţ´ŢĚ @ż}h NĹ şÓü€ÜÄ1ţăzm‹/ Đ›úŘŔ#%Q¬ąÜé˝"(ÖdŐ7ÍŔVÜĺZ>wˇĂ0ą–(sőF´á9W@&âďSk˝÷@ OlĘ&HwXM6ˇÚµK™dˇl×ßĹľn®$Š& †éPŃŹ^Őw żTŔžĹ ůăú„ î!€@/>=xć®Ů˛-ë !p7wĂ–ëP0`k˛ĐËgS3Ŕ}÷çĎo{Ńý<ß’:}9QE„o›+‚˘ &lAú Ä,t×jě·?Ť«‡«×Bi=<ß:}qDnŮćŠîWßĺ9)W &LŔVdˇ—ďLěązĹ{ŃéôAđ|GčtîÓgłŐJ˘Xó:8|›“Ę& ` ˛ĐaRĎŃŘżş>Ň}©4€Žž7 €@%Ęö^ŔfőDÄd¤\3Yč[“…nŽ@»sÉD«4€–-ŕy#čtŞľ_o˘$Ęd­X Ĺ[“…^¶™"ŘLú ČBç[óř©R€ŢűÖx¤{<Îýi¨˛ER]oÎëÓg®ŘŽ,ôňťé÷ňňÝź?Ç9ĺĂzn ĐT š?v-gsčt¦Ţµ>UE›+Ř^=1±ĐQ®D)€­úĽĄ±â ú˝Ĺ߼rőŠ™č˛Ń&ć«6ä‘ŕytşô\?Ř2Đ‚ÝÉČ+›“SôyŁę÷dˇäéV6ú\iŰŽĎWmDZbH'€@'>=x6 ˛řJçřvh ÎČ[(‰bą`·>o®$Š& Ę÷oE0čľöކ=^˝–J¸GL‚:^µ§Š˘čtĺµ"(ÚĺjŔu© ±÷Š h˛Đ¶' }ýž,t(ÚD ßwţĽX˝ęw;,Cußů\Q´G€ÖŐ‹/GJ˘h˛Ď!A=iqBąW}ŮT1lŐç-,ôŇĹůŰ™b(˘ß= Ő±î ĄÔmÁ#‰Pí@  ń@ ĺŠAż ĹÉlD)›“T¶'®|łú.27Ż­^ńH÷§Á±î0ę1xl âUŠ˘}čtáą"(Ú…´b®Š6HŘŽ,ôÁ°y  ¬ţ÷âÖ±îÖq`<âóţŘ}çÝ@ Uź<›÷o•N´ 
&8͡l Ćc"  Ěąçi¨Žuź+ ĽE|ŢWĎýBQtK€¶ 6>«~@;Ţ+‚˘ $lIşů{퇯VŻăŕ~t˛—ŽlďŹ:­ůôŕŮ4Č>/ť`´(©ÜIWş™"Řš,ôô{őĽŽqS Ü9čú~ôřZ(„ËŐëŃęŮ~«(ú#€@›d+”-îVž+hÝ;EP´ź<;P ›ÉB7Ż ›>yQŇcVúR‰@±Ţ¬žĺ<żTý@ ź<; v©—Nş1WE‹Áó™bŘš,ôňMeˇ CL”X˝â±îéP–uÖů©˘ŘtÚňBo® }őÝTž/}ŔXú˝Ą~odˇ «H‡rČ:Ď€:É>=x6 ˛óJwQ/vÝpÂCŮ&«ľN? ßYčPĐóŞŘ–@:dm±z=”užtÚ0SĹłČ Şw Ű9\¶çŠ`§~oˇ$Š' `¸}őí@şą*ě×rőzşz&Kpʇ:mT(|¶ś-tÎF•˛MëWŘŽ»Đ‡Ń÷MŔpŐôG«˙ú8Řüű3ÇăÚ/E^ĐHňéÁłŁŐ%Q4A=čA\XýqĄ$Šć.t€íű˝E°?˛ĐFŇoÇě×ŐŤÁôąÎĹ€ůőqí«—µ˘  ęGEP<#𼱝#E°Yčĺs ŔÄkXVŻx¬űĂşŘv-VŻxTűSǵçM€T‚ e›Űĺ˝râCŮ&ź<;T ”»/iť,ôÁp ŔřúđeťűŻŕžthĂ2TóÇ®Ň,:»<Üź©Źo?PEĚŐ»‹ÝkU6ÇĚ)†Ę†:ş" ]ß@ŮóŘŰ÷¤Ď•ě<>^=CÎËň÷Ś*yw’ľ#ŕ.˛đĘď·˛úgăJŮ~PŤýWdÍśŹNČB'°ŚĎ÷Џ«OŻŹwŹYé/Ťaăüj8ź+Žň ăűR"”M6ěAH0ľ*×T4fÓVŢlpŔ¸ýkNšľ5§˝Z˝ŢĆŔ`•wÍ{ÎŕďU(ňµTłplě•@BÁdá™÷ ul čJ=ît…IŮţ­ ëń©Mě­Źż••î®tĆ>źŠ÷›?8†,čqÇR¤ÍŮoŠ`pď`żăßy¬*™Ęfő~iŢ—ő÷ŁM˘kń¨×+ĹP,›ÇŔ3 ßK^Ýş+=f¦ż1ög$ć«W š?6§–żgô^ěDΗď`XdźC^*ĆŘ)s>Ć©ŢDc'Ś Ď_˝Në#Ţc@ým°‰ŽaYŹk˙O`X˝śĽ0@9Đßű:ňl<ü',ÇŠöŻc $06ďA–~QôÔ÷Ĺô…’€ńĚ{WŻ—«W<âýi¨˛uÓ)UÇ>­ď7?­O×f ˛  × &Qů±¨ |‹+Ęó˛ÎţňźwÄ1®:oŢ—źĄ;účŮS}_‘|g¤Î.ęl]ÁtŠš/…*Nö°>¦Ýé]#ń÷ĚŢŹ`mf …`†˛ĚµëĄÇÁ˘AiҸľŔ÷ÁŐ™:ÇúľâüGĐâxŕv0}}ĚűRÉ‘yř<Ű\ý™¬čő}¬o}-ŮpÄ/°©Ý–EVĐ /NLdŮ–Ć‚ z9Žikeü h›ĎřŔ†HöŐčűĘqe#.]Ž ęcŢ×w¦ÇąÂBɰqn×O×w››+ŤXnč±±ŚŤŁŚŽý;®74ll/‚…ŻÜ žC ˇ $çĎ©YíÔů¸qz®$öęŇř€ úľ‡úľbć|ĐËř ÎâQŮ«˙ůŻşŠó†ĄŇˇ#·ć1ă|nÓ<Ńßr|Sź<;Xýq¶zÍ|Eű™ŮY ěŘn®ţř¸z(ŤěĽ¬@íilGĎWŻ#Ąaś<’:şúăµ’čťÍuäÖÄ5 %‘Ą7ńŘRĹ0şg2Îď§J˘«çôoJ´M‡uŰôŁ6ŠD1hţKüS°śűdݱ®Ä8yŠ‹)2ýX„*Đb÷7Đtűaőš(Ť,ĚCµŘµTPd›čçĆÁٸľŻ×ńmťŐ÷i=ď›*ŤÎ-ÓĆČ{>q¦-Đ÷‘Ĺóxlp+Š:#žGÄ×Ćl1Šs _ŚmŘVökť…3[˝^A™®ÄăťE ĄvűD›˝×ÁŕşM_*(ľ=5ÎŁ]}żz˝µ+˝—:?­ë»ÚuUŹ,QJ[px«-°‘¬—ußçřŇq?‡§A˝¨çvőĽ>R h»ÔůĚ"TYć IŁ4QÔδU8©ľďWŻĂúkw›]ŐŻ˙Ô ÇB±µŮ±ť>ŞŰěşÝ¶Ön{ýę]”‚ć0ř…€ő1u‡J¤3ËşŤŤcĺ “ě˝×ůřúw¨6¨ű[Őßĺ­ńÁU°XÄpÚ‚ďë¶@;Đž«ps˙üşď3§ŕöł÷QIĂŐ,pw[¶>ňý‡z 1Q*¶¨çAâ^´ÂŃ.PŢB@\Ř% ŘöäqZx.ĂM ńú ü5ÖŚăĚ˙)‰bÄë抶jۦő=xö!¸VĄWßýůóż4në&ˇ ¤Żę“ S=G—á&`~é”-úđEŔ-ď‚z)ßĐP}Y|]¬˙Yť©ľŞ˙űÖ—­ŢŹE¸ą^f)»ś}‘Ŕgdˇg/fa>R Đ[›¸¬Oę×÷ˇ ŞO•ÎÎÖG°/WŻ˙†*hîj5˛"€/‡*Ht¨(˛OŹôç»?ŽAßĹ]˙îVp}ýç?oµťÓ·Së@ů˙­˙·ŚrJ!€ŻÔAˇłŐk¦4˛Źš>®y@9íé:Ŕľ˛G·í·˙yηţűoőźë@yt©}bиWřyŞ#ÝÝÜżŚŠó÷˛7a4íî$T§€Üv×?»Ëwüłu&ř¦¶ćň‹ćhuFI€­ÔÁôIřúbÚX˙©˙Ľ4rň7E‘'Ż&«˙ś|ńO—áן– ‡;ęËôÖ˙ş\Ő“+…rg9®ţóŕ‹ŞĽímnmóŻ?- ę`LAËĺ¬/i·lc}¨\Q×áÎz3 7kűĂ]Ó˙|Îvµúś—Ú m Qô»Çm*ŁĂzňęăę?żśč-Vďý±ď'ăĎŘuÇřäŐQÝ˙PżÇMď3ľź8půĎęu1ęE™jóGř:€ţfU.§ 6»ĐĎ‚\÷íC·6=ż_„»ţŇ~_Ř{}껼ڭËÓşţľ~6§[˝ç؇Uíí˘×‰RÎýÖvďýŹ;ţÍńę˝Í÷üަ ˙ćp š÷WÍĘäîńpü9Ź{m#Ňú·ÍíYóş–Íc÷îÇ9ýÍ;šW9Ěqú}~ős·ÇB»Ž)–őë·şÍ[ô>.Ęw\ż{}|ňę˙ÝńO竟s¬o]ʧ·Ö!¶ă/®żŻŞ/zmGňíwň#ć9ąęąŢ¤ÖőţçűXCĽŻÍ™†›µÚMßÁĺ­~~QÄÜńć3~ë3l|†«ĎzyëůXŕűNi+–·Ę˘„~±é<ľĚ~xHëÚéßA»ó×ô˛Í"¦űŹLľęŮęőşăFăËFü·˝LJ7wJ_š^˙»ýv¬}}?ëŽeßOĘgü[ĺëÂóŐë¨AĂ7˝U—ÎW?ëbőç/{JěÇÉ=ĺËötĎďí,l·ÖV»÷[G»îۇnmz~cŰü±áĎ~\·i»Hů}]Úöłô]^© ŘĆţX˙ŮÄaý:ş~Şg®Ż67Ż~k7÷˝ďŘ6ﻯjZu˘¦ýU›eRµ-O^=.dl¸M{ö±ŕ:ńf‹qT÷ăśîŢ{[ßU“ß5Äçwřý\5¦ŐcЦő~Rż¦·~nĂżŐ¦čĺ ćýűícgőBŢÓŃ®3|»Oę1íóĐ<·®żGőĎĽş56ľ(~~=Ľ1âÇQ–Wűuýóy`˙‰,ÝŻ!ܬŮŢŚ^ŚîÄÔŞîĚęş3IX;?+–Ý»PmîşĘč3NëĎ7 Í_ë@óíń̲~>޵éĽÝ¶"Ô/6m‡Kí‡s˙6/Ćzzžđłâóů¨Ĺ÷vžX'ŽĂţ×Ăßø¬ńTű°zýoŐpťg˛ëňEĂ74_~?VŻŮc‡ućŐÇşocçÖQ¨éÔŮěcrßł3Q˝Z·{ŻC|Č©ÝýL‚â3Pe@ź‡ćÁóMmn|ÖNëEunĘd¸{  mâ~ë úˇ˘2éÓ¦ő⡛EĂĂúçţQŹß' ˝•ő†ŹĆgwÖă?ęúÖf?»÷}¨×#ŚŤŮw]˙ĐQ]ż™»Í>¬źéußôűęu2řç:~ľ'ŻÎęş×ÖÚřn'·Ęń4Ď7»n§o֣۬żńgŵţßëľ`6â¶âË~q¦_¤5U"Ď"©Ťoëů¬bP)sŁE.É Wł®­ŹuŁ5ÝË;¸ŮÁv˙{ďdůvđw6čOZ ~ÝíÖšÔô‡QtÎU}ůÖç|=ŇgęŕV»÷Q°ŠŃřÜĂç; 7É}ÔŐIČuťż˙¶bn6ÝĚ´ăeâß?k©-?ŰóçhŤúçŤÖÇ=_·ô˙ú÷s^ü†5ř¬vúýŢăwżŹ`ńůőĆ:e€2­Ű˝3Á=ífŇ>žůu ý÷žňu·ůTYčlóLŮěkŢvşÝôĽÉ¬S@¦9Éś8Çzűńł:sŐ†8ú¨ëµŮ˝Íy‡uňeµŽđ!ôsŻó¤®+łž>Ű´^‹> űą·:~Ţó,ú‚Ş­8ßc[qMYPľęš„yb}‚uÝzÎ{6€:tö“ĚvŢů§jw÷±ť__PµëS s) ›$Ił·Żţţ‹ĆýU57Ii;ăű~“Sa  ßßá÷µëkÖŃ˙w ˛!”LJ=XÖ C\tŮ60~(í/“ŕX\†¤Úhő{&“Â/űň±Ţ˝ůbë2˛iíś ˘=Ś)fő"·ľé(8ŠĽ­9ńL=Ţ›×6Đr]? 
Up0·şľŽMGđ-śÔ'\‡öŮ/t3Ç©2­?†W“vňËeýľł"€Ţw‡őąmŹ2˝=ĐrŚÇç΋ܽY5(ątÔ‡;6m×rußîÝ:4ĘTí/a·îddĎÚ®mł,tvSŰĄ´?¦8eś·żë7Óśň8ńręńˇy(#©ëëöf6‚oeZŕF×\Ţoµą«ťgcjŘyÔ—Ďˤ´Ł^s쬇PŻ^4ţě26îę$µ5”ŁÔ•8±8,ô}ď˘i= ÁřJö:¦8 ĺbĆ2¦č§/ÂbuąőxęjFR×CO˝”6őy†ď釄gcťSâÜh}˝J{ď˝ĚŤ6ëqɩƞ†bVwĘťâۮ祌ݮB¦Ůç‘zű•ĄŹźç^ĐŻťłXZíČÉé˝ĆFę8Ç;&v,×IHŰŕąR&”űüçrOYSGlÔ˩ϛ$Lš]]Ă® ¤ô[G…ŹŹl‚mEŮwŹ–_ŹO\łĆHęzÝ^š‹I!rkwć«×Ó„żĘ8¶ý[óşvN%©ę߬ŕ˛x=‚v‚.üúÓrőźďŰďÓ-žŻ”úů®~źY@ßŢAkť}óŁLKxô?É-gbQ ţµj\ţvý áa¨îµXěřł.WŻÇ«ź1Ŕ÷˙:ąţČB˙ş® ‘»jaó6~Bxoô——MčC‡?· qbŘf,Ö«_6.>WÄiýŚ´11]/ś<LŤlçąş}ë§AÝ‰Ź¸Q0Ç6,őYž0öIýÝËËĺ2t{|ŰŇŁÁ7´ą!/Öµ‹ë±ę¦6¨ť1Ĺz±}±§˛ërürµÇ:±˘gÚ—t]ŹŻnŐăĹőř¨®ÇÓ~÷¤矪żŮ鲯ľ*¤®Ç:ţch/‘§şëúןN{ţ.ßlüť7§=ľN,ďĂëź•o–á$±ĎôU°»úßׯ'Ż^Öuöh‹çëiR9UßY›'3Ä÷ň.Tw/·¬//Ëô¶l0O(“¶ŰŠů–ă»IÝVĽí¬YMöÔN”ܧlÓŻÄď3eüśr"fU—şŰ˘Ş jş c$ż+fr’ř|˝lĽQ¨'˙cP5hÇ«ŠňKbŁ™> ¨‚Zmt “ëź5„Ýĺ7ßĎ»PŇš~?‡×ŮĚşrMµ›mSy.Vĺđ¨.ËĂ{:Žăe&´50<şîPňŻCŰďvUÍ[RNY”ź×îŐň&á'´pĽj›GÖĽŘ÷SMüÓ'·Ăž¨l? ľÎŤ ĺĂAĎovş·ax÷íV‹woł·şSő74ľöĂŽĺ۸őćůřçĂłPŰ7·6ľ?ŁÚŘF ěĹŽmEěŻ'-µË–žˇÓş,víťČGő/Ö町t‹¬ţLż-eđßGZ]ö•áőşźąoż%üÝIźďż ÷¬>g—ÁíăÁv¶U »í“†…~Ňî°>źYâß_Ď/[~Ţ.Ż'Ŕ›'DoBęŽî|ŰćIhď>Ŕ›ţ]đ“t1~®€/Ž~}ýŞS†hŰ›ÄönÝÖĄÄ$S7cőj¬ô˙Kę›ubÓĐnöůíNzh;řŻ©Ěx 釭OřáNBŰÎ>ok•Łß Kęsú´ł¶ńë@ľěŇŽ˙ĘßëLżsf‚čŔ­yzębsµŔŮ•ŻčëĹÜ…/oŻbý,łz|”\Ź»›Vcîűž“E]Ç®cąĽëzęŐ”yĎy†ĚK_sŚ?¶Ţ`UefĎ[ţ©×[.:=¤jç/zůŚéWĘ­7+îŁ_\źJ0ĐÍÜ"%x“[>&>_EťÂůwµfgM ]€^ ¬Ś‡žíşhiPŹŹűýzp6Ľ#Ç÷ý\ 1ÓńźšoŁÚ86Iř óΞżžUÔˇß[Őή÷ű ďęöev=éďř ¸Ý·¤é~č&>]śžCS'mČJ šĽëĽ^UG\żůblüTv;Î5J¨ë‹vçóÄ5z{“Üť„jU ¤źîá8ţţn·oěr|ů]¶]Nm+úßĹvâíýâS×óĐqÝ‹ő.Ą/L™?]–¶9ä#­&˙Nř»»ě»9Ęô¶¸řf@“Ž]űb'řäŐĽĄ:qŞŁ3âŃî˝Lň™8ÍB·GöżNśôäf@}^ë稠¸PXµÍˇĘtXŽŕ»éú ŚŘ6/<´ÔŽÄ úc Y9¬w´§“ň—Š’ćžËŢ€Şvę8ŰRLnĂu µ´»©bťń'đ± Ţ…ę.ľ!wĽ]?WUúŽĚ©6ńŽŕąbűüfŇÖ˙Ń0í‹Gܤ T–E|ÎŞu5^gĄ˙q}/ŰęĹ×ődşÍ>ocb“óÄú*3ŹÜ¤<˙ż)ľÎś„~®aq:mŹ›~wT&Śş hęBńq«íójő±&üÝ÷=ż×_ö4ßíN5^ž%ü„˙PÇşJN‹ßi ǵÚ\M––4Óo’ÁUbß3ÝPOc_śŇë‹xZJ_kţ©wŻďÍx2Đ«Ćó,±Ło’…Ůg.~¶Ó‚żŁÓšU\ÖńĺoęNż«ŰXçOBu7ŰýŁ÷ľ$mŘ•~Źâ‰P˙ŻĄźőxëő–Ô *ý×őEÂßÍocgđýřSňooެĚđÔĺŐdë¬ôXoZ:Ţ}˛§şÚÔBóh˙ŮáósiťŃ¨NKIąRaďJ}¶Ę o·cëÇşQž$ţ¶ĹŽďm}¬v_^\wň9e‹n÷ý†jŃ=őű)+(RÝűw6Ćű&şŢ©> UŔaö?[uŞĎÉDn÷íěĐîÍZ¨[żŽ+'*tÖ6·ŃŢěÚ6+xZí_«LôăA\ßl#eţ)čČ}}I߲ÔcF"ehŃű»­Öü®zž#íęß[¬/Ĺľ2ž6má÷-ЍiŐ˝Ô‡ˇűcĄăďŮč±\^îqCÓ>NX„”{Đ»łÔÔ2*żţtşjž‡nO>Y^˙žB !˝ĎËîw˝ŁŻŁLoOśâď<é÷S^ /Žâäşź z4 Ő.Çă–v8îĂëž_n÷íöü\ÉÖ%/iY6 ‡Ó6ËB§+ç­ťć ™ţ‡ű¬čÇę1Ś^śNÎ{űX‡š…ţżć…mfŞŚűiż+ţŽßëĚ÷7 Ëé}Á_RÖŚţ`|^†ôFľĄčä—ż«[[ě´H[eź7=Ętš/ňżŘă˝[űťř•š-Tí0|ú ě„j‡ăyquĄÚŰtđš~Ći.8E†Avmó,4ß­šŇ6żPřt$^ňŁbľáżŠ€Ťsîüűő±řMěÝű˘Ţm bWW Ě{ü­1)îcbŇ@)ĺ»č¸¶ăş\WĎä˘Ř«qkčŰ{ąă˙–Đ`ÇAĹ»†7ţÎŁ~?o o¨–!ŢýÂŰ묕4°H –ÄgŞi fšxG˘ç ˛”MFq|ŐtŮl¤›ąobÖîס"Đ—čKZh[K  üúSĚśŚŻľ˛ç«S'‡D7ʇÜös{#€ľť7 î!I ôĹ ›lŮÝbóâ?Eµ»1b }ŮóŔ,˙KŔnş9dYÖß{®vj÷–ňăóĽÚćiHÉ>ŻÚ™¦móúęXŹĄŽ‡0AŠđ˝"t_2ɧý·/ś‘řˇŔ÷<”yďUńăójÍůQčďŠĂőU C˘»ÇňjçâsÓv"Ý b č›]î|É}ęQ¦±bU÷ť4ť´Mę÷0±śž¬ÁІţv8ĆAËy%“Ŕ~W—íEÂ@j6˘,ôĹÎíô×F¦\w! 
(ݶy8ź'üŚvŻóE1‚č@÷ô=ĂîKb?2†Ó¸&ľlčTóůgÚĽ7'/‘śQ­ŐÇőçô´čiś±KÂÓo{©§yJ©o6H2fńdä¶bQWˇß“–;#€ţm—uǸ«6“żüď}ľ‡R\]?Őf!NÚç«˙\Ň»l­eůý_®g ?aîąÚ©Ý{`[[m›§!^qŃtR[mj ‰›ű†;†D6÷EÍŮ”7üľä´ľD=f,RĘÓŢßm5ďűÜóx'†~Ţ7,ę»Ńű¤ÇúÓGÂÓ>N#™î©-Řd˘©eÄc߸6÷˛Ąźör(1»¨ßlŚwΦeźß,&Ż;ĺ'Ż– Ţä: zűç Ë:x~9čZxT×î硻;îĎBÇí*%p=˙â9ž'üĽ…ţr°›6š¶{°źşÚtÁošq[Wš¶6 ®˙÷¬áĎŠ×ćĽőuđĹ*Žťb[ń1XĽ,mŚź:ľżTŚlŮN,WíDÓż}p}´ęĐçŁŰ[ ¸/‰˙í|Ŕőxâę®Ößá=ŹW u=ěˇÍNY»Bß2ĽŕůçőqqÝvT ?Żëďű “z7®oľCľ¬ &i×*ü_‡eqx˝ůĹş¨ůßxűÚ8ö}žŘ.,†Ôţ  ßím}uĎ~ď]™±ńč鳆?/.(1H OG×™U›!.ęÁYüng-ÎňÜtQíÚ=jíąŞ.~fĽo÷T»{µ Íč?öÚ7uňUőE)ęůW i›g^ ˇéŘérU7bv z9.ë¬čsnŮ´?; ěÖíí㶸Çrç÷%)›Kg˝Îos ŘëwĆŇf?ďąÍţ1qľ[ň\ýx‹€ďPúęóĆd›j\đ:´źĹüż¸grţnOď‡aŹ›ÖW2-p‡”ąÔsĹ7šľä˘îK®ÔădVăâŹőfLčł®÷7¬ęwJ"Čo…~?±­|4šŕůçýÄŐő†«_zX÷óúŃż˙2±Źęs>}’XÖ‹ŽŰŠţʢj“âzŃý"µgńkÚŽ/†Ö Wbó¦îäS˛ŇÚ_LN»´ď°ß«®źŐŹ88{Ú»{gšŐç«/~»{Ë­ůŔň y°—÷rQ˘Ôö®źľ±şÖ%NvcFP\(<Äśjb7Kř ďďůç)ăťI]ŢpWży=Η) |-%Hˇď__’k=µ÷3żŤcáj\×bŔŕTŢǺŢçZÎYâß/ő4щ*Ö÷¤Ç„§LŰBżQ]łŮ"áwĚz যó.zh+úßťŐĎÍQÝ/žx€ /c?Â=H~iĺČĎôěó÷ßřwż„ć ŐÓÝÍv˛ú<ż ňn÷j ;Η;›ss÷N¬ç ÖIvßwZpwľáß5ÄÁŢ&űç×÷Äşű®t±N˙w$ŇËÄ#ąb_ňK§»"« îůmZĽĂ¬ô#íR6 ,ďíĂ«ŁÖć cžçˇĎŁÖ(­Ý¸şuśűˇj©ýńŮőőP»śöëO§;Ď%bĆnn›ťÇŮ—Ä1hÜő!łľä"¤ÝÓţş®ÇÝÍ«`Äíů÷Aý{ă)xÇZłXOŢ·řłvy/ę+ëúe‡uý(¤eź/ ^›9 Őşćń j|µŮgąsü ú_®ţţúz֣IJÝToIü±éú(íÔëP¶=Qô"¤mdŮ}|׬_ś}Ń/žÝę—Ř»±Đ×ÇĆ翡›ŁRŽ˝Z~3 \ ă{ź4üů/6x^°Ż`Ż“ëŢ&ťtu‡lśĐ˙Ńx@’׆‹”çjľˇ ß…ćôÝ·ëJ\z¤[,Úű‘•ö>¤răY;™UÁóŹ÷üŰI¨˛ŃçˇÚ0UÖ•$ŐŽńYâ÷¶iBÜôçWW׌ńČ@¶'­č‚@pÓ.Ěúžő8ÚÝŤă©3Ëě6dµUŹ›®Cl?Ć÷u_€?–aĚş‹§Bľů=°ĄXÖöĺ"q>rŢa]? i›Y˘w…׏Yťř4/ľ¦W‰N7¦âÚ|“ąfµ~ýôúîćîÉ–ĎĆYh ž^źŇŐŹŐF„Łž˙műęEÂśď îçuTßęă{ţăş_Üo[ „!Đýéo™t¨)·Ë-îF˝ ÍčłşŃ]fńýT‹ď)“ÍáűŞ˛ “/&‚čW×»äŇ&Ď9”Í,¤eÄ˙w‹çj™đ;â÷¶Ź AÜôřž‰ÚÇ„ďď°Ó2´oŇču[ű´Őţqűgqn˛ŃK:U%őH±ĺ†¶ůŞ~5Ď*içJ†Ş[=^ŐĂóÄń;0ďŰéu›R×ÚĹĽč089#żľ$żSMRëqÚ:Ä·çőŰăîůNj˘{ď2­ë©k2·çą}ą;@W]·2ď;«źĘ=Y˘j»ľ FŻ75ý\ďCzySß”şÁ$žÖ÷­n«ŕůëägc·µ›÷!mÓôa˝éáxoý˘SZ`ďÜŢŽÔŕ¨d}ëu´ç÷Řn‡^eħt>‡ő€®\7ë“o č›NĆ˙ëąşţű›ž«IÂĎĎëÎĂj0•ü>©OC€üU§ÔĹ…uĆË´•÷TÝWő{Ř~Ńdťíóˇ—űĆŇ?_ę}eˇž(nj›Sť¦­}ź ˝ 9ŽüŞö`Ň7_Í®ŹYŻúĘ6űŢŮŽc ú«7Ő†¬\ú’vęqę:Ä—ő÷,ě–Ť;©˙YëĎCzö.[Şë´8ś…¶‚ç9śÂP%V¤”ńA¨2ýË{Žă{®‚¦w5ľŢ„?křÓűřnß´đ3^×k-–gq‰÷;ÖăŘ?/çQkýbUç;ö‹ë5+ý"ě‰zză7 edŻĚ˛ZśŻĽ©;ôË öÝY·Ľ6Ůőůc»»Ě |f!żűŘďTć5‘ŚĂ·‰?ĺĽ@´79\O‚›×ý¸đRÝMÚtc×Q=):ÉĽĽOB řĎ=lŮo·ÔŽĆŃ4T™ôő8&ąYŔOá8ěnű‘«Ě6dµQŹ×‹ő§Ťëă3đäŐˇyëIýly`ę˝Îëö:§ź†Ôħ´;¨űWIßNb[ohĐśöĐ/-[꓎’Ç4Őßý#´“u?ox:I>ýbU·š–çş_ś W˙PÉ^ô^c#}šŃd3Ţíţ6¤ TÖ}čUG»Ë˝›ˇ:ú(ÄßmuĚoŐ)&|/Wž«­M˛»o7î®T®H`řŞ{­â„¨ŤÍ,łPm6‹ílĽ‡{ńÍľĄZd‰Á硝Ł;ăł÷˝1OKăťţŻ®9čp2»,fśSf;rz}źazŠüźĄµ«žŽAüw‡źĺ˛ă1űřÚ´8žŻĆGÉeWÍQăŘ$ŽQ.vú®ŞĹűˇ˝MúýlÎsŰ´ţŻĘ „}'W´WŹC=ľ~]ß­ľ_m¨»ÓşţNZ™kWcě ő—{ęzęzâ—óŔmëú¤®ëĎC»Ńw™¬Ç­Ëřęúşł*ł>e^VĆ}č»1~Rľ—[őő7ý{S‹ţż/ë~ uSÇí1MµN˛iýłjO¬˙¤ĄoçŞńx&Ö˝'ŻÚzV__‡źŻmęź·Xë~q†ĺ@?LÎĐÓ:×PÖ݉/®y ČĆě«>燆i¨ŽĄ]Ö–ßęÄeýó&ő+uP°Č¤śJşă/Çűvă$'ĺ¨G÷ˇS’·ő­­vă(¬źĽZ·ł_?#ígbç–qđeŰ< eŰćăßúZ–.ÄĹ‚SŹz§ăŃyřDßż.źĄŰăÝÇ=|–Y‡sĹÇŹ?ÇÚ¦×s©6ú»IݦTľ«ďë˙îřŢÖó¸ď[üÝ7eÝßbáPžŰ”ľäř:P´˙ľä¸ĺştÓ–UëË{Ö+Ú¶lý^őwhŢ„vtŰÔő.ć!Tăň뫍 ©Öóľ˝Úqް;¨˙îYTýO˝~pUןu}ů±…1ár‡ď-n~}Á‡–J)–ŃI¨N‚ ·>ă—eqŘŮłž¶ů2–E[×áě±_ŚĎŃŰ0& ř>ďŚ÷ďuaí@~÷íV˧Éí›#ó(Aµ)ě8tsDéAXo`úüŐEß˙2« nĺ·Í3×Q°c[2oaL ”?¦xÚÁO>¬çpŻëyŰí×yýĎŰČű|^—c0f}Éqő¸«÷0ąglÜ…§*Öő.ćWY×÷Ş/I9 "÷űĐ_·ĐŽ­Şgußµ˙«˙ű‡ĐFđ|×ru‚鼣2;ĽăŮč*x~‘4®ĘîeĎmEíÄqz'€ŢTŐńż(đťżČnĐŇÎBE ÁľîŘ|żççŞË x×îÜ&9‹ęśűĐ)Cµ›˝äćYkWeź—ŘĽđpĐ ďDí@é§0-,›}Öˇ8¦ëjsç¶ďá"”±ţpź—ŽZe‡6»ô ŇÓ®k:»d@-çűĐß0öoşVÇ3%·Ą—­=ßUß<×/»@onĘĘ>_‹ď9ż@ó‚}ŐŽąEĆuc‘Ag\j°cšeÝ«v /Ű‹JCĆOóIaî őŻ ­łŚ3Č·-‰Ď¤ :Ś»ó¶yˇďľÚśž÷©6c—î·/©ć‚%Öăů@ʍĄŰç­Ô6űxăťŇy”q‰OłzcvnźmYŔ||žđ˝Ĺľ¨ÄŔk5'ks<ŻZ)·_,µŤâ  7WrVSž áăöĹç2Ó÷¶ßęUúČsŐIťĂ Pb˝ýIaűmó4”™}ľśx0hĐ–ÄgóQ(;cHkJ\d˝ŞÇÚ®|ú’}ŃK«Ç‹ú=ĂÚě㢂bU›–ú|Ć»Â3ülóŚëOÚýßeŃ»[')Ż­ëażĐ›(÷(ÓµI–»ţ*©ćĽ}7»6s –ĽÍ`×ëëÂ[†Y¦YčëÁr ÷ˇSŽr‚čůχŃ6ż…NödĘÍŘÚiŽŻçIe¨6ţžçV‡ÖAôĺžëńĽ€ŇŠÇλ÷śÔş^ÂŐŐ]Ć%f”¦šó˝˝Ş?‹ űö·-|¶’‚čÝŻ“”Ó/ žCĐ›yí3tÖ‰ ˙>ôüŽ˝\˝§ýWTžgž«Në\ęwě>tJ›Řç¸YimJžWŮçÓÂkĂÁ@úö7.D‡q·q ˝ßű¬7»¨ÇK_X¶s±ýžjR-Ŕ缮 Ťş~šy›˝¬ŰëyÁĄśzŻvÎ÷ˇ?­űÔ¬7Z\µôl¬ç59×˝yčkť¤ęs>ş˙­ŕ9äA}WéŮçq±hé•2`™ddC°/źŁA×Áü}K 
ô«ë>u˙ăţn®d©>ß:pśÓ¸a=ž9îußĹęÎŰĐEćůÝźoyÝWÇÖďŁ-^†*Óz˙}ÁMż¸Ż ó) ŕ+č»I=ĘômďémH †ĺ» <¦`_ ŞT§ÄĎŰŐ@i]ÖÁ¤ý«Ę)Gë.: H˝Iśśd\×܇Î8UÄăzśÚwn; z™U›»}ŰźďYÂOw4ńKi›'YH‰mIî÷ČýŽ)桟cPăďy\/®.| čKćÔ㧡żŃ:«ňˇSx›˝ 7óÓA_±Q‡zúí#Ď»n'u˙˛÷ú7 Ävąšßô1ľżăiÝ\dö}_Ö› úş+~q«_Ľ@–Đ·UëôűëNꦵ«[íăţ7ÇUëŃŹ[ţŚk·×Ąg,®’Üşę/ĂMrŰcý"äďoŠ2wły#Ţ ? ŐÍo Ú—u‡üźPeh/"ŔÖmíżoµł‡&>W·ÚŰ<ďoşl7fˇĘšůr\&CŚ)ëůŰ6cŠĺ­×zLˇ W_ňő=Âżţô·=żŻé­z<ąőÚ46ţ­®ËÖ"(ĺT×iRo&őúAüó‡ }ýzÝ`ÝĎ/ŠX;¨N ˝ý|„oŻI_Ýz>ŞĎYŤi®FÚV¬ÇrżŐĺr©­¸×˙€ž+i©IEND®B`‚python-pot-0.9.3+dfsg/docs/source/_static/images/sinkhorn.png000066400000000000000000001105241455713015700242620ustar00rootroot00000000000000‰PNG  IHDRčńţ$ësBIT|d IDATxśěťwXTG÷Çż»Ko˘€Š ˘ ‰bWŠ‚Tڱěƒآ±ü˘y5jěFě1F5Ć^bAŚT"vQ4@Xéu—=ż?|÷ľ¬» »° ¨óyž}9sćĚ˝sďą÷Ι Á`Ô*ř5mÁ`0 yf0 ŁÂ4Á`0µć  Á¨…0Í`0 F-„9hÁ`0j!ĚA3 Q ašÁ`0ŚZsĐ Á`ÔBtÔ-žžŽŇŇRmŘÂ`0 ăżđÔ]ęÓŢŢ/^ĽĐ–= Á`0P‰7č"##C¶0 Ář/jżA3 ÁĐ>,HŚÁ`0ŚZsĐ Á`ÔBf0 F­¤   Ćg ĺĺĺŐXÝZqĐ………ÚPËřČËËĂÝ»w‘žžÎĄ˘¸¸¸­b|ě<{ö ŃŃŃ5mŁ /^ĽŔěŮł!‘HćW×˝ääɓطoźFuŞŠĆôÚµkabb‚k×®©$߲eKŘŰŰĂŇŇź|ň‰¦ÍŃ%%%čßż?¬­­ńÓO?©U¶¸¸-Z´€˝˝=,,,ĐĄK-Y©=JJJźź_éňÉÉÉ4hćÍ›‡¨¨(¬]»ăĆŤĂË—/1qâDÜżź“•H$8zô(žůŤ5B˙ţýË•˝víZ·n ;;;xzzVşÎÚŔíŰ·˘_ż~čÝ»7 €ĚĚLŮŢ˝{srřůçźkČęšgůňĺřć›oĘ•ąyó&Ú´iĆŤŁ]»vŐdYůTćZyú{NN‚‚‚0wî\čęęĘä•w/™0aw/QçZđ÷÷Wz- 6 —.]™3g´ßđw! "‹©qăĆ€¬R™‹/R·nÝ5kÖL“ćh”óçĎ@|>źJJJädΞ=K‹-’K/--Ą .P×®] yxxT‡É!##6oŢL666\)ąąąäěěL/^”K4h .=""‚P»víŞd{VVµlŮ’zőęUˇlNNyyy˝WçFŇţ€ŚŚŚ(55•$‰śÜljĎçÓěŮł)!!AˇĚÇ@qq1™››ŠŹŹ/W677—zôčAnnnŐc\”˝Vťż¸¸8…ĺj{9r$9rD.]Ý{‰¦®…‚‚j۶-eddh¨…ŞˇŃ7čS§Nq‹9rŻ^˝Ş°Ś··w­}Š+KóćÍaffčСÜSđöÉôöíŰré|>>>>ďÍ›sbb"Ú·o8;;ă믿Fjj*D"QĄôýüóϰ··‡···Lş‰‰ BBB §§'“ŢŞU+řűűcäČ‘•nÔ©Svvv*ÉšššÂÁÁˇJőŐř|>>ýôS€ŤŤ ¬­­Áăńddˇˇˇ8yň$V­Z{{{9™Ź…łgϢNť:€?ţřŁ\Y4iҤ:ĚR‰˛×Š˘ó·rĺJ…ĺjsż~ý:îÜąŁđËWE÷’wďËšş 1räH|÷Ýwšh˘ĘhÔAoÚ´ |ţ[•b±Ű·oW©Üűpc°±±All,ÂĂĂqůňeą|"‘#GĘŐń>´¬¬¬°víZ„‡‡C(˘nÝşUŇwőęUÔŻ__ažµµµÜ…hee…S§NaĘ”)UŞPďż/ç§ŞH$üß˙ý÷YďcçŕÁŘąs'¨4ÖX›úIy×Jjj*^ľ|©´lmjGYÖ­[‡)S¦(´OÝ{IE¨s-Ś3‡FZZšZuTŤ9čgĎžáÜąsX˝z5—¶m۶ŹŔÓ$vvvčŃŁttä`۲e îŢ˝[Vi###xzzÂĹĹ…{ઠ‰˙ý·ŇhHooďZ{łřĐ‹Ĺ9s&>űě3ą·Ź‘‚‚äĺĺÁÇÇ­ZµÂ˝{÷đđáĂš6K#lܸQi€UmĄ¤¤gÎśÁŕÁćWt/ńńńQů^˘îµ`bb‚Ž;b÷îÝ*é×j/ő©Ś­[·˘gĎž6mÖ­[‡ääd$%%᯿ţB```ĄőŠD"ěŰ·7nÜ@RRlmmŃşukřúúbăĆŤŚŚD@@.\Č•ąsçŽ;† ++ -Z´@űöí1tčPN.)) +W®D^^rss1a¸»»cÍš5 …đ÷÷ǰaĂŔăńđÝwß!-- yyy077Ç–-[QQQذaöďßĎŐ=iŇ$@ đĂ?(m[tt4öîÝ‹/^ŔÚÚAAA2źűE"fÍš…ĽĽ<äĺĺˇU«VX°`®]»†={öŕĺË—hҤ úőë‡nÝşC‡áŐ«Whذ!ĆŚ¶mŰVúřk‚nÝşáĚ™3čŰ·/6lŘwww™ü1cĆpźą333qâÄ …Bččč`ęÔ©€””™7ú±cÇâÖ­[€ŽŽ|||ЦM› mILLD\\ ‘H`mmŤ-ZČÉÝąsçĎźŹÇ+W÷Í›7qáÂÁĚĚ 2źăăăqńâEdddŔÉÉ ýű÷Çţýű! 
1hĐ ŘŘŘh¬mQRR‚3f`üřńĺ꫊=ĺé± …°´´Dpp0Nś8»wďÂÁÁÝşu˝˝˝Ü±SUľ2ś÷îÝĂüĹ‹«­Kzý]ż~úúú ŹÇChh(RSS1uęT899©u¬Ęë7Ę®•„„üţűďřńÇѲeK=zŔŰĎÚľľľ m/Żż—×ŠŠŠŕďďĎÍÝąsŕóůđöö†‡‡‡ZÇđÂ… °±±ąąąÂ|uî%ĺˇęµđ.žžž8{ö,fÍšĄr™*ˇ‰ěÂÂB˛°° ăÇŹŃ?üŔTőîÝ»Âň .T$öćÍrss#]]]Ú˝{7%$$Đž={HOOŹĐľ}űČ××—ŚŤŤI,Sii)}óÍ7ÄçóiܸqôŕÁ …´oß>233ٶmŰŇăÇŹ9ý)))4wî\211!´aĂęÖ­íÝ»—ęÔ©Cč÷ß'"˘eË–‘łłłśť‘‘‘Bööö€Z¶lI!!!B»wď–iĎĽyó¸ ±m۶ї_~I·oߦ۷osm:}ú4'/‰hţüůśîţýűÓ˛eËhܸqtűömş{÷.:~ü8Í1fÍšE±±±IÇăQtt´Úç´,€†Z©ňąąąÔ˘E ®O888ĐČ‘#)44” ddÓÓÓiĹŠdii)“@K–,ˇ ŻŻ/íÜą“öěŮCEEEtçÎ211ˇ'NČŐݧO™ ±ű÷ď“…… ?~<]ąr…Ë5jyxxĐhďŢ˝ĺę‹Ĺô˙÷4dČĘÉÉálěŇĄ‹Ěyôč-Z´ęÔ©CăĆŤŁ… ŇíŰ·ÉŃŃ‘ŞÔ¶ňČĚĚ$äääDDo\úôéC”ššZnŮĘŘŁĘńxôčýřăŹTŻ^=j×®Ť3†Ž=JééétčĐ!jĐ …„„Č»Šä7lŘ Öqy—ˇC‡Rff&=ţśPÓ¦MË ›8q˘\H$˘ŔŔ@š0aeddĐ­[·¸~^TTD“'O¦C‡©u¬ő›&MšpýćÝkEjstt4íŢ˝›\]]©E‹´{÷nÚ˝{7w–˘jݍ?ššŇˇC‡čŕÁ´wď^*,,¤{÷î‘©©©Â@Żň>}:Ť5Jiľ:÷)UąŢĺďż˙&ccăj ¨Ô޵k5nÜÄb1˝|ů’ttt8ńěŮłrË+sĐÓ¦M#äéé)“îăăChÚ´i”H7nÜ "˘őë×+Ťhüő×_ µnÝZ.»gĎž\ýçÎť“‰F ăäćĎźŻ4ÚĽcÇŽśU†ÔA›™™ŃĚ™3eňÚ´iChČ!rĺfÎśI¨QŁF4mÚ4…őÖ«WŹV¬X!“gggGhňäÉJmR…Ş:h"˘×Ż_Spp0 îâ@VVVtőęU9ůŕŕ`…‘˛AAAäŕŕ@;wî”IďÚµ+ůúúĘÉżë ăăăÉĎĎŹ>|(';jÔ(Ş_żľĚ9W¦ű—_~!ggg‰D2éÉÉÉ¤ŁŁ#÷PÔ«W/jŐŞ]¸p–,YBżýö[•ÚVeoJąąą4věXZ°` ŔŔ@•n0ęŘŁÎńčÝ»7YYYQbb˘ŚěńăljÇăŃŃŁGeŇË“ 'Ż*™™™r×›ôz’ŢSˇČA‡„„źĎçś=Ńţó211ˇ7oŢČČŞs¬¤ýćďż˙&"ĹýFz­Ľ{N˝˝˝Ëí7ęôw˘˙őéKKYů¦M›Ňž={dŇ===ÉŰŰ[iýŠ0`Íź?ż\™ňî%×®]““×ĵ %66–Čśgm˘‘1čM›6aâĉÜçc[[[|öŮgŇ7tlÝşµRzĄóŮ5j$“.ý˙öíŰhܸ1<<< ‰0ţ|ŠÇ!şwďΕy7DůWTT___DEEáüůó6l'§hěą2äää`úôé2iŇO[ rňRű’““1sćL™úőë'×'6löíŰËť'Ľ~ý^^^€ `ěرk›2 0mÚ4,Z´‹/FĎž=qüřq„††VXVU{Ô=ŽŽŽhܸ±Ślßľ}ѬY3Lź>TfuĺUĺčŃŁrçzřđáŢ‹©ŁóęŐ«066–ů4ëě쌼Ľ<Ü»wŹKS÷X©Ňo”]+Ş J/k‹˛ţđôéSîž/ĹÉÉIí~›––ĆEÔ+ŁĽ{IŻ^˝”î¶X•kAJ˝zőŻ_żV˝QU ĘúÎť;ŽŽ†‡‡îÜąĂý¤ věŘQ©Ő]¤c§ďî?-ýżuëÖ\ÚăÇŹą…4¤±,e/ś7n(¬ďÓO?廝ťşwď.3f­)LMMĺ¦˙Hë-/¨ÎÔÔTî&%-×´iS(Ě«M"fffđ÷÷ÇŠ+đŕÁĚ1999ذaĘ:lmmadd$“&Ę]HĺčŃŁđńńA·nÝĘ˝ˇ©˘űîÝ»ČÍÍ•;R#×ç+ şSĄţ+W®ŕäÉ“2żS§N)ÔW\\Ś+VŔÎÎ<żýöĚĚĚ0}út•öuWçx(›Ň¦ěxĽ ŹÇC›6m””” mSWţ]öíۇżţú “&Mâ~‘‘‘€ýű÷«uÝ4kÖ ąąąČÎÎćҤLÎÎÎ\ZeúŽ*ý¦˛¨{-)“ŻĚ5©ˇPSSS•dŐ˝—”w-$&&ŞT§ÔŹ…BŐTEŞ|Ö7mÚ+++Ěš5 ÁÁÁÜoëÖ­ÜÎČČŔˇC‡ÔÖ˝`Á888ŕźţÁÖ­[ńđáCüúëŻÜJ8‹-âd+z˘)ŰÁ•=ŐUôä¦)ôôô*µ\^9U#jŠĺË—+LçóůX˝z5śśś”>4)BŮ— eo<7oŢÄëׯ1zôh|őŐWHNN®’¤reutt ‰äú™t}y¨R˙÷ߏAÉü¤o~ďR·n]™i)Ť5ÂĎ?˙ŚśśŚ7®B'¤Í㡠(wzPU䥤ĄĄÁÂÂëÖ­ĂŇĄKąßć͛ѣG$''ăĘ•+*ë›2e lmmŕí׸]»vaîÜą°µµĺä*s¬Té7•EÝk©<»ŐŃŁ 33łr—Š®č^âčč¨ô^R޵0~üx•Č TźŻ¨’ÎÎÎĆž={pčĐ!ÄĆĆĘý–.]ĘÉnŢĽąRúíííqŕŔÜ»wK—.ĹÓ§O†™ůąnnnÜßąąąrşrrr¸ż[¶l©°>MOő1bbbb4Şó}$22˛Ü ľcÇŽ>P„ŹŹ&NśŐ«WŁaÆ9rd•ľ*H#GËľ-•%++KáWMń÷ßٍ¨Hć§ĚEŚ5 }űöEDD7ˇ*hňx<ţ|>_ćzÖ¤Ľ”"((–––rż   ŕff¨‚H$Âřńă‘‘‘iÓ¦aÖ¬Y5k–/_.s_©ÉľsđŕAűě3­}–¬*<[·nEÝşu1{öě*­yüďxDEEÉĺ);Ň7‘˛äääŕęŐ«đ÷÷—†PWľ"Îť;‡ž={*ĚűěłĎ ŻŻŹ@,«¤ďúőë°´´ÄÂ… ńË/ż`ăĆŤ4hÜCuöcccq˙geeieČN“X[[—ű5DÓ÷éµPŻ^=Ěž=Oź>-Wţőë×ĐŐŐ•FMMMEjjŞĘőŞŠÚ˝€PXXřřxüňË/đööFnn®ś#’H$011‘ ˛Ú´iÄb±L§‹Ĺ\Y"’ůßŮى‰‰hܸ1Ú´i¶mۢk×®đóóĂرc±oß>™1Ž;vŔÚÚáááŘż?÷¤•––†yóćÇăaőęŐÜĽ×wë“H$2˙—E‘ťeźä¤Ëx>xđĹĹĹHIIAJJ Z´hÁÉKÇ—ËÖű® šĚ“Ú'm—:ꛤçI‘ž˛mQ•1cĆŕŮłgré·nÝÂŁGŹđő×_ˤË|ő(›^RR˘0]ŃÍô]y777Ś?óçĎDZcÇ*Ą[  44W®\‘sJëÖ­žžžÜ†*ĹĹĹr ôWµmĺńćÍooĘŠúł­­-ľúę+`čС ÇÔÔ=WŻ^Uůx<}úTn÷¨ĄK—‚Çă)|“Q&ĎçóŐţ đ×_áúőëJžëÔ©Îť;#==‡–ËWt\\\\°dÉüřăŹŘĽy3¶oߎ}űö!66V.ŕMťľŁjżQt­xxxŕéӧܵš(p[Ők©˛zĘĂĂĂCárÉeQ÷^˘Îµ0dČrÇ—oŢĽ ™ťüü|îÁKă[SŞöť””D|>ź ÉÔÔ” H ĐąsçdäVŻ^M|>źôőőÉŘŘLMMÉĐĐtttČÁÁ“ÓŐŐ%}}}211!###tŕŔ""’H$Ô«W/™Púw®®®2 geeŃ7ß|CNNNdkkK-[¶¤úőëSŹ=čźţ‘±1&&†ČŘ·páBąvKí”Ę ş}ű6—_XXHcĆŚ!===˛··';;;:xđ —'=ŇvęččPHHĹĹşϗ±A Љ'Ę-÷äÉ…ĺŽ9B‘‘‘$äÎQŮEä+ÂŇŇ’tttdÎź‘‘ééé‘@  ެ«]»vtöěYš>}:Íś9“Ž=JQQQôóĎ?S›6mdÎËÍ›7©gĎždiiIĆĆĆäééI—/_¦ččhęŮł'YXX‰‰ yyyŃŽ;(&&F&˝{÷î´}űv '???ŞS§Ő­[—zöěI—/_¦„„jҤ ‘ąą9ůůůŃ… ÔŇ-ĺÎť;äëëKóćÍŁť;wŇřńăiđŕÁôęŐ+N¦¬fffÔ©S'š9s¦ĚÔuŰV·nݢ>}ú‹‹ ™±±1}úé§Ôż™é>Ë—/'[[[®Ź4iŇ„†^%{T9Do§ľučĐÖ¬YCkÖ¬ˇŁGŹŇرc©oßľôâĹ ą6•'ź””TQ÷ă¸víµjŐŠëË 6¤đđp™#GŽP÷îÝÉÄÄ„LMMą{Çţýűeú¦‰‰ uëÖŤ›˘”——G=zô ĆŤ“‹‹ 5hĐ€›äčč(7ý§˘cĄ¬ß”Eٵ"E(’‡‡Ť3†6lŘ@›6mŞÔůUUŢËË‹~ýő×Jő[)Ź?&===***R_Ń˝äúő뜬&®…w1bmܸQ&M,“——uíÚ•›j¬)xD•źP Ł_ż~¸~ý:/^ gggđů|ŢĽyřřxl۶ 
IIIX´h‘ĚJbRŠŠŠźźĎ’T—ÝoŢĽAýúőkýç¤ęâúőëhßľ=€·c†QQQČĚĚ„››:uęTĄi"µĚĚLĽxń...ÜÎÇLEÇŁoßľČČČ@tt4„B!’’’ŕââ"\YůęF$ˇ]»vřńÇѧO.]"‘ 55ß}÷.^Ľ„„ąOŢÚî;D„¨µqLMÓąsgĚš5KáPdMŢKŠ‹‹aooŹ7nČMýŐu÷äňĺË€FŹ­TćÜąsJ÷`0µé±¶ä«›Ó§OS›6m”ć …B@ÉÉÉŐhŐűËţýű©GŹ5m†aaaÜ*nŐEíŚbТE XYYáďż˙V8-F"‘pc˝{÷®nó F%)--U+†A]ůęĆŐŐĎź?W:}/::Mš4ŤŤM5[ö~2xđ`D˘ Ǣ«“ŇŇRlذk×®­Öz‹ĘN&®Eˇ_ż~xüř1fÍš…{÷îáŢ˝{¸víţřăĚ1IIIX±b…ÜĘ: ŁöqëÖ-¬\ą'Nś@JJ ^ż~ SSSĄ^¨+_S››s+€ĺççs3:ţý÷_lٲ‡Ćźţ©p%†<<]»vĹ—_~‰ˇC‡ÖŠ5/^ ôëׯZë­µcĐeÉÎÎĆ‹/’’‚‚‚ÔŻ_ 6ü¨7™g0Ţ7„B!^˝z}}}ow˛´´T:§T]ůš†˙ý™™™hĐ Z·nŤ¦M›Ö´iď%‘‘‘8|ř0~ú駽ϟ?X¶lYµŰń^8hÁ`||ÄÇÇĂĆĆFnăęäńăÇhÖ¬YŤÔÍ4Á`0µZ$Ć`0 ĆÇ sĐ Á`ÔBf0 Ł˘öB‹-Â?ü  [ Á`ü$Ć`0 F-„}âf0 ŁÂ4Á`0µć µžňÖbV¶§/Á`Ľď¨í ăăăńé§ź–űóôôDpp0,X€»wďjĂn­SRR‚ţýűĂÚÚZnłůŹŤŔŐŐvvv¨[·.ľüňK­Ö—žžŽ+VŔĎĎĐŐŐ…‰‰ ÜÝÝ1}út¤ĄĄA$aĚ1řţűď+]Onn.‚€'Ožh° ˇÔÝţŞ  €bbb(&&†  “'ORLL ýóĎ?tđŕAúá‡ČĚĚŚŹŹŹÜ¦íµťóçĎsmăóůTRRRÓ&ŐąąątîÜ9jÔ¨ QŁFiĄžŇŇRZ±b4hť:uŠI$ŃăÇŹiĺĘ•dkkKm۶%4kÖ¬*ŐYTTD¶¶¶Ô±cG µ‚Á`04C•övssăśXRR’\ţť;w¸ü-ZP^^^UŞ«VRRR¸ŚNť:Ő´9µ‚€€­9h‰DBŁFŤâöď߯TöÉ“'T·n]Ť8h"˘Ĺ‹ŠŤŤ­˛.ÁĐZţä“Oŕččxřđ!Îź?ŻÍę4ŠŤŤ bccŽË—/×´9µmîä‚ĐĐPŔ·ß~‹!C†(•uvvĆŽ;4VwPP ,, Äf2ŚZ‚ÖĤ[Ĺ@BB‚¶«Ó(vvvčŃŁttÔ^Ď…ˇůůů(»-ů´iÓ*,ÓŻ_?ŘŘŘh¤~GGGtęÔ {öěaAg ŁÖ UĎ“››‹ÄÄD€úôéŁTöĘ•+8uębccQRR‚fÍš!88m۶•“}ůň%öďߏ¸¸8ÁŰŰC‡…©©)VŻ^ C$áŰożE^^ňóó!°gĎ@ZZ–,Y‚üü|äĺĺÁÖÖëׯçęřî»ď––†ĽĽ<››cË–-€¤¤$¬\ąyyyČÍÍĹ„ ŕîîŽ5kÖ@(Âß߯ “yŰT§m˙ý7Nź>Ť/ľř­[·®Ôqʉ‰ÁŢ˝{‘””„ôôtŘÚÚ˘yóćřňË/5ćÔîßżŹ'NŕŢ˝{¨S§ÜÜÜđĹ_ŔŇŇRm]ČĚĚ4iŇ 6¬°Ś@ ŔŚ3PŻ^=µëSDPP¦L™‚ČČHřřřhD'Á`T‰Ş|/o :%%…ú÷ďOČÜÜśŽ?®PGII Ť7ŽĐ°aĂ(!!ŇÓÓiĆŚÄăńhńâĹ2ňëÖ­####233Ł 6PLL ­ZµŠ:věH$>źO .$""‘HDóçĎ'{{{@śžôôtš7o™››ňđđ©gٲeäěěL¨Yłf2íš;w.™Ú°auëÖŤöîÝKuęÔ!ôűďż+l[|||ąm{ńâéččŞ[·.©u>îßżO<ŹôôôčÉ“'”——G§Nť"KKKŇÓÓŁ“řGˇË IDAT'OŞ­SJź>}YYYQ‡čŔ”H.\ GGG˛°° ¨­÷?˙ů׏j*X+--Ťttthüřń5R?Á`Ľ‹Ćô§ź~J:t ˛°°ŕŇ-ZDJu,Y˛„««+•––r饥Ą\¤î­[·čرcśŢ'NČčٵk—'uĐRfÎś)ç Ą*tĐDDóçĎ—sĐRzöěÉĺť;wŽÄb15nÜPXXÚm#"zřđ!×]]]ĘÉÉQzÜ”±{÷nâńx€î߿ϥ/[¶Ś;emQ©nŢĽ9Čä]ąr…ľľ>ĹĹĹ©ĄwňäÉ\»ýýý+e›&čÓ§™››SaaaŤŮŔ`0R46}âÄ DGG#&&W®\źźŕ—_~Add¤Â2EEEX˛d Ŕßß|ţ˙ĚáóůÜ'ń#GŽćÍ›hÚ´©Üçň   ››+¬GWWW©Ýĺĺ•7ö,-WTT___DEEáüůó6lÚm€ćÍ›cýúőđőőĹîÝ»ajjŞ´~e 2!!!8xđ ÜÝÝąt777€P(ÄóçĎŐÖ[–úőëĂĐĐP&­S§N¨WŻŠ‹‹±téRµô•=oYYYU˛­˛¤¤¤ -- YYY8uęTŤŘŔ`0eŃř4ŹÇCłfÍđçźÂŃŃoŢĽÁđáĂ‘śś Ů ¤¤đäÉlذA&˙ńăÇ€/^ //Ź=¸şşĘEóů|4hĐ Úođź~ú)g‹ťťěěěwîÜQąme™>}:¦Oź^i{ôôôđő×_#11ŔÝ»w‘(ă”‹ŠŠ*­_<¶¶¶xóć nÝşĄVYWWWîďÔÔT•ËĹÇÇŁ¸¸X¦|exůň%†ŽĐĐPtěŘaaařüóĎ«¤“Á`0ŞŠÖ‚ÄęÖ­‹¦M›"&&EEEÇđáĂedňóóążëŐ«™|ÁŐŐ™™™ÜeoĘ5Ať:u¦«Ó6MňäÉL™2áááh×®fÎś‰É“'ăîÝ»ĺéiéWu’ §§‡’’$''#;;[éq-˲eËŕęęZĄcřňĺK`۶mhŢĽ9† ‚]»v!33uëÖ­´^Á¨*ZŤâ.ëH}Výä“O¸ż---Ń·o_ĄşćććČĘĘBrr˛Ru)..V»LY”Í V§meIIIÁµk×Đ˝{wµD^^şwďŽäädŚ9;vě€@ đ6ęZŰäćć€ÚŃç–––:u*Ö®] ±XŚĐĐĐ §ZI$ś9s¦JËŽ&''Ł{÷îXşt):tč5j¶oߎ`„ •ÖÍ`0UE«ó ­­­ążË:[·náĘ•+¨S§ 8wîśÂ9¨ˇˇˇřţűďÁăń0pŕ@ŔŤ7ťť-#' ŻĐcccŰpA"‘ 66¶-«uÚ&%33ź|ň  „Îť;+Ý B7oŢä^‚‚‚8ç (~@’’ššŞÖuEv%&&rÇ˙‹/ľP[˙âŋѩS'Ŕ÷ߏ۷o—+?gÎtčĐíÚµS».ŕíĐ‚··7FŹ-ł(J—.]ŕää„°°° u0 †VQ7ŞL"‘H$"‘HD-Z´ŕ˘oăăăI$ÉDoذ˷±±á˘cűôéC»wď&""ˇPH...€ćĚ™Cb±Š‹‹éČ‘#dkkË-Á••ĹM}š4i'›™™I~~~Jُ#""¸Ľ´´4®+W®¤&MšŞ_ż>˝~ýšÓ)‰čűďż'äââB"‘$ ×~@#FŚk·”wŰ&‰”¶ŤčćÍ›śťH(Şun˛˛˛¸u¬gÍšE‰„222¨WŻ^Ü4°ť;wrç"//ŹŚŤŤÉČČ233ËŐ/Ťâ611ˇß˙ťÓź››K 4räH.]]ýYYY4dČ@ĆĆĆ´téRzöě™Ě±}řđ! >ś:wîLŮŮŮ2ĺU­+11‘š4i"g«”~řëÓ QS¨í ăââÇă‘®®.‘©©)“ľľ>ńů|Z˝z5'+‹éÇä6Yh×®őíŰ—şwď.ł.waa!ýřăŹäęęJöööÔˇC˛łłŁŔŔ@züř±Lý9994gβłł#777ň÷÷§Îť;ÓÉ“'I___ˇ&"úé§źČĆƆśśśčóĎ?'OOOZ˝z5Íť;WĆ)J§oéęę’ľľ>™šš’ˇˇ! ş}ű6ĹÄÄ@ .OGGGaťę¶M,Ó°aĂČŘŘćĚ™Łî©!"˘ęÖ­éęęR۶mÉ××—|}})66–Ö­[G666ÜLDHHH@FF4h;;;Ąc·"‘YYY°´´¬pígé§t:’Š‹‹±páB™Ą#ĄH$dggُ¸ 4ŹÇCVVŠŠŠ```CCCčééimÍiuÚVUŠ‹‹! 
aff“ ĺcbb’’‚ţýű«\Gii)222`ii)ó9]Sú% ’’’  a``gggčééUX®˘şžŰ·o×ę”"m믩ş C›TË´¶IKKCzz:ôôôŔçó!‰ ‰Đ˛eËš6í˝@:˙ľęŻ©ş C›|šÁ`0ŚŤâ÷»([°Dę,ĐÁ`0 FuńA9h"ÂúőëąéWeQ¶*WTT"""´mÁ`0jQ-Ӭދµk×ÂŃŃ­ZµđÖa/]şŮŮٰłłĂÓ§O1qâD™ą­ÁÁÁ6mLMMąő Á¨i>1č””Lš4 ÇŽăYż~=ž´Í_ýoooĚ›7żüň sÎ X·n¦L™˘µ Â„††ľýö[9ç\gggěرC%˝D„}űöaذaĺĘÂÂÂĘ•3f >Ś´´4•ęg0ŞóA8hKKK9Gěččzőę)tĐíÚµ“Ó‘‘‘ĆŤkŐNU122‚§§'\\\ŔçWß)""¬[·cĆŚÁÁ1qâÄj«ű}˘¤¤gÎśQ8†« ňóó±hŃ"Đ'XL›6­Â2ýúő“űФ888T¸ ™ŁŁ#:uę„={ö”xfbb‚Ž;Ę^2ŚŞSk^ŤD"fÍš…üü|äççC `Ďž=Ţ®µ˝dÉäçç#//¶¶¶Xż~=WÖÉÉ YYY())áö ćńx8p ę-KJJđŐW_!22WŻ^…łłsM›¤"ÂéÓ§ńŕÁ4h€áÇCOOwîÜAzz:ôőőQ\\ ccctîÜĹĹĹ8ţ< AD000@çÎťŹ‹/B(ÂŇŇÁÁÁ8qâîŢ˝ tëÖMáV—.\€ŤŤŤÖ¶*Ťŕ,›4i‚† VXF `ĆŚN›Ú˝{7Fڎ’AAA2e "##áíí­TÎÓÓgĎžĹĚ™3Ů4ECÔš7hŹ333ś?ű÷ďÇŮłgą<>ź9r@TT”LY}}}´oß7nÜI_ąr%®_żŽ_ýŃŃŃÔˇCąt‰DBÍš5#{{{’H$\z—.]hŐŞUUhMůĚ›7Ź;ť:uŇŢAŃĹ‹U–ůň%µk׎ĐÁ•ĘĄ¤¤Š‹‹Ó„™ ăżÔš1čŞÂăń°mŰ6üđĂřĺ—_ ŻŻŻRą„„\¸pˇJ«*˝Ďl۶ ŁGŹüůçźđôô„X,Ćwß}‡Ž;ÂËË«f ü/wďŢ…X,ĆëׯeâŕŮłgHII‘IűöŰoqőęUôë×_|ń–.]Şvť<mÚ´Áľ}ű’’ÂŤ …Bšš*,“™™‰îÝ»«5%ÎÁÁAf‘śćÍ›s«3ő/>>ĹĹĹpuuUh×ýű÷áéé©’®—/_břđá E‡°gĎ 8Pˇ¬t,^(Şl+Á¨ĆAoW [˛d """ЧO•ĘDFFbŐŞUmp‹÷wÇŽ±fÍĚ1ĄĄĄ6lnßľ­Ö8¨¶Ţüýüü0eĘ™Ľ)S¦ČE»óx<¬^˝...xţü9¨RçX:˙ňĺKÎA›™™ˇ°°Pˇ|ÝşuqýúuĄKq*âÝilĐÓÓCII ’““‘ťťŤ:uęT¨gٲepuuUč 8€A©4+ŕĺË—Ŕ¶mŰĐĽys 2»wďFff¦Â) ’Ť CuŢ+ýî˘#Ѝ_żľĘ΀ʭď˛téR\ĽxQír“'OVú&R6m._ľŚĂ‡#55_|ńÂĂĂUŢB[ÇEŘ÷ćÍ•će‹Ĺb¬X±‚{X۸qŁścW…çĎźĎçË,ÖQż~}dgg+-Ł««[ĄĄ?---1uęT¬]»b±ˇˇˇNµ’H$8sć ľüňK…ů»wďĆöíŰ+¬;99Ý»wÇŇĄKąÝÝFŹŤ;vŕŔ0a‚\鱨_ż~…ú †ęÔ:-]päÝ7‰D‚ŘŘŘš0I!S¦L)w5&ehëm455ĹĹĹ2oÄ•ÇăaÇŽ¸{÷.ž={†ż˙ţ‹-Â’%KT*Ż­ăâěěŚV­ZáôéÓ mٸq#ľţúkî˙E‹aúôéhٲ%6mÚ„‰'ÂĂĂť:uR¨_úX–śś\˝zţţţ2 áX[[ăĺË—Ş6­R,^Ľ×®]ĂŐ«Wńý÷ßĂÓÓ­[·V*?gÎtčĐAá˙çĎźC$ˇYłfĺÖůâĹ tëÖ ăĆŤ“YĄK—.prrBXXBýúőkčęęVzg,ˇ„š—.@&--ŤŢë¬\ą’š4iB¨~ýúôúők‹Ĺ5l­ö‰DÜŻ^˝z€ĚĄ•m{^^“‘‘eff*Ô'‘H¸˛-Z´ŕŽqDD„śľ˘˘":xđ '€öíŰGůůů•ŽîÖ·nÝ"}}}ąőŻŻ^˝JË—/'"˘’’Z¸pˇ\´rßľ}©aÆrkŽ˝ 344¤k׮ɤϞ=›ęŐ«GIII2é›7oV¨¨i˛˛˛hČ!€ŚŤŤiéŇĄôěŮ3™sđđáC>|8uîÜ™˛łłęůá‡hÆ ĺÖ•HMš4ˇ‘#GĘĕՀâăăĺňBBB¨GŹ Ë1ŚĘSë4ŃO?ýD666äääDźţ9yzzŇęŐ«ąoéďřńă5mŞÖ°´´$Ň××'ccc255%###ŇÓÓ#@@ädĹb1uíÚ•śťťéôéÓ őĹĹĹŹÇ#]]]244$SSS266&}}}ôő×_s˛łfÍ"@ S· ÚĽyłÖŰ^÷ďß'oooš>žôôôhĘ”)ry#FŚ Ť7Şß8Q.<˘Úąş€D"Avv6Š‹‹Ń Ađxź/1Ş­Ą?˘ŁŁPÓfT ćććZé*­¤6cĆ lÝşµZ4źĎ‡˝˝˝ÂĺG•±k×.Ś9˛Bą¦M›V(Łh)ÜÂĂĂ9gC ÔšĄ>U٤¤/^Ľ€““SM›ňŢQZZŞÖ´(f0ŞH||–,Y‚Ö­[ăüůóZ«Źˇś6mÚ`ýúőČČČ@vv¶Ú«“1 FYjÝZÜ—.]½{÷ŕŕŕ€Ţ˝{#''ÎÎÎČČČPş—ouáďď??żr7(K«V­ŕďďŻŐő±sssńŐW_ˇyóć¨WݍA,,,ŕďďüűďż5mÁxĎ©uúŢ˝{ţ·0ż™™?~ ‘HÄ­řTSŚ1B-y+++ś:uJKÖĽĹĚĚ aaa€‹/jµ.†j°•µ †&¨uź¸Ąë¦đů˙3ŤĎç׸sf0 Ł:©5oĐŮŮŮ;w.·;P\\&Mš7nÚµk¸sçŽ;† ++ -Z´@űöí1tčPN_RRV®\‰ĽĽ<äććb„ pwwÇš5k  áďďŹaÆ©ô¶…‹/˘GŹčСJíÉĚĚĉ'  ˇŁŁ©S§RRRˇPşuëběر¸uë""" ŁŁ´iÓF­c÷±PXXm۶AOOcĆŚ’““ąăܵkWřřřT›=©©©Řż?âââžžWWW|őŐWĺ–Qµ˙2 F­yÖŃŃ»»;4hŕíFđîîîpwwGť:u ‘H0sćLxxx )) ‹-ÂüŽ;bňäÉčرŁĚ¸źŽŽLLLpčĐ!>|Oź>Ĺ#Đľ}{?~_|ńBCC+´ëěŮłHLLÄ_|^˝z©I^ZZŠÔÔT,[¶ [·nĺŇE"’““±zőjěÝ»ˇˇˇxôč¦Oź___x{{ăäÉ“j˝ŹĺË—cŔ€ŽŽĆŚ3pěŘ1ěŰ·ýű÷Ç´iÓ0jÔ(ěŮł§Zlٰa\\\°qăF 8?ýôZµj…~ýú!!!AN^ÝţË`0 ZĆňĺË 999ɤŻ_żžP»víH"‘Čäýú므Z·nM%%%2y˝ző"Ô¬Y3:wî‰Ĺbjܸ1 °°°rm)--Ąożý–._ľL(66V­ö“›››\zPP988ĐÎť;eŇ»víJľľľjŐ!ĺÂ… €víÚU©ňµ™ÂÂBš={6Ť;–ŚŚŚhË–-22}űöĄ(Őńnż©,ýő zőę•LžP($@ .äŇ+Ű ĆÇK­y.‘H„ůóç|||ä>KwďŢpűöměŰ·O&OWWŔŰí}}}!…óçĎcذaĺÖ…ž={<  yóćjŮnll¬0]GG‰‰‰UË–9sćxŰĄ_|¤Ô«WOîSuUú/Ářxy/ôăÇŹ‘źźŕí đ]ĚÍÍążoܸˇPǧź~ĘÝíěěĐ˝{÷ ÇüĽĽĽĐŁG”””`Ďž=– ^«*¶¶¶022’I\[˙ĂĎĎŢŢŢČÉÉÁÝ»w1hĐ G—––†„„.Vá]ĽĽĽ°eËąôŃŁG#::Ze;ňóóńđáC@Æ U*ىţË`0>>jMXyĽ~ýşÜü˛NSŮŰgť:u*]˙É“'! 
1jÔ¨JëP„ŽŽâĂOlPĄDEEA"‘Ŕ××W&ýÜąs 4H¬qăĆri)))xôč‘ZeBˇ;?ŇŻ3ˇ‰ţË`0Ę[•G IDAT>>Ţ íććĆýť››+—ź““ĂýݲeK…:Ş27544mÚ´AË–-‘źźŹââb…oB ísńâEXYYˇE‹2é{÷îE§Nť`ooŹüü|ÁÂÂééé ŽŽľţúkđů|­Tו+WĎçS\\śLz^^ąşşrç(99Y&Á‚äďď/3˝éŐ«W@ĺóů.ąąą4kÖ,rpp /// $///úí·ßHOOŹ\uú/Á`đŢϤ˘˘"äççĂÂÂBëuâŃŁGpuu…ˇˇˇÖëÓ6111HIIA˙ţýkÚ*˛K,#!!ÎÎÎryŇsÔ¬Y3™Čx±X üüóĎ\DľĄĄ%ţüóO,\¸qqq€řřxnýwuÉËËŹÇă¦Ô%''C__044Třv^ťý—Á`ĽźÔŞOÜę```Pm77CCC´nÝúpÎ ww÷š6CŽŠěŇŃŃQčś˙ťŁw§­]»v ŮŮŮčׯÖ¬YĂ9Ë›7or»Łť8q‚sÔ•ÁÄÄDfľ{ŁFŤ`eeSSSĄźÎ«ł˙2Ś÷“÷ÖA3*GII ^ĽxGGÇš6E©]ŇqbM!˙Ţşu+\]]ą9Ç^^^xőęvíÚ…/^Ŕßß_Łő2 FUyo?q3*ǦM›ĐŁG4mÚ´¦M‘A›v‰D"ÂĚĚL&˝°°ĄĄĄ*ďďÍ`0Ő sĐEEE000¨i3䨭v1 FMÁ4Á`0µ6­"ńńńʨ˛ĐăĂE•>˘-Xßc0><Ţ‹Ą>k"ÂĎ?˙Ě­Ą(_şŞUTTž>} ??żę4‘ń_ÂÂÂTÚ™ŠÇăaôčѰ··×H˝ęôMPZZ*·Ń ë{ Ƈ{®€µkעqăĆhŐŞ•\Ţ»[ăřńăř矪ÓDĆńđđ€ŹŹŹĚ/::®®®ré–––«Wť>RY ‡ß~ű vvvrkzłľÇ`|x°1črHIIÁ¤I“pěŘ1…o@/^Ľ@nn®ĚfBˇAAA8}ú´Fßš•côčŃX´h´˘ż2}¤2ÄĹĹáňĺ˰±±A˙ţý‘™™)łM% ťľ'‰››«Ňć0ęČ2ŚŠaoĐĺ°jŐ* 8PéÍ®qăĆr7^ XZZâôéÓŐa"ن©L© Í›7Çĉáââ˘TF}oűöíŘĽyłĆe FĹ0­"ÂńăÇ(——žžŽuëÖ!$$Dáć8yňdu©”ŔŐŐvvv¨[·.ľüň˵ç]âăăááá: K—.đöö†§§':uę„víÚaĹŠ2ň—/_F۶mń˙ěťwX×÷˙ßË‚ %Á.Ĺ*˘‚Ä Q5(~bW0Ć’(vE‚‰1˘[Ŕ[„hŚ "Š#‰hPQб!(ě˛ô¶ĺüţŕ»ócŮ]Ř…Ą¨ózž}”{Ď=sćΙ93·şşş˘_ż~čŰ·/śśśTŢň±>¨‹ŹÔšö˝đđpŚ3F%Ů•dáĺĺ…aÆÁËË #FŚŔСCńĹ_ ''‡‘[ż~=†Š#F`ÄđđđŔńăÇk}.ŠŠŠBŹ=Đľ}ű·~'1‰D‚S§Náńăǵ*Ź^˝zÁÜÜnnn¶®ńą}ű6`aaŢ˝{ËäŐµîꍆ_ţűí ==ťLLLäŇ% Íž=›Š‹‹ÉĘĘŠ®^˝*'sëÖ-ęÖ­[C©”‚‚şxń"učĐĐ”)SŐžŞSBB%$$••łąÄ®]»(!!RSSeäsrr(""‚ôôô¨˙ţtâÄ ŠŹŹ'ˇPXíq¦L™BĎź?Ż—s¨‹ŹÔ–‡ ó5é{Ź?&ggg•e]\\TÖ-‹)66–ľľ>effĘlfBTQŹiiiôÉ'źŹŹĺććŞeżŞęŇĄ yzzÖYWŐŤ\’K—.ęÝ»w­uäççSż~ýČŃŃQ­rŤyŢęPPP@–»G4Qwő;Š[ ™™™h۶­\ş˛- +Ó¶m[•öI®O 1hĐ ŘŰŰăĺË—×_\\ŚK—.áÎť;LżŁ››úöí+7ÂXÍ›7‡““ó)666Lze^˝z…~ř_ý56oެÖöőEm|¤ĽĽĎž=«Q·±±±BÝ5ˇIß‹€ŹŹŹĘ˛ŢŢŢ*ëÖŇŇBĎž=TŘܦM9‡sssŚ7vvv066VYż:ŔÜÜ\#şÖŻ_ŹhD—şŘŰŰcذařüóĎk­ĂČČ–––xđŕZĺóĽŐÁĐĐ;vDFF†Lş&ę®>hü§\Ą¤¤Df)VVV°˛˛ÂňĺË1dČtčĐANF___á^ĎŤ¦ŞvíÚ…;wî`üřńX˛d ôőő‘‘‘›7oböěŮřâ‹/0tčPŤ3** S¦LApp0fÎś©1˝uĄ6>’ššŠ°°°uüńÇX°`Ú6iĘ÷GŹŵk×T–ýóĎ?ë|\Epą\•^úę‚&î“ĚĚLĽzőJÖÔŽ–-[âÜąsuÖŁn]4öy«‹˘óÓTÝi6@+ÁĚĚ @ažH$°mŰ6äçç3[Jáńx°°°h(S ‰D???ôîÝ{öě‘qô: C‡5j‚ńčŃŁZʶnÝŠµk×â÷ßW:ϸ±¨ŤŹX[[« k‹¦|/>>ť;wFË–-5*ű.ł}űöoĐTx_Ď»!x§ô•+WpćĚfŤç˛˛2X[[Ă××đćÍŁyóć‰Dpsscö"¶˛˛Bnn.ĘËËѬY3˝•·0\˝z5/^,“źžž^ë˝…ë“äädDFF")) ĆĆĆčÖ­&Mš¤ňśŕŕŕ` 2¤ÚŻăśś<}úććć8wî\­›ŚĘËË1{ölÄĹĹáúőëJ·™ÔBˇ~~~(**BQQ¸\.~ýőW>„˘˘"˘]»vزe 8Nť|¤¶Ĺb™«˘)ßS·yŰÇǧÁ¦>ţWŻ^źĎ‡™™|||‰{÷îÁŇŇ ¨vʇ«WŻ"%%]ştÁ!C”ĘćççăСCČĎχH$‚««+,#“ššŠ 88Ý»wÇ©S§T4»»»3rD„ččhĆŚ===DFF‚ĎçC[[óćÍ“)OD¸téţůçčęęÂŰŰżüň 2331oŢ<…ÝuwďŢĹĺË—ÁápđŮgźÁÁÁAíóV†H$ÂŃŁGQTT„ĽĽ<´iÓ\....r¶$&&"&&Í›7ÇřńăakkËä«rť”!Ö]FFbbbŔçóńá‡bÚ´i¸sç.]şmmmąú¨k]+¤ńşżëźÄÄDÚ¶méëëęŮł'EFF2ů€fĎžMZZZ´hŃ"şvíšLůaÆŃß˙-§÷Í›7äččH?ýôýúëŻrůË—/§]»viţ„j§§' –-[’‹‹ ;vŚ^ĽxA±±±Ô©S'255ĄcÇŽŐ¨'--Ť,XP­ ŹÇŁ)S¦Pvv6 …B?~<•••Ő¨ŰÎÎŽ$KŮŮŮÔż@–––”““ŁňůVeѢE”™™Y­ŚP(¤eË–ŃG}DČÔÔ”ÉËÎΦĄK—’‰‰  GGG™ÁLµőuyňä -\¸LFFF4bÄZ´heeeÉČiÂ÷ĘĘʨcÇŽT\\¬QŮŞ@VVVŐĘ…††Rll,ó÷˙ýGÁÁÁÔ˘E ęÝ»7Mť:•Nť:EŮŮŮtâÄ jÝş5…††*ÔuäȲ··§sçÎQII =zôfĎžM={ö”$ĆăńČÓÓ“^ĽxADDyyyäááAÓ§O—ń7nPxx8ŮÚÚR×®])<<śÂĂĂéĚ™3ŚLQQŤ7ŽfÎśIĄĄĄ$‘HhíÚµÔ·o_ĘĎĎŻ±®ţűď? 
$cccš>}:­X±‚©cÇŽôůçźSvv6­[·ŽĚĚĚä@ …Bňňň˘oľů†˛łłéÎť;diiI_~ů%•••‘ŻŻ/ť8q‚‘ź2e 9::ұcÇčđáĂTZZJwďŢ%CCC™ç§*ç­ ±XLŁFŤ˘;wî0iĎž=#sssŠ‹‹cŇ$ ­\ą’śśśčÉ“'DT1mҤIŁÖu’2sćL™:RVw©©©D­[·&www:xđ ýúëŻJëŁj]óx<ąşž={¶L]×Ä; Ą,X°€-šťť-“·jŐ*š4i’ÂrgĎžĄyóć)Ě+//§ĽĽ<ąt±XLÎÎÎ*˛˛˛Rűˇ’~i€îŇĄ‹ÜCôďż˙&¤««[ă(ĚuëÖŃË—/•ćóx<úꫯdę7""‚._ľ\ŁŤ•ôöíŰ©S§NĚčs4bĉD5ę©+‹/– ĐRĽĽĽčÚřH}ˇ®ď)ăäÉ“4mÚ4ŤËVĄ¶ZĘСC©eË–ĚYĘ™3gĂáĐ©S§dŇ/_ľL‡’““eŇ‹ŠŠČÄÄD.@ďŰ·ŹĐîÝ»™´˙ý—Ptt´ś=ýű÷'www…ç@–––TZZʤ‰ĹbęÚµ+)>q 2„ěíí™ú ˘}űö1ů>>>r:44”´´´dFţ˙řăŹdhh¨p6Ŕ”)S¨U«VrĎO?ý”ÜÝÝĺ‚^uç­ŚŰ·o“ťťť\úĆŤetDDiiiÉ\ł¤¤$@&L ‰D˘öuŞ Ą(Ş;""ooo˛´´¤ȤKëŁ2ęÖuMĽó /^ mmmËô˙#44ß~ű­Ârźţ9ž>}*7âtttäö€“'ObňäÉřđĂU˛mÆ xňä‰Ú?uFĚ@«V­dFK€««+Z´h˛˛2¬^˝şÚňYYYhßľ˝Â<Ź???„„„Č4—;::"..N-;.\???Ü˝{—Y‰µôÔµójă#ő…şľ§ŚđđpLžźŹ}űöA$aÓ¦MXştiµ7űöí„K—.Őxڏ¸8lذá­Z[zŁ×HKKKĺҲłł•g b —˛·ReT^3»Oź>Ě—łX,Ć„ ••Ą–ľ†@©/4ĺ{ÇŽĂ1cT ęČ*ÂĐĐzzz23EdeeÉ=kBęŹŇé?iii –ˇPeË–ˇk×®HMM…ŻŻ/UÔU>ź4hćÎť+ó;}ú´Ú>­n«ĚÜąsŃ®];DDD¨¸—:„€€ć˝*ĘÖĐD°*^vďŢŤ .Ŕ××\.۶m­­-3[:źżęšóUŃÔuŞUëŁ6u]íqŐ7őíÄÎΞžžŠŠbJ«V­TZŇ®U«Vđôô¬Q®6M}«WŻĆŐ«WŐ.çëë«‘Ą ĄKe*ű˛Ň˘E äćć27‹48oŢĽYap€7n(ĺ¨óçĎǵk×đÇ 33“&MBLLL˝Ď‹­JM_ÇŞúH}ˇ©fćđđpěŢ˝[㲊ĐÖÖFŻ^˝ŹěělĄÓ´RRR`ccŁ–îgĎžAKK‹Y]:â·¦—ĘřűűăçźFbb˘Lxĺ‡rNNŽŇÍAŽ?;;;ôčŃ‘mŚv„B!fĚʇůóçC,ĂĎĎŻŢ–6•žwĺQÖUů÷ßÁápŕááËŹ3Ű·oÇŽ;еkW¨qaź%K–`ďŢ˝µľNšDÓuýŢh@TTŇŇŇ0sćL>|¸ŃżvçÎť«ňt–Ę´nÝZ-yEÓr^ĽxçĎź&MšTmů±cÇâŕÁX¸pˇJÁ™päČlŢĽY-;«Âáp°˙~$%%áÉ“'¸rĺ T'˝Š~íW­+‰D‚ű÷ďküxMŤçĎźŁ¤¤„y0jJ¶:|||Ź„„…SňŇÓÓˇ­­­ô«±¸¸X.-??ׯ_ǰaĂkjllŚ"..NáţÜ2ă'„B!:OOO™‡ľ@ 9ćÚµk±qăFţSů 77\.VVV°··Gtt´BżÝľ}{˝v‹ýóĎ?033“›zĄ)”ťwu¤ĄĄáňĺËLÝ@·nÝđÝwß1_źFFFÜĽy;věŔ–-[Ş}#=räzôčCCC…ůDÄÂĘoşb±"‘‡ąÉőôô°nÝ:f#†Ő«WŁ[·nđňň‚žž^­›X«ňÉ'ź€Ăá 77—ů˘#"„„„0ôôôtĽyófff ţ_ßÔfîs]‘nŐąhŃ"ôďß_¦K¤°°K–,ÁÚµk•–ňä nܸ>}ú0i«W݇ĂÁ®]»ddwîÜ ś$‡C:::ÔĽys222"ŇŐŐ%.—KsćĚadýüüËĺ’®®.‘‘ééé—ËĄť;wŞV*˛yófj۶-YYYѨQŁČÍÍŤ6nÜHĚ´/róßv$ uéŇ…^ż~­QYU(..&˛··§C‡Q\\mßľť†N7nÜPZÎÓÓ“\\\($$„BBBčÔ©S4mÚ4>|8ĄĄĄ),óß˙ŃŔiůňĺtúôiZż~=íÚµ‹\\\988Đ… (&&†,--iѢEM!!!tńâE:wî™™™ŃĚ™3éîÝ»Śn>źOŽŽŽ4uęT Ł;vČ;99™ú÷ďOľľľtčĐ!Z±bmذÄbqŤuC "cccúŕČŐŐ•/^Ěäßľ}›<<<ČĚĚŚ ČÍÍŤYס°°LdmmM­[·&.—K¨S§NODóš=<<ČÔÔ” ©_ż~´˙~JHHI8p íÝ»WĺóVDtt4ą»»ÓňĺËißľ}tţüyZłf -X°@f*QĹF,ýúőŁéÓ§SDDŇÚµk™©—Ş^§ĘudhhH  ĄuW›ú¨©®Ż_ż^cÝTć˝ ĐDÝU Jď"‘^ż~]ëyĹ4oŢ<Úşu«ÜĽľÔÔTš3g«ôĐiŞĹbĘÉÉ‘ŮaI Pff& fˇ‰w‰›7oŇСC5.«™™™IaaaK%%%ŐĘK4QĹ<üÄÄD***RéX999ôŕÁĆO¨ŹşćihXË{Err2~˙ýwđx^ż~Í,›)ÝĘłňh`Y HII@ @ëÖ­Ń«W/|üńÇŤmÚ;‡&ëš Đ,,ď9qqqčŰ·ŻJÓŐÔ‘eaa©ě]¦"Ň=Ş#55µţ a©7Ţ×kěćć¦rŔUG–……Ąn°wZ ¶nÝŞt1Ę ýőWŁ®ÇĚR;ŘkĚÂÂŇat lÚ´  wĽŠŮÉÇÇÇgÎśÁÍ›7ŇD–:˘î5ŽŚŚdŻ1 K˝ĂöAWCFFłˇ˘%ÚŇŇŇPPPŔ,ČT `ńööFtttŁŻóÍR3Mĺ …B¨4ÂSY–·ö ş6l؀ѣG+}[XXČ<¸ŠmîĚĚĚÝ&˛Ô‘¦rŤ÷îÝ‹ť;wj\–Ąń‰Dxřđ!ŢĽyÓئ°ĽĄ°Z D„3gÎŔËËK./;;[¶lAhh($‰\ľ——Ξ=Űfľ•<ţŽŽŽpqqAßľ}Ńż¸ąąÁŐŐ˝{÷ĆşuëdäŻ]»†Ţ˝{ĂŐŐýúőCßľ}áää¤Ö&ŠhJ×8<<śŮ DS˛‰‰‰đňň°aĂŕĺĺ…#F`čСřâ‹/ ąőë×cčС1bFŚ?~ĽÖ碨¨(ôčŃíŰ·Ż·m5ÁíŰ·áŕŕ ôîÝ»Öz˘˘˘0xđ`Ü»wÎÎÎ× µűę« IDAT•őOuő ‘H FXꙺ®?ú®’žžN&&&r鉄fĎžMĹĹĹdeeEWŻ^•“ąuëuëÖ­!Ě|+)..¦„„JHH Nť:1PěÚµ‹(55UF>''‡"""HOOŹú÷ďO'Nś řřx …u˛Ł©\ăÇŹ“łłłĆe‰*ÖŽŤŤ%¤ŻŻ/łľ¸‰DBiiiôÉ'źŹŹĺććŞeżŞęŇĄ yzzÖIĎÇ5d‘b hđŕÁµľľŹ?&JJJ˘ěělârą4uęT&żľí×ĘęáŇĄK€z÷îÝH–˝?°K}*!33Sá’lĎž=èQŁžžŽçĎźĂĘĘJN¦m۶ČĚĚl3śââb\şt wîÜaúAÝÜÜĐ·o_•·_lŢĽ9śśś˙K±±±aŇ+óęŐ+,_ľ_ý56oެ±Mď›Ę5®Ďmµ´´ĐłgO6·iÓFN†ĂáŔÜÜăĆŤťťŚŤŤUÖŻ&&&077Żłž 6`˙ţý°H1†††čر#222jUţěŮł033c–Â}öě™ĚţíëׯÇ4bk}˘¬ěíí1lŘ0…űwłh6@+ˇ¤¤DfoZ)VVV°˛˛ÂňĺË1dČ…k®ęëë+Ü?ôm†°k×.ÜąsăÇŹÇ’%K ŻŻŹŚŚ ÜĽyłgĎĆ_|ˇC‡jô¸QQQřňË/ŚYłfiTwS¸ĆD„ŁGŹâÚµk•­ \.·Ţ÷¸®ë şĚĚLĽzőJCÖ(§.v>}úT& [XX0˙o(ű5…˘zhٲ%Îť;×ÖĽ°}ĐJ033“é§«ŚH$Â0}útäççÇăÉäóx<™›ňmG"‘`ńâĹ066Ćž={0hĐ BKK :tŔčŃŁ±k×.Üşu ۶mÓČ1‰[¶lÁÔ©SqüřqŤg i\ăřřxtîÜ-[¶Ô¨ě»ĘöíŰ›üD¤4Ŕoßľ]áEĽÓ_ĐBˇK–,Aaa!ŠŠŠŔĺrń믿ŢĽy   ˇ°°íÚµĂÖ­[™˛VVVČÍÍEyy9š5k&Ł7>>yyy1bVŻ^ŤĹ‹Ëä§§§ŁcÇŽő‚*’€Ă‡#==ŮŮŮh×®şt邯żţZĄťU‚1dČjżŽsrrđôéS››ăÜąsujţ*//ÇěŮł‡ëׯŁsçÎŐĘ_ąrgÎśžž ¬¬ ÖÖÖđőőPq­ŃĽysD"¸ąąaäČ‘Mâ×góv]yţü9®^˝ >ź333fř˝{÷`ii‰T»AŹÇĂŐ«W‘’’‚.]ş`Č!JeóóóqčĐ!äççC$ÁŐŐfňSSSqŕŔŁ{÷î8uęŔČČîîî2şŃŃŃxđŕ­[·Ćĉĺ®qm¨NwVVâăăńěŮ3äää06ş¸¸ 
¬¬Leű!‰pôčQ!//mÚ´—Ë…‹‹‹\Lbb"bbb ĐĽysŚ?¶¶¶L~Mu]‘‘‘ŕóůĐÖÖĆĽyóTLWډ‰źĎLJ~iÓ¦áÎť;¸té´µµńŮgźÁÁÁAa}^şt ˙üótuuáíí ‡_~ů™™™7ožÂ.¦÷…w:@s8áôéÓxńâLMM™<---"""ąąąptt”)««« gggÜşu ź|ň‰Lž­­-lll°{÷nŘÚÚÂÄÄD&?66#FڍżSű÷ďĂŮŮÍš5Ăжm[\»v S¦LApp0ţřăxzz*-źžž>ź_mpćóůđóóCHHLLLŕăăÁCGGGm{y<ĆŚ?˙ü–––2×L-Z´@§NťđÝwߡ¸¸={ö”yč5kÖ "‘7nÄ‚ ůĂŤ}ŤËËËqţüylÚ´IŁ˛š˘ĽĽ™™™Ř´i¬¬¬píÚ5Ś9sçÎŵk×ŕââ‚ĺË—cÎś9reűí7¬]»ëÖ­ĂâĹ‹‘––deeÉí…Ëçó1eĘěرČĎĎÇرcqôčQüüóĎŕp8ČĘĘÂÇ [[[Ĺb€\“|qq1¦NťŠ?ü۶mCłfͰ~ýz 8ŃŃŃ022Şu}Ô¤[j—tžşÔF‘H¤˛ýŠH$?~<~řáôęŐ @ĹËS˙ţýqřđa&€‚‚‚‰ß~ű VVV(((Ŕ¬YłđŐW_ađŕÁ*ŐuuĹbdff"$$­[·f´P(ÄË—/;;;pą\ččč`Á‚řďż˙đé§źâČ‘#>|8ŁK$aôčŃhÓ¦ Ö¬Y´´4|ňÉ'čׯ~ţůg,\¸÷îÝ{Żô{1Š{ńâĹ€LMMĺňĽĽĽ9::Ęĺť={–ćÍ›§Pgyy9ĺĺĺÉĄ‹Ĺbrvv¦śś•íó÷÷'+++µ5ęgFI'''3ékÖ¬aęD,+-żnÝ:zůňĄŇ|ŹG_}őegg3itůňe•ν[·nŚ}Ű·o§Nť:Q‡´#FTk_e,XŔŚV®lŃŞU«hҤIreę+âäÉ“4mÚ4ŤËVE ˛˛˛ŞV.44”bccĺ҇J-[¶¤/^Ȥź9s†Đ©S§dŇ/_ľLGĆßŠŠŠČÄÄDn÷ľ}űíŢ˝›Iű÷ß EGGËČöďßźÜÝÝ•žC@@YZZRII “&‹©k×®¤´\UfÎś)7zYŞ»´´´ZÝsćĚ!…zk˛_·oß&;;;ąôŤ7R\\ówDDiiiÉÔ{RR  &‘zuM¤¸|||¦{{{“ĄĄ%8p@&ýÓO?•;ďĐĐPŇŇŇ"@Ŕ¤ýřăŹdhh(“ö>ó^ôAW÷%W]ŢçźާOź*Í©ŁŁ>ř@.ýäÉ“üđC•í۰až|řpŘŘŘ`Á‚2ë•Ďš5 ÎÎÎ2ţT ¬ł¶¶–Óß·o_Ś9...LšŤŤ ttt ˛ť|>›6m€­(ŠÖ2www;vLe] ©»&ôôôđŕÁ¬Zµ éééLúČ‘#™n"B@@€\˝[[[ĂßßźéîŃT]+\ ÚÚÚxńâ…Ü~áť;wĆëׯeŇâăăa`` Ó:Őąsg"))Ie[ŢeŢé&îşÂáp°gϬ\ą?ýô“ĚŤ©ÔÔTÄĆĆ"44´,¬™fÍšaÎś9xńâŽ;†{÷îáĹ‹2Aą´´Tm˝<K–,Á¦M›äšˇ?ţřcäć檭sĎž=řꫯż˙ţ;ÜÜÜ ‰đý÷ßĂĹĹýű÷ݶĽ……&Nśđđp„……Áßß8xđ śśś®µÝX×X  ))IfťoMČÖuF/s8888ŕČ‘#ČČČ@űöí‘™™‰ÇŹ+Ô§HżŤŤ Nť:…ŇŇRÄÄÄ >>ž řEEE*ŰsďŢ=¦9ąę`Ee/aµŃ]y¬Š&t×D—.]0cĆ ¬X±+V¬€­­-† ‚€€f Izz:2220`Ŕ™˛şşşŘ°aó·¦ęş:Úµk}}}™4.—+§ßÚÚČËËc¦öI›ţkwňľđ^|A×…öíŰ#((HĄŚâââ°aÆ&µ÷ăÇŹáááKKKlܸvvvX·n–-[¦RyEÁ;;;[ip*ú.•˝aW‡ĄĄ%ó˙>}ú0_Îb±'NDVVVŤ:¤_É|>űöíH$¦M›°téRĄ×Ą1®ń±cÇ0fĚ•¶nTGV†††ĐÓÓC^^^µrYYYrÖš^éÔˇ´´4PKŹP(IJeËеkW¤¦¦Â××jęâóů€AaîÜą2żÓ§O«ä?Ťˇ»&8vďŢŤ .Ŕ××\.۶m­­-®]»†?ţř™™™4ibbbŞXcggOOODEE1/­Zµ‚››[µÇŞďk\•ěÚµK%ŮđđpěŢ˝»ÖÇŇÖÖFŻ^˝ŹěělĄÓ´RRR`ccŁ–îgĎžAKK‹é2‘Ž®ée 2K–,ÁŢ˝{‘(Ó^ůž““Łpsi׍­­-zôčÁČrą\Ťľ(WÖ­©…rYű•ńďż˙‚ĂáŔĂĂ€`Ě1Řľ};věŘ®]»@µÝUŕďţąVu­i„B!fĚʇůóçC,ĂĎĎŻI/ŰĐĽZú5Wuţ¤D"Áýű÷Ă$ćÎť[«é3•CPÄíŰ·ńňĺK€···L`«éF–2věX> §ŔĄ§§C[[[aź;PŃŹ_•üü|\ż~Æ cî1ccc 8qqq çČŚW …8tč<==e†@ 9ćš5k™ŕź››ËřuçÎťaooŹččh¬ZµJÎŢíŰ·+q® •u+ň?UuWgż2ŇŇŇpůňelܸ‘IëÖ­ľűî;ćëÓČČĆÝ»wN ŬYłÔŞëúćźţ™™3śEž÷˘‰[:…&77ŮŮŮ*IHHóIOOÇ›7oeXZZŞý«ĽL¦"zöěÉĚ Ž‰‰aŢ’ů|>"##™~źŰ·o+í‡îŃŁž={†›7obÉ’%زeKµSźŽ9‚=zTŰÄMD‰D‰D2oîb±"‘HćčééaýúőĚuZ˝z5~űí7+]đáÓO?…««+€Š~éĘS;š* đ“Ęjbîó¬YłŕááE‹ÉőbÉ’%Őľř$‡C:::ÔĽys222"ŇŐŐ%.—KsćĚadýüüËĺ’®®.‘‘ééé—ËĄť;w*=Ć… 9r¤Z[‰DB]ştˇĚĚL•e_ż~­‘c“żż?ŮŰŰSxx8ĹĹĹŃöíŰiřđátăĆ Ąĺ<==ÉĹĹ…BBB($$„Nť:EÓ¦MŁáÇSzzşÂ2˙ý÷ 8–/_N§Oź¦őë×Ó®]»ČĹĹ…]¸pbbbČŇŇ’-ZDŃŃŃB/^¤sçΑ™™Íś9“îŢ˝KDD|>źśśśhęÔ©F;vě;vrr2}öŮgäëëK‡˘+VІ Tš®wűömňđđ 333244¤ČLgLNN¦ţýű+Ô}÷î]2dµlŮ’ ÉŃŃ‘Ľ˝˝eî>źOŽŽŽŐÚ_•ččhrww§ĺË—Óľ}űčüůó´fÍZ°`Ě”/˘ŠŤ:úőëGÓ§O§ ¤µk×2÷¶Şu­¬*§››]»vŤnܸAdjjJ†††ÔŻ_?Úż?%$$Ȥ8öîÝKDD………4xđ`˛°° kkkjÝş5qą\@ť:u˘řřřëć]ç˝ ĐDssrrdvô”™™I€JKKk .o+ĄĄĄôęŐ+*((¨Uů‚‚š7omÝşUnŽbjj*Í™3‡‚UžłÜäçç7ąëyóćM:t¨JvIe5Mff&EFFRXXĹĆĆĘĚV„4@UĚ{OLL¤˘˘"•Ž•““C<`üâÁ”’’B|>_ĆW$ Ą¤¤PBBĺçç3éůůůr/މ„’““)--­Úc JJJ˘ňňr•lU‡şčVŐ~)ůůůL NMMĄŰ·o+śź_™ÜÜ\Ąö©S×őEyy9őěٓΞ=+“.‹éĺË—4yňd˛°°hRϓƀCĶ%°¨Nrr2~˙ýwđx"##qâĉz˛š…………ĺ]… Đ˙‡©©)† KKËZ•˙óĎ?‘””KKK :­[·FçÎťabbÂl}Č¢*ěRźU¨şÁĽŞ$%%:věřŕđčŃ#…Bf%&Uaż 5„t˝—Ę+?iii±Á™…………ĄV4É/čż˙ţçÎťĂýű÷Q^^řřřŔÉÉ @ĹN?~~~(,,Daa!ěííńĂ?ŕĆŤ8|ř0ŇŇŇЦMx{{+]Ř?33GŹĹÇ‘ťť [[[Ěž=[m[óňň€ÄÄDŔÇ1kÖ,ŔôéÓŃ»woŔÝ»wqúôiźŹaÆa„ µţŇgaaaay;hR_ĐBˇ3fĚŔ§ź~ŠgĎžá§ź~Bxx88śťť™‘Ő|đ®\ą‚cÇŽáÖ­[X»v-öďߏiÓ¦!00@ż~ý-wś°°0X[[cűöí=z46oŢ {{{Ś1©©©j٬­­ ;;;´nÝPVV;;;ŘŮŮÁŘ؉‹/†ŁŁ#ŇÓÓß~ű }úôŻŻ/úôé””}†††8qâţřăź´µµ ­X±B-Ű×®]KČĘĘJ&}ëÖ­€z÷î-sNDD?˙ü3 ^˝zQyyąLž‡‡ şxń"‰D"˛°° ˇ–m,,,,,oMć ş´´”ůB6l\_®§§'ŕäÉ“LşŽŽŕĺË—XĽx±Ś>éhěŞ_Äß~ű-ŕłĎ>cľzĄ´hŃBŁűŹ …B,[¶Ś9^ŐféqäČ™<éą•––ÂÝÝ\.ýő._ľŚ &hĚF–¦I“é~đŕĘËËŹ?FXXLţŁGŹiiireŤŚŚ`nn.“& †•ç3áß˙´oß^sĆ+áŃŁG(**Pü«hć˙·nÝ—_~)§ŁgĎžL9sssąódaaaay7i2ZČ€Š`Vu>˛ĄĄ%Ľ˝˝akk+W¶Yłf* 
šâóůĚhkéj}’••Um~ĺV‚ׯ_+”166Ö¨M,,,,,oM&@÷čŃůż™™†®ńc››ĂČČ(((иţŞtëÖŤůAAHćE"??źů÷îÝę`Gkł°°°Ľź4™>hcccŚ;@ź؉DNć—_~ÁňĺËk} ‡qăĆîÜąĂ|MK … Ź[[Ú´iĂô3ßşuK.˙öíŰ*ZFŹ­±ă˛°°°°Ľý4™ »v킵µ5’““ńÝwß1ýÇĺĺĺ8uęľ˙ţ{L0D‘HÄäK˙–H$2˙Żš!!!°˛˛ÂăÇŹ±wď^ćŘEEE}s‰DĚhđŔŔ@ÍW8 K“¤ÉčĘ…BäććÂĚ̬Ţúb Ááp``` bĘ–®®.ôôôĐĽyshkk¶›ľ´´EEE055Ő¨^–w‹& YXXXXXŢWšT4 Kl€faaaaai‚°š……………Ą Âh–& YXš Ďź?o°c©»Ĺ*KĂÓPţŔúBÓ‚ Đ,,M"ÂÖ­[d)Z)ýő.]şÔ`ÇcQť†öÖšl€faiBlÚ´ °··o°cúřřŕĚ™3¸yóf“E5ÚX_hZ°ó YXęHqq1tuu뼗xFFfÍš…Ó§O7ř&)|>ŢŢŢŽŽ–;vaa! Ôž·™·ÝX_h:°_Đ,,u -- ţţţY+}Æ =ztŁě`fjj 333DGGËĺť={GŽip›ŢFŢ`}ˇéŔ~AżŁěŰ·‰‰‰j—Ó××dž ęÁ˘Ú…eË–!;;}úôÁ‰'<‰D‚3gΠ[·nřřăŹÔ®üü|xzzâ×_………Eťt¬¬¬pűöm|řá‡2y ¸rĺ 0`ŔDDD ??‰łfÍ‚žž^ťŽ-ĺ÷ßÇŐ«W±cÇą<___Ś9C‡•Iţü9ĆŚmmmć'ÝŕE$aôčŃXşt)#íÚ5,^ĽYk^,٬¬ ±±±022RÉΨ¨(|˙ý÷ŕńx¬?Ô“?ÔĆXęzO¸pá6¶ ĆŔ uîÜ™ľţúkÚ°amٲ…>řŕ@čŰożĄÍ›7ÓÂ… ÉŐŐ•––‰D˘Ć6_@@]şt!OOO™ôK—.ęÝ»wŰôĺ—_ŇÉ“'5˘+==ťLLLäŇĹb1-_ľśRRR¨yóć´téRĘČČ "˘)S¦Đúőëę{óć …Bµl¸uëuëÖMa^qq1999ŹÇ“KOHH „„˛˛˛b|k×®]”@©©©2ň999AzzzÔż:qâĹÇÇ«m+ëňţđŐW_)ô‡†ňÍóŢčŔŔ@9rdc›Ń`ŘŮŮŃgź}F………2éíŰ·g˘ÉÉÉLşD"!@YYY m®JxxxČ=߼yCÆ ŁĐĐеĺćÍ›dooO‰D#úţůçęŇĄ‹\ú˙ýG×®]ŁČČHŇŇҢ””&ĎËË‹V¬XˇPź···Ś,Qyy9Íž=[© Ż^˝˘-Z(Í˙é§źč›oľQšoggÇřVll¬B™ääd˛´´¤yóć©4ŞÂú¬š1¨$ŰIDAT?Ś9Rˇ?4†/°h†÷"@K$ęŃŁÇ{ [µjEçĎź—KW ‰*ľJtuuĺŇ› C† ‘{ 7&L ={öhLßźţINNNJó—.]J˙űß˙żĹb1µnÝš˘˘˘T>Ćőë×ÉĂĂCiľ@ CCCĄůdff¦ô®¦}öěYjѢíÚµKe›«őYhÓ¦ ť;wN%ýőí ,šá˝$¶k×.Ü»wݱÍh0Äb1x<\]]Ő*gbb;;;deeŐ“eďĺĺĺ8ţ<ĆŽ«1ťfffJó˙üóOxxx0_»v ĄĄĄ8p Ě"÷ďßÇŽ;°oß>&M$aýúőXąr%„B!6mÚR0ô„ÇăUŰwjhh>}ú <<\­s#"lٲS§NĹńăÇ1sćLµĘ7ušŞ?4E_`QÍnv\ŇÓÓ±~ýz˘  ß|ó ěěě>źŹaÆa„ 2#˙ţűoś;w÷ďßGyy9lllŕăă'''“îĂÂÂpôčQŔÝ»w1kÖ,@ëÖ­±rĺJűNűůůˇ¨¨EEEŕrąřő×_oŢĽAPPŠŠŠPXXvíÚaëÖ­jŮ,‰ŕçç‡ÂÂBÂŢŢ?üđnܸÇ#-- mÚ´··7ÜÜÜę\—ĄĄĄ>}:>řŕµËúřř¨áńx¸ző*RRRĐĄK 2DNF  22|>ÚÚÚ7ožL~~~>:„üü|D"¸şşbđŕÁ ŹGD¸téţůçčęęÂŰŰżüň 2331oŢŤáÇĂá`űöí áÜąs;v,ĆŹŹéÓ§´µµ€ŘŘX¬Ył}úôQXéééčرŁÂ<)nnn¸páüüüŞ•“R^^ŽŮłg#..ׯ_GçÎťU*W•·Ń®\ą‚3gÎ0¶ĘĘĘ`mm ___ţŚćÍ›C$ÁÍÍ #GŽP;đôôdüaăĆŤMÎXjAc~ľW&##ČĐĐPXX 0€>LĆĆĆ€8@Dý'Ó§O'4aÂJMMĄěělZ¸p!q8ZµjĹĹĹQhh(}ôŃG€şwďNˇˇˇJááá̱…B!-[¶Ś‘355eň˛łłiéŇĄdbbBČŃŃQm›«ę9r$­Ył†ľţúkJLL¤ÄÄDjÖ¬Pą‰Ş¶T×ÄÝT8räŮŰŰÓąs稤¤„=zDłgϦž={Ę4ifggÓşuëČĚĚLn@ ŹÇ#OOOzńâĺĺĺŃ!Chúôérý„BˇĽĽĽč›oľˇěělşsçYZZŇ—_~IeeeäëëK'Nś`ä.\HS¦LQh{m}‰hذaô÷ßË錋‹ŁNť:ÉŘýŰożŃäÉ“Éßßź?~̤K$  °°0ĺĺĺÔ¦M*//Wh7ŃňĺËkl~ľrĺ (ěk­ÚÄťťťMýű÷'diiI999ŐęVF}ů‡‡GŤţŔăńjô‡ (ô‡ÄÄDÚ¶męŮł'EFF2ů€fĎžMZZZ´hŃ"şvíšLůşúCcú‹fh2ZЇ‡ şxń"‰D"˛°° ADDAAA€lmmI,3eĹb1999şsç“îââÂĆęXĽx±ÜCUŠ———Â‡ŞŞ6ůůůęС-X°@F‡ qăĆ©VQµDSÚßßź¬¬¬ÔţU®E\ľ|™8ŽśmEEEdbb˘°ĎŃÇÇGîĽoß>@»wďfŇţý÷_@ŃŃŃ2˛ˇˇˇ¤ĄĄE€IűńÇÉĐĐP&MʨQŁhٲeŐžGm|éěŮł4oŢ<9y‘HDré@nÄ}yy9YXXPnn®Ě„›7o2}Ž‘‘‘r´Äb19;;×DďßżOÖKĺ˝}űvęÔ©učĐI1b„ĚýŞ Ťé•ë˘:řâ‹/Şő‡… Ň××§ěěl™ĽU«VѤI“–««?4¦/°h†&×­ŁŁ ˘™ÖÝÝ\.ýő._ľŚ & ´´AAA€aÆAKë˙ź‚––<=='Ož¬ő±k›§ĚćŞr/_ľÄâĹ‹etXZZx{«ß°až˙üs<}ú2é\.WáęM&&&r«U‰D"››ă—_~‘±ŃĚĚ ÚÚÚřůçźQVVmmŮŢ­“'ObňäÉrsn«Ň˘E ¨qśÂÂ… áç燻wďÂÜ܉jËUĺ]đ‡E‹A[[ĹĹĹ cŇ‹‹‹Š€€…ĺęęMĹXjO“é®JĎž=™ţfsssć&ż{÷.ĘËËŹ?–qxxô耊}e6WĹČČH.OZN,ׯ‘MĚĚL<~ü'PuVT˛±±Á©S§PZZŠÄÇÇ3aŠŠŠädŹ9‚ĽĽ<ćAVXX űLů|~˝ôÓs8ěŮł+W®ÄO?ý]]]µu4oŢW®\X,FóćÍ™ôNť:áŹ?ţÉ-d‘ššŠŘŘX„††Ö¨_´ř|~µr{öěÁW_} bŃ 777D"|˙ý÷čÓ§úőëW㱚‚?HĎ·.ţ`aa‰'"<<aaađ÷÷‡<'''ĄëlWö‡m۶©˝ISń–ÚÓäľ Ą({#­|3µhŃ–––2?oooDFFâ‡~h(SjúŞ’Ň¬YłFYα©#}©Ň×ׯł.ˇPeË–ˇk×®HMM…ŻŻ/ĺÜŔÜąsŃ®];DDD¨h 9tčĐ®];9ů>ř%%%u¶QíŰ·GPPPťvjÖ¬™ĚYŠ®®®Â‡|\\6lŘ ’O¨Ůץ-BЧOćËY,c„ *}u˝Kţđí·ß¨fűöíH$¦M›dVYS„Ô._ľ¬ę©ĘĐ|Ąö4Ů/heŇŁGć˙fff>|x­ôOž<óçĎGďŢ˝U’/++«Qć} ş«WŻĆŐ«WŐ.çëë‹ŃŁG+ĚłµµäĺĺŐĹ4Ŕ’%K°wď^$&&Ę4…RĄ)%999hѢ„B!fĚʇůóçC,ĂĎĎO©ť­ZµŞ“Ť5ůR«V­®š†`ňäÉ*ËJĎ»U«Vjcţüů¸víţřădffbҤI‰‰©vC‰Ćö>źŻ1°łł§§'˘˘˘°iÓ&šš˘U«V*ÍÚhHh_`Qť& •allڱcÇâرc¸xń"$‰L?4üňË/xňä ÓW-íŻ©ü`ü÷ßĺ´—ö]Umf–H$¸˙ľĆĎĺmfîÜąđńńQ»\ëÖ­•ćcŕŔ‹‹É˝đŔĚ̬Ćc…B:tžžž2c@ŔĽőŔš5k‚ţůfffrÓr”ѦMĽzőŞZ™wŐ—˛˛˛ ŁŁĂô?Ş ‡ĂÁţýűqďŢ=<}úW®\A`` sŹ*â]ň@TTŇŇŇ0sćL>|ř­~©Ż­/°¨N“ ĐD±XĚMé‚űZZZrXşđHrr2ľűî;¬Ył\.ĺĺĺ8wîľ˙ţ{ÄÄÄ0ň}űöĹĺË—ńŕÁ”••Ďç###]»v•ŃűÉ'źrss‘ťťŤ–-[‚ÂÜHéééxóć LMMˇĄĄĄ’ÍŇs“>¬‰‘ăp82:*çU=ďşÔk忥H74 
öńLLLή+;wî„Nž<‰QŁF1é§NťÂăǏѲeKą2eeeČĎĎgţÖŇŇ‚¶¶6rsseä.\¸{{{˘¤¤„9_kkkĚ1ůůůhѢttt```€îÝ»Ł[·nrQěÝ»·ÚóP×—ęş5aCqűöm|öŮgŚ˝•ý«˛o‰ĹbD"p8FVOOëׯÇ1cT´ÂtëÖ ^^^ĐÓÓSčo?8::Öčđé§źÂŐŐńńń°°°¨uë_SˇŞ/°Ô =l\ ÄĺrIOOŹŚŚŚ¨yó椭­­t­á’’ &[[[ú裏ČĹĹ…ĚÍÍÉËË‹=z$';uęTjÖ¬}ôŃGdnnNÇŹW¨wóćÍÔ¶m[˛˛˛˘QŁF‘››mܸ‘é"(22Re›KJJHKK‹tuuÉĐĐôőőI[[›BCCéáǤĄĄ%ŁËĺĘĚ—¬ €´µµI÷˙µw˙.©…aŔ9–HËq‘ ú±En 6´FŁ­ MŇQTSa@-QIöci¨¨EJ‰:$*HĂÁ°ďî˝B]«Ű˝ŮyŐďgóŹď{<_xDÎs^‡MMMp»Ýpą\p:ť°Űí°Ůlžžţ’ńľÂŐŐ099‰­­-ĚÍÍaeeĄÔ.ç÷ű±»»‹łł3 B×u¸\.ôőő•zIc±ZZZ …ŤF1??Ź˝˝=ěěě@×uŚŤŤáââđřř@ źĎ‡ŽŽx<hšA[[ŽŹŹ_Ě/™L˘ˇˇ¦iľ{źÉRµX^^.˝ľĽĽ„ÍfÝnGccc)[‡š¦a||Ľôމ‰ hš‡Ă—Ë·Ű §Ó MÓ‰DŢłVňü\´GD°¶¶ö©ď]EŻł@_O™ý? …>l7M÷÷÷®ÖT,‘Íf‘N§Kź™ËĺN§‘Ëĺ`š&›óżA6›E"‘(őÎ& ¤R)d2™żę§}~~F*•B<G>ź/mĎçóĄ  twwc{{űűĹb†a  Âçóýq˝{{{ßü‘÷úsj%K¦iÂăńŕîîÎ’ńk!żÇ«–kţ«łP/j˘@ý«h4 żß˙ćţL&a/¶ŻŻŻ#TzzJY]]ĹĐĐŐÓ¨(ćáďÔCT l›Ńwčęę’››1 Łěţ““immŻ×űbűČČ<==ÉůůůwLÓrĹbQ–––daaÁę©Tóđ±zÉ‚ l@™ĄLęČááˇĚĚĚČđđ°ô÷÷‹®ëb†lnnĘéé©D"‘˛ \__Ëčč¨Äb±˛OvŞ%ápXš››% Y=•ŠcŢWOY° 4‘üĽ9ŹK*•’\.'ŹGzzz¤˝˝ýÝ㎎ŽdccC«şeć=˛żż/łłł5{ŽŻ1ĺŐc¬ÄMôźnooĹëő~úQŚŐ"™LJgg§ŐÓ¨µśfá{±@)7‰)šHA,ĐDDD b&""R 4‘‚X ‰ÄMDD¤ O݇ejjŞs!""˘_ř """ń/n"""±@)šHA,ĐDDD b&""R 4‘‚~!pŐ•ÁĄ2=IEND®B`‚python-pot-0.9.3+dfsg/docs/source/_templates/000077500000000000000000000000001455713015700211605ustar00rootroot00000000000000python-pot-0.9.3+dfsg/docs/source/_templates/module.rst000066400000000000000000000016231455713015700232010ustar00rootroot00000000000000{{ fullname }} {{ underline }} .. automodule:: {{ fullname }} :members: {% block functions %} {% if functions %} Functions --------- {% for item in functions %} .. autofunction:: {{ item }} .. include:: backreferences/{{fullname}}.{{item}}.examples .. raw:: html
{%- endfor %}
{% endif %}
{% endblock %}

{% block classes %}
{% if classes %}

Classes
-------

{% for item in classes %}

.. autoclass:: {{ item }}
   :members:

.. include:: backreferences/{{fullname}}.{{item}}.examples

.. raw:: html

    <div class="sphx-glr-clear"></div>
{%- endfor %}
{% endif %}
{% endblock %}

{% block exceptions %}
{% if exceptions %}

Exceptions
----------

.. autosummary::
{% for item in exceptions %}
   {{ item }}
{%- endfor %}

{% endif %}
{% endblock %}
python-pot-0.9.3+dfsg/docs/source/_templates/versions.html000066400000000000000000000025551455713015700237230ustar00rootroot00000000000000[HTML markup lost in extraction; only the visible text of the version switcher remains:]
Python Optimal Transport
Versions: Release Development Code
python-pot-0.9.3+dfsg/docs/source/all.rst000066400000000000000000000007501455713015700203270ustar00rootroot00000000000000
.. _sphx_glr_api_reference:

API and modules
===============

.. currentmodule:: ot

:py:mod:`ot`:

.. autosummary::
   :toctree: gen_modules/
   :template: module.rst

   backend
   bregman
   coot
   da
   datasets
   dr
   factored
   gaussian
   gnn
   gromov
   lp
   mapping
   optim
   partial
   plot
   regpath
   sliced
   smooth
   stochastic
   unbalanced
   utils
   weak

Main :py:mod:`ot` functions
---------------------------

.. automodule:: ot
   :members:
python-pot-0.9.3+dfsg/docs/source/code_of_conduct.rst000066400000000000000000000002031455713015700226650ustar00rootroot00000000000000Code of conduct
===============

.. include:: ../../.github/CODE_OF_CONDUCT.md
   :parser: myst_parser.sphinx_
   :start-line: 2
python-pot-0.9.3+dfsg/docs/source/conf.py000066400000000000000000000266431455713015700203310ustar00rootroot00000000000000# -*- coding: utf-8 -*-
#
# POT documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 24 11:10:10 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys
import os
import re

try:
    import sphinx_gallery
except ImportError:
    print("warning sphinx-gallery not installed")

# !!!! allow readthedoc compilation
try:
    from unittest.mock import MagicMock
except ImportError:
    from mock import Mock as MagicMock

## check whether in the source directory...
#
# !!! This should be commented when executing sphinx-gallery
class Mock(MagicMock):
    @classmethod
    def __getattr__(cls, name):
        return MagicMock()

MOCK_MODULES = ['cupy']  # 'autograd.numpy','pymanopt.manifolds','pymanopt.solvers',
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# !!!!

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath("../.."))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'sphinx.ext.napoleon',
    'sphinx_gallery.gen_gallery',
    'myst_parser',
    "sphinxcontrib.jquery",
]

autosummary_generate = True
napoleon_numpy_docstring = True

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']
# source_suffix = '.rst'

# The encoding of source files.
source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'POT Python Optimal Transport'
copyright = u'2016-2023, POT Contributors'
author = u'Rémi Flamary, POT Contributors'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
__version__ = re.search(
    r'__version__\s*=\s*[\'"]([^\'"]*)[\'"]',  # It excludes inline comment too
    open('../../ot/__init__.py').read()).group(1)

# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/images/logo_dark.svg'

# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
#html_css_files = ["css/custom.css"]

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'

# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}

# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'

# Output file base name for HTML help builder.
htmlhelp_basename = 'POTdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',

    # Latex figure (float) alignment
    #'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'POT.tex', u'POT Python Optimal Transport library',
     author, 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'pot', u'POT Python Optimal Transport',
     [author], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'POT', u'POT Python Optimal Transport',
     author, 'POT', 'Python Optimal Transport',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False

autodoc_default_options = {'autosummary': True,
                           'autosummary_imported_members': True}

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('https://docs.python.org/3', None),
                       'numpy': ('https://numpy.org/doc/stable/', None),
                       'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
                       'matplotlib': ('http://matplotlib.org/', None),
                       'torch': ('https://pytorch.org/docs/stable/', None),
                       'jax': ('https://jax.readthedocs.io/en/latest/', None)}

sphinx_gallery_conf = {
    'examples_dirs': ['../../examples', '../../examples/da'],
    'gallery_dirs': 'auto_examples',
    'filename_pattern': 'plot_',  # (?!barycenter_fgw)
    'nested_sections': False,
    'backreferences_dir': 'gen_modules/backreferences',
    'inspect_global_variables': True,
    'doc_module': ('ot', 'numpy', 'scipy', 'pylab'),
    'matplotlib_animations': True,
    'reference_url': {
        'ot': None}
}
python-pot-0.9.3+dfsg/docs/source/contributing.rst000066400000000000000000000002101455713015700222610ustar00rootroot00000000000000Contributing to POT
===================

.. include:: ../../.github/CONTRIBUTING.md
   :parser: myst_parser.sphinx_
   :start-line: 2
python-pot-0.9.3+dfsg/docs/source/contributors.rst000066400000000000000000000001621455713015700223110ustar00rootroot00000000000000Contributors
============

.. include:: ../../CONTRIBUTORS.md
   :parser: myst_parser.sphinx_
   :start-line: 2
python-pot-0.9.3+dfsg/docs/source/index.rst000066400000000000000000000012261455713015700206650ustar00rootroot00000000000000.. POT documentation master file, created by
   sphinx-quickstart on Mon Oct 24 11:10:10 2016.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

POT: Python Optimal Transport
=============================

.. image:: _static/images/logo.svg
   :width: 400
   :alt: POT Logo

Contents
--------

.. toctree::
   :maxdepth: 1

   self
   quickstart
   all
   auto_examples/index
   releases
   contributors
   contributing
   code_of_conduct

.. include:: ../../README.md
   :parser: myst_parser.sphinx_

Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
python-pot-0.9.3+dfsg/docs/source/quickstart.rst000066400000000000000000001507771455713015700217660ustar00rootroot00000000000000
Quick start guide
=================

In the following we provide some pointers about which functions and classes to
use for different problems related to optimal transport (OT) and machine
learning. We refer when we can to concrete examples in the documentation that
are also available as notebooks on the POT Github.

.. note::
note:: For a good introduction to numerical optimal transport we refer the reader to `the book `_ by Peyré and Cuturi [15]_. For a more detailed introduction to OT and how it can be used in ML applications we refer the reader to the following `OTML tutorial `_.

.. note::

    Since version 0.8, POT provides a backend to automatically solve some OT
    problems independently from the toolbox used by the user
    (numpy/torch/jax). We provide a discussion about which functions are
    compatible in section `Backend section <#solving-ot-with-multiple-backends>`_.

Why Optimal Transport ?
-----------------------

When to use OT
^^^^^^^^^^^^^^

Optimal Transport (OT) is a mathematical problem introduced by Gaspard Monge in 1781 that aims at finding the most efficient way to move mass between distributions. The cost of moving a unit of mass between two positions is called the ground cost and the objective is to minimize the overall cost of moving one mass distribution onto another one. The optimization problem can be expressed for two distributions :math:`\mu_s` and :math:`\mu_t` as

.. math::
    \min_{m, m \# \mu_s = \mu_t} \int c(x,m(x))d\mu_s(x),

where :math:`c(\cdot,\cdot)` is the ground cost and the constraint :math:`m \# \mu_s = \mu_t` ensures that :math:`\mu_s` is completely transported to :math:`\mu_t`. This problem is particularly difficult to solve because of this constraint and has been replaced in practice (on discrete distributions) by a linear program that is easier to solve. This corresponds to the Kantorovitch formulation where the Monge mapping :math:`m` is replaced by a joint distribution (the OT matrix expressed in the next section) (see :ref:`kantorovitch_solve`).

From the optimization problem above we can see that there are two main aspects of the OT solution that can be used in practical applications:

- The optimal value (Wasserstein distance): Measures similarity between distributions.
- The optimal mapping (Monge mapping, OT matrix): Finds correspondences between distributions.

In the first case, OT can be used to measure similarity between distributions (or datasets); in this case the Wasserstein distance (the optimal value of the problem) is used. In the second case one can be interested in the way the mass is moved between the distributions (the mapping). This mapping can then be used to transfer knowledge between distributions.

Wasserstein distance between distributions
""""""""""""""""""""""""""""""""""""""""""

OT is often used to measure similarity between distributions, especially when they do not share the same support. When the supports of the distributions are disjoint, OT-based Wasserstein distances compare favorably to popular f-divergences such as the Kullback-Leibler and Jensen-Shannon divergences and the Total Variation distance. What is particularly interesting for data science applications is that one can compute meaningful sub-gradients of the Wasserstein distance. For these reasons it has become a very efficient tool for machine learning applications that need to measure and optimize similarity between empirical distributions.

Numerous contributions make use of this approach in the machine learning (ML) literature. For example OT was used for training `Generative Adversarial Networks (GANs) `_ in order to overcome the vanishing gradient problem. It has also been used to find `discriminant `_ or `robust `_ subspaces for a dataset. The Wasserstein distance has also been used to measure `similarity between word embeddings of documents `_ or between `signals `_ or `spectra `_.
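As a quick illustration of the Wasserstein distance used as a similarity measure, here is a minimal sketch on hypothetical random 2D point clouds (the functions :any:`ot.unif`, :any:`ot.dist` and :any:`ot.emd2` used here are detailed in the next sections):

.. code:: python

    import numpy as np
    import ot

    rng = np.random.RandomState(0)
    xs = rng.randn(50, 2)       # source samples (hypothetical data)
    xt = rng.randn(60, 2) + 2   # target samples, shifted

    a = ot.unif(50)             # uniform weights on the source
    b = ot.unif(60)             # uniform weights on the target

    M = ot.dist(xs, xt)         # squared Euclidean cost by default
    W22 = ot.emd2(a, b, M)      # squared 2-Wasserstein distance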
OT for mapping estimation
"""""""""""""""""""""""""

A very interesting aspect of the OT problem is the OT mapping in itself. When computing optimal transport between discrete distributions one output is the OT matrix that will provide you with correspondences between the samples in each distribution. This correspondence is estimated with respect to the OT criterion and is found in an unsupervised way, which makes it very interesting for transfer problems between datasets. It has been used to perform `color transfer between images `_ or in the context of `domain adaptation `_. More recent applications include the use of extensions of OT (Gromov-Wasserstein) to find correspondences between languages in `word embeddings `_.

When to use POT
^^^^^^^^^^^^^^^

The main objective of POT is to provide OT solvers for the rapidly growing area of OT in the context of machine learning. To this end we implement a number of solvers that have been proposed in research papers. Doing so we aim to promote reproducible research and foster novel developments. One very important aspect of POT is its ability to be easily extended. For instance we provide a very generic OT solver :any:`ot.optim.cg` that can solve OT problems with any smooth/continuous regularization term, making it particularly practical for research purposes. Note that this generic solver has been used to solve both graph Laplacian regularized OT and Gromov Wasserstein [30]_.

When not to use POT
"""""""""""""""""""

While POT provides, to the best of our knowledge, one of the most efficient exact OT solvers, it has not been designed to handle large scale OT problems. For instance the memory cost for an OT problem is always :math:`\mathcal{O}(n^2)` because the cost matrix has to be computed. The exact solver is of time complexity :math:`\mathcal{O}(n^3\log(n))` and the Sinkhorn solver has been proven to be nearly :math:`\mathcal{O}(n^2)`, which is still too complex for very large scale problems. If you need to solve OT with a large number of samples, we recommend using entropic regularization and the memory efficient implementation of Sinkhorn proposed in `GeomLoss `_. This implementation is compatible with Pytorch and can handle a large number of samples. Another approach to estimate the Wasserstein distance for a very large number of samples is to use the trick from `Wasserstein GAN `_ that solves the problem in the dual with a neural network estimating the dual variable. Note that in this case you are only solving an approximation of the Wasserstein distance because the 1-Lipschitz constraint on the dual cannot be enforced exactly (it is approximated through filter thresholding or regularization). Finally note that in order to avoid solving large scale OT problems, a number of recent approaches minimize the expected Wasserstein distance on minibatches, which is different from the Wasserstein distance but has better computational and `statistical properties `_.

Optimal transport and Wasserstein distance
------------------------------------------

.. note::

    In POT, most functions that solve OT or regularized OT problems have two
    versions that return the OT matrix or the value of the optimal solution.
    For instance :any:`ot.emd` returns the OT matrix and :any:`ot.emd2`
    returns the Wasserstein distance. This approach has been implemented in
    practice for all solvers that return an OT matrix (even
    Gromov-Wasserstein).
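As a quick sanity check of this convention, here is a minimal sketch on a hypothetical 2-bin toy problem showing that both versions reach the same optimum (:code:`np.sum(T * M)` equals the value returned by :any:`ot.emd2`):

.. code:: python

    import numpy as np
    import ot

    # toy histograms and cost matrix (hypothetical data)
    a = np.array([0.5, 0.5])
    b = np.array([0.5, 0.5])
    M = np.array([[0.0, 1.0], [1.0, 0.0]])

    T = ot.emd(a, b, M)    # OT matrix
    W = ot.emd2(a, b, M)   # value of the optimal solution

    assert np.allclose(W, np.sum(T * M))  # same optimum, two outputs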
.. _kantorovitch_solve:

Solving optimal transport
^^^^^^^^^^^^^^^^^^^^^^^^^

The optimal transport problem between discrete distributions is often expressed as

.. math::
    \gamma^* = arg\min_{\gamma \in \mathbb{R}_+^{m\times n}} \quad \sum_{i,j}\gamma_{i,j}M_{i,j}

    s.t. \gamma 1 = a; \gamma^T 1= b; \gamma\geq 0

where:

- :math:`M\in\mathbb{R}_+^{m\times n}` is the metric cost matrix defining the cost to move mass from bin :math:`a_i` to bin :math:`b_j`.
- :math:`a` and :math:`b` are histograms on the simplex (positive, sum to 1) that represent the weights of each sample in the source and target distributions.

Solving the linear program above can be done using the function :any:`ot.emd` that will return the optimal transport matrix :math:`\gamma^*`:

.. code:: python

    # a and b are 1D histograms (sum to 1 and positive)
    # M is the ground cost matrix
    T = ot.emd(a, b, M)  # exact linear program

The method implemented for solving the OT problem is the network simplex. It is implemented in C from [1]_. It has a complexity of :math:`O(n^3)` but the solver is quite efficient and uses the sparsity of the solution.

.. minigallery:: ot.emd
    :add-heading: Examples of use for :any:`ot.emd`
    :heading-level: "

Computing Wasserstein distance
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The value of the OT solution is often more interesting than the OT matrix:

.. math::
    OT(a,b) = \min_{\gamma \in \mathbb{R}_+^{m\times n}} \quad \sum_{i,j}\gamma_{i,j}M_{i,j}

    s.t. \gamma 1 = a; \gamma^T 1= b; \gamma\geq 0

It can be computed from an already estimated OT matrix with :code:`np.sum(T*M)` or directly with the function :any:`ot.emd2`.

.. code:: python

    # a and b are 1D histograms (sum to 1 and positive)
    # M is the ground cost matrix
    W = ot.emd2(a, b, M)  # Wasserstein distance / EMD value

Note that the well known `Wasserstein distance `_ between distributions a and b is defined as

.. math::
    W_p(a,b)=(\min_{\gamma \in \mathbb{R}_+^{m\times n}} \sum_{i,j}\gamma_{i,j}\|x_i-y_j\|_p)^\frac{1}{p}

    s.t. \gamma 1 = a; \gamma^T 1= b; \gamma\geq 0

This means that if you want to compute the :math:`W_2` you need to compute the square root of :any:`ot.emd2` when providing :code:`M = ot.dist(xs, xt)`, which uses the squared Euclidean distance by default. Computing the :math:`W_1` Wasserstein distance can be done directly with :any:`ot.emd2` when providing :code:`M = ot.dist(xs, xt, metric='euclidean')` to use the Euclidean distance.

.. minigallery:: ot.emd2
    :add-heading: Examples of use for :any:`ot.emd2`
    :heading-level: "

Special cases
^^^^^^^^^^^^^

Note that the OT problem and the corresponding Wasserstein distance can in some special cases be computed very efficiently. For instance when the samples are in 1D, the OT problem can be solved in :math:`O(n\log(n))` by using a simple sorting. In this case we provide the functions :any:`ot.emd_1d` and :any:`ot.emd2_1d` that return respectively the OT matrix and value. Note that since the solution is very sparse the :code:`sparse` parameter of :any:`ot.emd_1d` allows for solving and returning the solution for very large problems. Note that in order to compute directly the :math:`W_p` Wasserstein distance in 1D we provide the function :any:`ot.wasserstein_1d` that takes :code:`p` as a parameter.
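For instance, here is a minimal sketch of the 1D special case on hypothetical samples. Note that, as documented in :any:`ot.wasserstein_1d`, the returned value is the :math:`p`-Wasserstein cost (the distance raised to the power :math:`p`), so the distance itself is obtained by taking the :math:`1/p` power:

.. code:: python

    import numpy as np
    import ot

    rng = np.random.RandomState(0)
    xs = rng.randn(500)        # 1D source samples (hypothetical data)
    xt = rng.randn(500) + 2.0  # 1D target samples

    # uniform weights are assumed when none are given
    cost = ot.wasserstein_1d(xs, xt, p=2)  # W_2 to the power 2
    W2 = cost ** 0.5                       # the W_2 distance itself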
Another special case for estimating OT and the Monge mapping is between Gaussian distributions. In this case there exists a closed form solution given in Remark 2.29 in [15]_ and the Monge mapping is an affine function that can also be computed from the covariances and means of the source and target distributions. In the case when the finite sample dataset is assumed to be Gaussian, we provide :any:`ot.gaussian.bures_wasserstein_mapping` that returns the parameters for the Monge mapping.

Regularized Optimal Transport
-----------------------------

Recent developments have shown the interest of regularized OT both in terms of computational and statistical properties. We address in this section the regularized OT problems that can be expressed as

.. math::
    \gamma^* = arg\min_{\gamma \in \mathbb{R}_+^{m\times n}} \quad \sum_{i,j}\gamma_{i,j}M_{i,j} + \lambda\Omega(\gamma)

    s.t. \gamma 1 = a; \gamma^T 1= b; \gamma\geq 0

where:

- :math:`M\in\mathbb{R}_+^{m\times n}` is the metric cost matrix defining the cost to move mass from bin :math:`a_i` to bin :math:`b_j`.
- :math:`a` and :math:`b` are histograms (positive, sum to 1) that represent the weights of each sample in the source and target distributions.
- :math:`\Omega` is the regularization term.

We discuss in the following specific algorithms that can be used depending on the regularization term.

Entropic regularized OT
^^^^^^^^^^^^^^^^^^^^^^^

This is the most common regularization used for optimal transport. It has been proposed in the ML community by Marco Cuturi in his seminal paper [2]_. This regularization has the following expression

.. math::
    \Omega(\gamma)=\sum_{i,j}\gamma_{i,j}\log(\gamma_{i,j})

The use of the regularization term above in the optimization problem has a very strong impact. First it makes the problem smooth, which leads to new optimization procedures such as the well known Sinkhorn algorithm [2]_ or L-BFGS (see :any:`ot.smooth`). Next it makes the problem strictly convex, meaning that there will be a unique solution. Finally the solution of the resulting optimization problem can be expressed as:

.. math::
    \gamma_\lambda^*=\text{diag}(u)K\text{diag}(v)

where :math:`u` and :math:`v` are vectors and :math:`K=\exp(-M/\lambda)` where the :math:`\exp` is taken component-wise. In order to solve the optimization problem, one can use an alternating projection algorithm called Sinkhorn-Knopp that can be very efficient for large values of regularization.

The Sinkhorn-Knopp algorithm is implemented in :any:`ot.sinkhorn` and :any:`ot.sinkhorn2` that return respectively the OT matrix and the value of the linear term. Note that the regularization parameter :math:`\lambda` in the equation above is given to those functions with the parameter :code:`reg`.

>>> import ot
>>> a = [.5, .5]
>>> b = [.5, .5]
>>> M = [[0., 1.], [1., 0.]]
>>> ot.sinkhorn(a, b, M, 1)
array([[ 0.36552929,  0.13447071],
       [ 0.13447071,  0.36552929]])
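Similarly, a minimal sketch of getting the value of the linear term on the same toy data (here :code:`reg` plays the role of :math:`\lambda` above):

.. code:: python

    # continuing the toy example above
    W_reg = ot.sinkhorn2(a, b, M, reg=1)  # value of the linear term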
More details about the algorithms used are given in the following note.

.. note::

    The main function to solve entropic regularized OT is :any:`ot.sinkhorn`.
    This function is a wrapper and the parameter :code:`method` allows you to
    select the actual algorithm used to solve the problem:

    + :code:`method='sinkhorn'` calls :any:`ot.bregman.sinkhorn_knopp` the
      classic algorithm [2]_.
    + :code:`method='sinkhorn_log'` calls :any:`ot.bregman.sinkhorn_log` the
      Sinkhorn algorithm in log space [2]_ that is more stable but can be
      slower in numpy since `logsumexp` is not implemented in parallel. It is
      the recommended solver for applications that require differentiability
      with a small number of iterations.
    + :code:`method='sinkhorn_stabilized'` calls
      :any:`ot.bregman.sinkhorn_stabilized` the log stabilized version of the
      algorithm [9]_.
    + :code:`method='sinkhorn_epsilon_scaling'` calls
      :any:`ot.bregman.sinkhorn_epsilon_scaling` the epsilon scaling version
      of the algorithm [9]_.
    + :code:`method='greenkhorn'` calls :any:`ot.bregman.greenkhorn` the
      greedy Sinkhorn version of the algorithm [22]_.
    + :code:`method='screenkhorn'` calls :any:`ot.bregman.screenkhorn` the
      screening Sinkhorn version of the algorithm [26]_.

In addition to all those variants of Sinkhorn, we have another implementation solving the problem in the smooth dual or semi-dual in :any:`ot.smooth`. This solver uses the :any:`scipy.optimize.minimize` function to solve the smooth problem with the :code:`L-BFGS-B` algorithm. To use this solver, use functions :any:`ot.smooth.smooth_ot_dual` or :any:`ot.smooth.smooth_ot_semi_dual` with parameter :code:`reg_type='kl'` to choose entropic/Kullback-Leibler regularization.
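For instance, here is a minimal sketch (on hypothetical random samples) of solving entropic OT in the dual with L-BFGS-B:

.. code:: python

    import numpy as np
    import ot

    rng = np.random.RandomState(0)
    a = ot.unif(30)  # uniform histograms
    b = ot.unif(40)
    M = ot.dist(rng.randn(30, 2), rng.randn(40, 2))  # squared Euclidean cost

    # entropic (KL) regularized OT solved in the dual with L-BFGS-B
    T = ot.smooth.smooth_ot_dual(a, b, M, reg=1.0, reg_type='kl')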
**Choosing a Sinkhorn solver**

By default and when using a regularization parameter that is not too small the default Sinkhorn solver should be enough. If you need to use a small regularization to get sharper OT matrices, you should use the :any:`ot.bregman.sinkhorn_stabilized` solver that will avoid numerical errors. This last solver can be very slow in practice and might not even converge to a reasonable OT matrix in a finite time. This is why :any:`ot.bregman.sinkhorn_epsilon_scaling` that relies on iterating the value of the regularization (and using warm start) sometimes leads to better solutions. Note that the greedy version of Sinkhorn :any:`ot.bregman.greenkhorn` can also lead to a speedup and the screening version of Sinkhorn :any:`ot.bregman.screenkhorn` aims at providing a fast approximation of the Sinkhorn problem. For use of GPU and gradient computation with a small number of iterations we strongly recommend the :any:`ot.bregman.sinkhorn_log` solver that does not need to check for numerical problems.

Recently Genevay et al. [23]_ introduced the Sinkhorn divergence that builds on entropic regularization to compute a fast and differentiable geometric divergence between empirical distributions. Note that we provide a function that computes directly (with no need to precompute the :code:`M` matrix) the Sinkhorn divergence for empirical distributions in :any:`ot.bregman.empirical_sinkhorn_divergence`. Similarly one can compute the OT matrix and loss for empirical distributions with respectively :any:`ot.bregman.empirical_sinkhorn` and :any:`ot.bregman.empirical_sinkhorn2`.

Finally note that we also provide in :any:`ot.stochastic` several implementations of stochastic solvers for entropic regularized OT [18]_ [19]_. Those pure Python implementations are not optimized for speed but provide a robust implementation of the algorithms in [18]_ [19]_.

.. minigallery:: ot.sinkhorn
    :add-heading: Examples of use for :any:`ot.sinkhorn`
    :heading-level: "

.. minigallery:: ot.sinkhorn2
    :add-heading: Examples of use for :any:`ot.sinkhorn2`
    :heading-level: "

Other regularizations
^^^^^^^^^^^^^^^^^^^^^

While entropic OT is the most common and favored in practice, there exist other kinds of regularization. We provide in POT two specific solvers for other regularization terms, namely quadratic regularization and group Lasso regularization. But we also provide in :any:`ot.optim` two generic solvers that allow solving any smooth regularization in practice.

Quadratic regularization
""""""""""""""""""""""""

The first general regularization term we can solve is the quadratic regularization of the form

.. math::
    \Omega(\gamma)=\sum_{i,j} \gamma_{i,j}^2

This regularization term has an effect similar to entropic regularization by densifying the OT matrix, yet it keeps some sort of sparsity that is lost with entropic regularization as soon as :math:`\lambda>0` [17]_. This problem can be solved with POT using solvers from :any:`ot.smooth`, more specifically the functions :any:`ot.smooth.smooth_ot_dual` or :any:`ot.smooth.smooth_ot_semi_dual` with parameter :code:`reg_type='l2'` to choose the quadratic regularization.

.. minigallery:: ot.smooth.smooth_ot_dual ot.smooth.smooth_ot_semi_dual ot.optim.cg
    :add-heading: Examples of use of quadratic regularization
    :heading-level: "

Group Lasso regularization
""""""""""""""""""""""""""

Another regularization that has been used in recent years [5]_ is the group Lasso regularization

.. math::
    \Omega(\gamma)=\sum_{j,G\in\mathcal{G}} \|\gamma_{G,j}\|_q^p

where :math:`\mathcal{G}` contains non-overlapping groups of lines in the OT matrix. This regularization proposed in [5]_ promotes sparsity at the group level and for instance will force target samples to get mass from a small number of groups. Note that the exact OT solution is already sparse so this regularization does not make sense if it is not combined with entropic regularization. Depending on the choice of :code:`p` and :code:`q`, the problem can be solved with different approaches. When :code:`q=1` and :code:`p<1` the problem is non-convex but can be solved using an efficient majorization-minimization approach with :any:`ot.sinkhorn_lpl1_mm`. When :code:`q=2` and :code:`p=1` we recover the convex group Lasso and we provide a solver using the generalized conditional gradient algorithm [7]_ in the function :any:`ot.da.sinkhorn_l1l2_gl`.

.. minigallery:: ot.da.SinkhornLpl1Transport ot.da.SinkhornL1l2Transport ot.da.sinkhorn_l1l2_gl ot.da.sinkhorn_lpl1_mm
    :add-heading: Examples of group Lasso regularization
    :heading-level: "

Generic solvers
"""""""""""""""

Finally we propose in POT generic solvers that can be used to solve any regularization as long as you can provide a function computing the regularization and a function computing its gradient (or sub-gradient). In order to solve

.. math::
    \gamma^* = arg\min_\gamma \quad \sum_{i,j}\gamma_{i,j}M_{i,j} + \lambda\Omega(\gamma)

    s.t. \gamma 1 = a; \gamma^T 1= b; \gamma\geq 0

you can use the function :any:`ot.optim.cg` that will use a conditional gradient as proposed in [6]_. You need to provide the regularization function as parameter ``f`` and its gradient as parameter ``df``. Note that the conditional gradient relies on iteratively solving a linearization of the problem using the exact :any:`ot.emd`, so it can be quite slow in practice. However, since its iterates always remain in the feasible set, it always returns a transport matrix that does not violate the marginals.

Another generic solver is proposed to solve the problem:

.. math::
    \gamma^* = arg\min_\gamma \quad \sum_{i,j}\gamma_{i,j}M_{i,j}+ \lambda_e\Omega_e(\gamma) + \lambda\Omega(\gamma)

    s.t. \gamma 1 = a; \gamma^T 1= b; \gamma\geq 0

where :math:`\Omega_e` is the entropic regularization. In this case we use a generalized conditional gradient [7]_ implemented in :any:`ot.optim.gcg` that does not linearize the entropic term but relies on :any:`ot.sinkhorn` for its iterations.
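As an illustration, here is a minimal sketch (assuming the quadratic regularization :math:`\Omega(\gamma)=\frac{1}{2}\sum_{i,j}\gamma_{i,j}^2` and hypothetical random samples) of how a custom regularizer and its gradient are passed to :any:`ot.optim.cg`:

.. code:: python

    import numpy as np
    import ot

    rng = np.random.RandomState(0)
    a = ot.unif(20)
    b = ot.unif(30)
    M = ot.dist(rng.randn(20, 2), rng.randn(30, 2))

    def f(G):   # regularization term Omega(gamma)
        return 0.5 * np.sum(G ** 2)

    def df(G):  # its gradient
        return G

    T = ot.optim.cg(a, b, M, reg=1e-1, f=f, df=df)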
.. minigallery:: ot.optim.cg ot.optim.gcg
    :add-heading: Examples of the generic solvers
    :heading-level: "

Wasserstein Barycenters
-----------------------

A Wasserstein barycenter is a distribution that minimizes its Wasserstein distance with respect to other distributions [16]_. It corresponds to minimizing the following problem by searching a distribution :math:`\mu` such that

.. math::
    \min_\mu \quad \sum_{k} w_kW(\mu,\mu_k)

In practice we model a distribution with a finite number of support positions:

.. math::
    \mu=\sum_{i=1}^n a_i\delta_{x_i}

where :math:`a` is a histogram on the simplex and the :math:`\{x_i\}` are the positions of the support. We can clearly see here that optimizing :math:`\mu` can be done by searching for optimal weights :math:`a` or an optimal support :math:`\{x_i\}` (optimizing both is also an option). We provide in POT solvers to estimate a discrete Wasserstein barycenter in both cases.

Barycenters with fixed support
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

When optimizing a barycenter with a fixed support, the optimization problem can be expressed as

.. math::
    \min_a \quad \sum_{k} w_k W(a,b_k)

where :math:`b_k` are also weights in the simplex. In the non-regularized case, the problem above is a classical linear program. In this case we propose a solver :meth:`ot.lp.barycenter` that relies on generic LP solvers. By default the function uses :any:`scipy.optimize.linprog`, but more efficient LP solvers from `cvxopt` can also be used by changing the parameter :code:`solver`. Note that this problem requires solving a very large linear program and can be very slow in practice.

Similarly to the OT problem, OT barycenters can be computed in the regularized case. When entropic regularization is used, the problem can be solved with a generalization of the Sinkhorn algorithm based on Bregman projections [3]_. This algorithm is provided in the function :any:`ot.bregman.barycenter` also available as :any:`ot.barycenter`. In this case, the algorithm scales better to large distributions and relies only on matrix multiplications that can be performed in parallel.

In addition to the speedup brought by regularization, one can also greatly accelerate the estimation of the Wasserstein barycenter when the support has a separable structure [21]_. In the case of 2D images for instance one can replace the matrix-vector products in the Bregman projections by convolution operators. We provide an implementation of this algorithm in the function :any:`ot.bregman.convolutional_barycenter2d`.

.. minigallery:: ot.lp.barycenter ot.bregman.barycenter ot.barycenter
    :add-heading: Examples of Wasserstein and regularized Wasserstein barycenters
    :heading-level: "

.. minigallery:: ot.bregman.convolutional_barycenter2d
    :add-heading: An example of convolutional barycenter (:any:`ot.bregman.convolutional_barycenter2d`) computation
    :heading-level: "

Barycenters with free support
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Estimating the Wasserstein barycenter with free support but fixed weights corresponds to solving the following optimization problem:

.. math::
    \min_{\{x_i\}} \quad \sum_{k} w_kW(\mu,\mu_k)

    s.t. \quad \mu=\sum_{i=1}^n a_i\delta_{x_i}

We provide a solver based on [20]_ in :any:`ot.lp.free_support_barycenter`. This function minimizes the problem and returns a locally optimal support :math:`\{x_i\}` for uniform or given weights :math:`a`.
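Here is a minimal sketch of its use on two hypothetical empirical measures in 2D (:code:`X_init` is a random initial support with 10 points):

.. code:: python

    import numpy as np
    import ot

    rng = np.random.RandomState(0)
    measures_locations = [rng.randn(30, 2), rng.randn(40, 2) + 2]  # positions
    measures_weights = [ot.unif(30), ot.unif(40)]                  # weights

    X_init = rng.randn(10, 2)  # initial support of the barycenter
    X = ot.lp.free_support_barycenter(measures_locations,
                                      measures_weights, X_init)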
.. minigallery:: ot.lp.free_support_barycenter
    :add-heading: Examples of free support barycenter estimation
    :heading-level: "

Monge mapping and Domain adaptation
-----------------------------------

The original transport problem investigated by Gaspard Monge was seeking a mapping function that maps (or transports) a source distribution onto a target distribution while minimizing the transport cost. The existence and uniqueness of this optimal mapping is still an open problem in the general case but has been proven for smooth distributions by Brenier in his eponymous `theorem `__. We provide in :any:`ot.da` several solvers for smooth Monge mapping estimation and domain adaptation from discrete distributions.

Monge Mapping estimation
^^^^^^^^^^^^^^^^^^^^^^^^

We now discuss several approaches that are implemented in POT to estimate or approximate a Monge mapping from finite distributions.

First note that when the source and target distributions are supposed to be Gaussian distributions, there exists a closed form solution for the mapping and it is an affine function [14]_ of the form :math:`T(x)=Ax+b`. In this case we provide the function :any:`ot.gaussian.bures_wasserstein_mapping` that returns the operator :math:`A` and vector :math:`b`. Note that if the number of samples is too small there is a parameter :code:`reg` that provides a regularization for the covariance matrix estimation.

For a more general mapping estimation we also provide the barycentric mapping proposed in [6]_. It is implemented in the class :any:`ot.da.EMDTransport` and other transport-based classes in :any:`ot.da`. Those classes are discussed more in the following but follow an interface similar to scikit-learn classes. Finally a method proposed in [8]_ that estimates a continuous mapping approximating the barycentric mapping is provided in :any:`ot.da.joint_OT_mapping_linear` for linear mapping and :any:`ot.da.joint_OT_mapping_kernel` for non-linear mapping.

.. minigallery:: ot.da.joint_OT_mapping_linear ot.da.joint_OT_mapping_kernel ot.gaussian.bures_wasserstein_mapping
    :add-heading: Examples of Monge mapping estimation
    :heading-level: "

Domain adaptation classes
^^^^^^^^^^^^^^^^^^^^^^^^^

The use of OT for domain adaptation (OTDA) was first proposed in [5]_, which also introduced the group Lasso regularization. The main idea of OTDA is to estimate a mapping of the samples between source and target distributions which allows transporting labeled source samples onto a target distribution with no labels.

We provide several classes based on :any:`ot.da.BaseTransport` that provide several OT and mapping estimations. The interface of those classes is similar to classifiers in scikit-learn. At initialization, several parameters such as the regularization parameter value can be set. Then one needs to estimate the mapping with the function :any:`ot.da.BaseTransport.fit`. Finally one can map the samples from source to target with :any:`ot.da.BaseTransport.transform` and from target to source with :any:`ot.da.BaseTransport.inverse_transform`.

Here is an example for the class :any:`ot.da.EMDTransport`:

.. code::

    ot_emd = ot.da.EMDTransport()
    ot_emd.fit(Xs=Xs, Xt=Xt)
    Xs_mapped = ot_emd.transform(Xs=Xs)
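Continuing the example above (with hypothetical new source samples :code:`Xs_new` not seen during fit), one can also map out-of-sample points or map the target samples back onto the source distribution:

.. code::

    Xs_new_mapped = ot_emd.transform(Xs=Xs_new)  # out-of-sample mapping
    Xt_mapped = ot_emd.inverse_transform(Xt=Xt)  # target to source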
A list of the provided implementations is given in the following note.

.. note::

    Here is a list of the OT mapping classes inheriting from
    :any:`ot.da.BaseTransport`

    * :any:`ot.da.EMDTransport`: Barycentric mapping with EMD transport
    * :any:`ot.da.SinkhornTransport`: Barycentric mapping with Sinkhorn transport
    * :any:`ot.da.SinkhornL1l2Transport`: Barycentric mapping with Sinkhorn + group Lasso regularization [5]_
    * :any:`ot.da.SinkhornLpl1Transport`: Barycentric mapping with Sinkhorn + non convex group Lasso regularization [5]_
    * :any:`ot.da.LinearTransport`: Linear mapping estimation between Gaussians [14]_
    * :any:`ot.da.MappingTransport`: Nonlinear mapping estimation [8]_

.. minigallery:: ot.da.SinkhornTransport ot.da.LinearTransport
    :add-heading: Examples of the use of OTDA classes
    :heading-level: "

Unbalanced and partial OT
-------------------------

Unbalanced optimal transport
^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Unbalanced OT is a relaxation of the entropy regularized OT problem where the violation of the constraints on the marginals is added to the objective of the optimization problem. The unbalanced OT metric between two unbalanced histograms a and b is defined as [25]_ [10]_:

.. math::
    W_u(a, b) = \min_\gamma \quad \sum_{i,j}\gamma_{i,j}M_{i,j} + reg\cdot\Omega(\gamma) + reg_m KL(\gamma 1, a) + reg_m KL(\gamma^T 1, b)

    s.t. \quad \gamma\geq 0

where KL is the Kullback-Leibler divergence. This formulation allows for computing approximate mappings between distributions that do not have the same amount of mass. Interestingly the problem can be solved with a generalization of the Bregman projections algorithm [10]_.

We provide a solver for unbalanced OT in :any:`ot.unbalanced`. Computing the optimal transport plan or the transport cost is similar to the balanced case. The Sinkhorn-Knopp algorithm is implemented in :any:`ot.sinkhorn_unbalanced` and :any:`ot.sinkhorn_unbalanced2` that return respectively the OT matrix and the value of the linear term.

.. note::

    The main function to solve entropic regularized UOT is :any:`ot.sinkhorn_unbalanced`.
    This function is a wrapper and the parameter :code:`method` helps you select
    the actual algorithm used to solve the problem:

    + :code:`method='sinkhorn'` calls :any:`ot.unbalanced.sinkhorn_knopp_unbalanced`
      the generalized Sinkhorn algorithm [25]_ [10]_.
    + :code:`method='sinkhorn_stabilized'` calls :any:`ot.unbalanced.sinkhorn_stabilized_unbalanced`
      the log stabilized version of the algorithm [10]_.

.. minigallery:: ot.sinkhorn_unbalanced ot.sinkhorn_unbalanced2 ot.unbalanced.sinkhorn_unbalanced
    :add-heading: Examples of Unbalanced OT
    :heading-level: "

Unbalanced Barycenters
^^^^^^^^^^^^^^^^^^^^^^

As with balanced distributions, we can define a barycenter of a set of histograms with different masses as a Fréchet mean:

.. math::
    \min_{\mu} \quad \sum_{k} w_kW_u(\mu,\mu_k)

where :math:`W_u` is the unbalanced Wasserstein metric defined above. This problem can also be solved using a generalized version of Sinkhorn's algorithm and it is implemented in the main function :any:`ot.barycenter_unbalanced`.

.. note::

    The main function to compute UOT barycenters is :any:`ot.barycenter_unbalanced`.
    This function is a wrapper and the parameter :code:`method` helps you select
    the actual algorithm used to solve the problem:

    + :code:`method='sinkhorn'` calls :meth:`ot.unbalanced.barycenter_unbalanced_sinkhorn`
      the generalized Sinkhorn algorithm [10]_.
    + :code:`method='sinkhorn_stabilized'` calls :any:`ot.unbalanced.barycenter_unbalanced_stabilized`
      the log stabilized version of the algorithm [10]_.
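As a minimal sketch (assuming two hypothetical 1D Gaussian histograms with different total masses built with :any:`ot.datasets.make_1D_gauss`):

.. code:: python

    import numpy as np
    import ot

    n = 50
    x = np.arange(n, dtype=np.float64).reshape((n, 1))
    M = ot.dist(x, x)  # cost on the common 1D grid
    M /= M.max()

    # two histograms with different total masses (columns of A)
    A = np.vstack((ot.datasets.make_1D_gauss(n, m=15, s=5),
                   2.0 * ot.datasets.make_1D_gauss(n, m=35, s=5))).T

    bary = ot.barycenter_unbalanced(A, M, reg=1e-2, reg_m=1.0)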
.. minigallery:: ot.barycenter_unbalanced ot.unbalanced.barycenter_unbalanced
    :add-heading: Examples of Unbalanced OT barycenters
    :heading-level: "

Partial optimal transport
^^^^^^^^^^^^^^^^^^^^^^^^^

Partial OT is a variant of the optimal transport problem where only a fixed amount of mass m is to be transported. The partial OT metric between two histograms a and b is defined as [28]_:

.. math::
    \gamma = \arg\min_\gamma <\gamma,M>_F

    s.t. \gamma\geq 0 \\
         \gamma 1 \leq a \\
         \gamma^T 1 \leq b \\
         1^T \gamma^T 1 = m \leq \min\{\|a\|_1, \|b\|_1\}

Interestingly the problem can be cast into a regular OT problem by adding reservoir points to which the surplus mass is sent [29]_.

We provide a solver for partial OT in :any:`ot.partial`. The exact resolution of the problem is computed in :any:`ot.partial.partial_wasserstein` and :any:`ot.partial.partial_wasserstein2` that return respectively the OT matrix and the value of the linear term. The entropic solution of the problem is computed in :any:`ot.partial.entropic_partial_wasserstein` (see [3]_).

The partial Gromov-Wasserstein formulation of the problem

.. math::
    GW = \min_\gamma \sum_{i,j,k,l} L(C1_{i,k},C2_{j,l})*\gamma_{i,j}*\gamma_{k,l}

    s.t. \gamma\geq 0 \\
         \gamma 1 \leq a \\
         \gamma^T 1 \leq b \\
         1^T \gamma^T 1 = m \leq \min\{\|a\|_1, \|b\|_1\}

is computed in :any:`ot.partial.partial_gromov_wasserstein` and in :any:`ot.partial.entropic_partial_gromov_wasserstein` when considering the entropic regularization of the problem.

.. minigallery:: ot.partial.partial_wasserstein ot.partial.partial_gromov_wasserstein
    :add-heading: Examples of Partial OT
    :heading-level: "

Gromov Wasserstein and extensions
---------------------------------

Gromov Wasserstein (GW)
^^^^^^^^^^^^^^^^^^^^^^^

Gromov Wasserstein (GW) is a generalization of OT to distributions that do not lie in the same space [13]_. In this case one cannot compute distances between samples from the two distributions. [13]_ proposed instead to realign the metric spaces by computing a transport between distance matrices. The Gromov Wasserstein alignment between two distributions can be expressed as the one minimizing:

.. math::
    GW = \min_\gamma \sum_{i,j,k,l} L(C1_{i,k},C2_{j,l})*\gamma_{i,j}*\gamma_{k,l}

    s.t. \gamma 1 = a; \gamma^T 1= b; \gamma\geq 0

where :math:`C1` is the distance matrix between samples in the source distribution, :math:`C2` the one between samples in the target, and :math:`L(C1_{i,k},C2_{j,l})` is a measure of similarity between :math:`C1_{i,k}` and :math:`C2_{j,l}`, often chosen as :math:`L(C1_{i,k},C2_{j,l})=\|C1_{i,k}-C2_{j,l}\|^2`. The optimization problem above is a non-convex quadratic program but we provide a solver that finds a local minimum using conditional gradient in :any:`ot.gromov.gromov_wasserstein`. There also exists an entropic regularized variant of GW that has been proposed in [12]_ and we provide an implementation of their algorithm in :any:`ot.gromov.entropic_gromov_wasserstein`.
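As a minimal sketch (on hypothetical samples living in spaces of different dimensions), one can compute a GW plan as follows:

.. code:: python

    import numpy as np
    import ot

    rng = np.random.RandomState(0)
    xs = rng.randn(30, 2)  # samples in a 2D space
    xt = rng.randn(40, 3)  # samples in a 3D space

    C1 = ot.dist(xs, xs)   # intra-domain distance matrices
    C2 = ot.dist(xt, xt)
    C1 /= C1.max()
    C2 /= C2.max()

    p = ot.unif(30)
    q = ot.unif(40)

    T = ot.gromov.gromov_wasserstein(C1, C2, p, q, loss_fun='square_loss')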
.. minigallery:: ot.gromov.gromov_wasserstein ot.gromov.entropic_gromov_wasserstein ot.gromov.fused_gromov_wasserstein ot.gromov.gromov_wasserstein2
    :add-heading: Examples of computation of GW, regularized GW and FGW
    :heading-level: "

Gromov Wasserstein barycenters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Note that similarly to the Wasserstein distance, GW allows for the definition of GW barycenters that can be expressed as

.. math::
    \min_{C\geq 0} \quad \sum_{k} w_k GW(C,Ck)

where :math:`Ck` is the distance matrix between samples in distribution :math:`k`. Note that interestingly the barycenter is defined as a symmetric positive matrix. We provide a block coordinate optimization procedure in :any:`ot.gromov.gromov_barycenters` and :any:`ot.gromov.entropic_gromov_barycenters` for non-regularized and regularized barycenters respectively.

Finally note that recently a fusion between Wasserstein and GW, coined Fused Gromov-Wasserstein (FGW), has been proposed [24]_. It allows computing a similarity between objects that are only partly in the same space. As such it can be used to measure similarity between labeled graphs for instance and also provides computable barycenters. The implementations of FGW and FGW barycenters are provided in the functions :any:`ot.gromov.fused_gromov_wasserstein` and :any:`ot.gromov.fgw_barycenters`.

.. minigallery:: ot.gromov.gromov_barycenters ot.gromov.fgw_barycenters
    :add-heading: Examples of GW, regularized GW and FGW barycenters
    :heading-level: "

Other applications
------------------

We discuss in the following several OT related problems and tools that have been proposed in the OT and machine learning community.

Wasserstein Discriminant Analysis
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Wasserstein Discriminant Analysis [11]_ is a generalization of `Fisher Linear Discriminant Analysis `__ that allows discrimination between classes that are not linearly separable. It consists in finding a linear projector optimizing the following criterion

.. math::
    P = \text{arg}\min_P \frac{\sum_i OT_e(\mu_i\#P,\mu_i\#P)}{\sum_{i,j\neq i} OT_e(\mu_i\#P,\mu_j\#P)}

where :math:`\#` is the push-forward operator, :math:`OT_e` is the entropic OT loss and :math:`\mu_i` is the distribution of samples from class :math:`i`. :math:`P` is also constrained to be in the Stiefel manifold. WDA can be solved in POT using the function :any:`ot.dr.wda`. It requires having :code:`pymanopt` and :code:`autograd` installed for manifold optimization and automatic differentiation respectively. Note that we also provide the Fisher discriminant estimator in :any:`ot.dr.fda` for easy comparison.

.. warning::

    Note that due to the hard dependency on :code:`pymanopt` and
    :code:`autograd`, :any:`ot.dr` is not imported by default. If you want to
    use it you have to specifically import it with :code:`import ot.dr`.

.. minigallery:: ot.dr.wda
    :add-heading: Examples of the use of WDA
    :heading-level: "

Solving OT with Multiple backends on CPU/GPU
--------------------------------------------

.. _backends_section:

Since version 0.8, POT provides a backend that allows coding solvers independently from the type of the input arrays. The idea is to provide the user with a package that works seamlessly and returns a solution, for instance, as Pytorch tensors when the function has Pytorch tensors as input.

How it works
^^^^^^^^^^^^

The aim of the backend is to use the same function independently of the type of the input arrays. For instance when executing the following code

.. code:: python

    # a and b are 1D histograms (sum to 1 and positive)
    # M is the ground cost matrix
    T = ot.emd(a, b, M)  # exact linear program
    w = ot.emd2(a, b, M)  # Wasserstein computation

the functions :any:`ot.emd` and :any:`ot.emd2` can take inputs of the type :any:`numpy.array`, :any:`torch.tensor` or :any:`jax.numpy.array`. The output of the function will be the same type as the inputs and on the same device. When possible all computations are done on the same device and also when possible the output will be differentiable with respect to the input of the function.
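For instance, here is a minimal sketch of this behavior with the Pytorch backend (assuming hypothetical random samples), where the returned loss is a torch scalar through which one can backpropagate:

.. code:: python

    import torch
    import ot

    xs = torch.randn(30, 2)   # source samples
    xt = torch.randn(40, 2)   # target samples
    a = (torch.ones(30) / 30).requires_grad_(True)  # source weights
    b = torch.ones(40) / 40                         # target weights

    M = ot.dist(xs, xt)       # cost matrix, stays a torch tensor
    loss = ot.emd2(a, b, M)   # differentiable torch scalar
    loss.backward()           # gradients w.r.t. a are now in a.grad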
GPU acceleration
^^^^^^^^^^^^^^^^

The backends provide automatic computations/compatibility on GPU for most of the POT functions. Note that all solvers relying on the exact OT solver in C++ will need to solve the problem on CPU, which can incur some memory copy overhead and be far from optimal when all other computations are done on GPU. They will still work on arrays on GPU since the copy is done automatically.

Some of the functions that rely on the exact C++ solver are:

- :any:`ot.emd`, :any:`ot.emd2`
- :any:`ot.gromov_wasserstein`, :any:`ot.gromov_wasserstein2`
- :any:`ot.optim.cg`

List of compatible Backends
^^^^^^^^^^^^^^^^^^^^^^^^^^^

- `Numpy `_ (all functions and solvers)
- `Pytorch `_ (all outputs differentiable w.r.t. inputs)
- `Jax `_ (some functions are differentiable, some require a wrapper)
- `Tensorflow `_ (all outputs differentiable w.r.t. inputs)
- `Cupy `_ (no differentiation, GPU only)

The library automatically detects which backends are available for use. A backend is instantiated lazily only when necessary to prevent unwarranted GPU memory allocations. You can also disable the import of a specific backend library (e.g., to accelerate the loading of the `ot` library) using the environment variable `POT_BACKEND_DISABLE_<NAME>` with `<NAME>` in (TORCH, TENSORFLOW, CUPY, JAX). For instance, to disable TensorFlow, set `export POT_BACKEND_DISABLE_TENSORFLOW=1`. It's important to note that the `numpy` backend cannot be disabled.

List of compatible modules
^^^^^^^^^^^^^^^^^^^^^^^^^^

This list will get longer for new releases and will hopefully disappear when POT becomes fully implemented with the backend.

- :any:`ot.bregman`
- :any:`ot.gromov` (some functions use CPU only solvers with copy overhead)
- :any:`ot.optim` (some functions use CPU only solvers with copy overhead)
- :any:`ot.sliced`
- :any:`ot.utils` (partial)

FAQ
---

1. **How to solve a discrete optimal transport problem ?**

    The solver for discrete OT is the function :py:mod:`ot.emd` that returns the OT transport matrix. If you want to solve a regularized OT you can use :py:mod:`ot.sinkhorn`.

    Here is a simple use case:

    .. code:: python

        # a and b are 1D histograms (sum to 1 and positive)
        # M is the ground cost matrix
        T = ot.emd(a, b, M)  # exact linear program
        T_reg = ot.sinkhorn(a, b, M, reg)  # entropic regularized OT

    More detailed examples can be seen in this example: :doc:`auto_examples/plot_OT_2D_samples`

2. **pip install POT fails with error : ImportError: No module named Cython.Build**

    As discussed briefly in the README file, POT<0.8 requires :code:`numpy` and :code:`cython` to be installed to build. This corner case is not yet handled by :code:`pip` and for now you need to install both libraries prior to installing POT. Note that this problem does not occur when using conda-forge since the packages there are pre-compiled.

    See `Issue #59 `__ for more details.

3. **Why is Sinkhorn slower than EMD ?**

    This might come from the choice of the regularization term. The speed of convergence of Sinkhorn depends directly on this term [22]_. When the regularization gets very small the problem tries to approximate the exact OT, which leads to slow convergence in addition to numerical problems. In other words, for large regularization Sinkhorn will be very fast to converge; for small regularization (when you need an OT matrix close to the true OT), it might be quicker to use the EMD solver.
    Also note that the numpy implementation of Sinkhorn can use parallel computation depending on the configuration of your system; yet a very important speedup can be obtained by using a GPU implementation since all operations are matrix/vector products.

References
----------

.. [1] Bonneel, N., Van De Panne, M., Paris, S., & Heidrich, W. (2011, December). `Displacement interpolation using Lagrangian mass transport `__. In ACM Transactions on Graphics (TOG) (Vol. 30, No. 6, p. 158). ACM.

.. [2] Cuturi, M. (2013). `Sinkhorn distances: Lightspeed computation of optimal transport `__. In Advances in Neural Information Processing Systems (pp. 2292-2300).

.. [3] Benamou, J. D., Carlier, G., Cuturi, M., Nenna, L., & Peyré, G. (2015). `Iterative Bregman projections for regularized transportation problems `__. SIAM Journal on Scientific Computing, 37(2), A1111-A1138.

.. [5] N. Courty; R. Flamary; D. Tuia; A. Rakotomamonjy, `Optimal Transport for Domain Adaptation `__, in IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. PP, no. 99, pp. 1-1

.. [6] Ferradans, S., Papadakis, N., Peyré, G., & Aujol, J. F. (2014). `Regularized discrete optimal transport `__. SIAM Journal on Imaging Sciences, 7(3), 1853-1882.

.. [7] Rakotomamonjy, A., Flamary, R., & Courty, N. (2015). `Generalized conditional gradient: analysis of convergence and applications `__. arXiv preprint arXiv:1510.06567.

.. [8] M. Perrot, N. Courty, R. Flamary, A. Habrard (2016), `Mapping estimation for discrete optimal transport `__, Neural Information Processing Systems (NIPS).

.. [9] Schmitzer, B. (2016). `Stabilized Sparse Scaling Algorithms for Entropy Regularized Transport Problems `__. arXiv preprint arXiv:1610.06519.

.. [10] Chizat, L., Peyré, G., Schmitzer, B., & Vialard, F. X. (2016). `Scaling algorithms for unbalanced transport problems `__. arXiv preprint arXiv:1607.05816.

.. [11] Flamary, R., Cuturi, M., Courty, N., & Rakotomamonjy, A. (2016). `Wasserstein Discriminant Analysis `__. arXiv preprint arXiv:1608.08063.

.. [12] Gabriel Peyré, Marco Cuturi, and Justin Solomon (2016), `Gromov-Wasserstein averaging of kernel and distance matrices `__ International Conference on Machine Learning (ICML).

.. [13] Mémoli, Facundo (2011). `Gromov–Wasserstein distances and the metric approach to object matching `__. Foundations of computational mathematics 11.4 : 417-487.

.. [14] Knott, M. and Smith, C. S. (1984). `On the optimal mapping of distributions `__, Journal of Optimization Theory and Applications Vol 43.

.. [15] Peyré, G., & Cuturi, M. (2018). `Computational Optimal Transport `__ .

.. [16] Agueh, M., & Carlier, G. (2011). `Barycenters in the Wasserstein space `__. SIAM Journal on Mathematical Analysis, 43(2), 904-924.

.. [17] Blondel, M., Seguy, V., & Rolet, A. (2018). `Smooth and Sparse Optimal Transport `__. Proceedings of the Twenty-First International Conference on Artificial Intelligence and Statistics (AISTATS).

.. [18] Genevay, A., Cuturi, M., Peyré, G. & Bach, F. (2016) `Stochastic Optimization for Large-scale Optimal Transport `__. Advances in Neural Information Processing Systems (2016).

.. [19] Seguy, V., Bhushan Damodaran, B., Flamary, R., Courty, N., Rolet, A. & Blondel, M. `Large-scale Optimal Transport and Mapping Estimation `__. International Conference on Learning Representation (2018)

.. [20] Cuturi, M. and Doucet, A. (2014) `Fast Computation of Wasserstein Barycenters `__. International Conference in Machine Learning

.. [21] Solomon, J., De Goes, F., Peyré, G., Cuturi, M., Butscher, A., Nguyen, A.
& Guibas, L. (2015). `Convolutional wasserstein distances: Efficient optimal transportation on geometric domains `__. ACM Transactions on Graphics (TOG), 34(4), 66. .. [22] J. Altschuler, J.Weed, P. Rigollet, (2017) `Near-linear time approximation algorithms for optimal transport via Sinkhorn iteration `__, Advances in Neural Information Processing Systems (NIPS) 31 .. [23] Genevay, A., Peyré, G., Cuturi, M., `Learning Generative Models with Sinkhorn Divergences `__, Proceedings of the Twenty-First International Conference on Artificial Intelligence and Statistics, (AISTATS) 21, 2018 .. [24] Vayer, T., Chapel, L., Flamary, R., Tavenard, R. and Courty, N. (2019). `Optimal Transport for structured data with application on graphs `__ Proceedings of the 36th International Conference on Machine Learning (ICML). .. [25] Frogner C., Zhang C., Mobahi H., Araya-Polo M., Poggio T. : Learning with a Wasserstein Loss, Advances in Neural Information Processing Systems (NIPS) 2015 .. [26] Alaya M. Z., Bérar M., Gasso G., Rakotomamonjy A. (2019). Screening Sinkhorn Algorithm for Regularized Optimal Transport , Advances in Neural Information Processing Systems 33 (NeurIPS). .. [28] Caffarelli, L. A., McCann, R. J. (2020). Free boundaries in optimal transport and Monge-Ampere obstacle problems , Annals of mathematics, 673-730. .. [29] Chapel, L., Alaya, M., Gasso, G. (2019). Partial Gromov-Wasserstein with Applications on Positive-Unlabeled Learning , arXiv preprint arXiv:2002.08276. .. [30] Flamary, Rémi, et al. "Optimal transport with Laplacian regularization: Applications to domain adaptation and shape matching." NIPS Workshop on Optimal Transport and Machine Learning OTML. 2014. .. [31] Bonneel, Nicolas, et al. `Sliced and radon wasserstein barycenters of measures `_\ , Journal of Mathematical Imaging and Vision 51.1 (2015): 22-45 .. [32] Huang, M., Ma S., Lai, L. (2021). `A Riemannian Block Coordinate Descent Method for Computing the Projection Robust Wasserstein Distance `_\ , Proceedings of the 38th International Conference on Machine Learning (ICML). .. [33] Kerdoncuff T., Emonet R., Marc S. `Sampled Gromov Wasserstein `_\ , Machine Learning Journal (MJL), 2021 .. [34] Feydy, J., Séjourné, T., Vialard, F. X., Amari, S. I., Trouvé, A., & Peyré, G. (2019, April). `Interpolating between optimal transport and MMD using Sinkhorn divergences `_. In The 22nd International Conference on Artificial Intelligence and Statistics (pp. 2681-2690). PMLR. .. [35] Deshpande, I., Hu, Y. T., Sun, R., Pyrros, A., Siddiqui, N., Koyejo, S., & Schwing, A. G. (2019). `Max-sliced wasserstein distance and its use for gans `_. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (pp. 10648-10656). .. [36] Liutkus, A., Simsekli, U., Majewski, S., Durmus, A., & Stöter, F. R. (2019, May). `Sliced-Wasserstein flows: Nonparametric generative modeling via optimal transport and diffusions `_. In International Conference on Machine Learning (pp. 4104-4113). PMLR. .. [37] Janati, H., Cuturi, M., Gramfort, A. `Debiased sinkhorn barycenters `_ Proceedings of the 37th International Conference on Machine Learning, PMLR 119:4692-4701, 2020 .. [38] C. Vincent-Cuaz, T. Vayer, R. Flamary, M. Corneli, N. Courty, `Online Graph Dictionary Learning `_\ , International Conference on Machine Learning (ICML), 2021. .. [39] Gozlan, N., Roberto, C., Samson, P. M., & Tetali, P. (2017). `Kantorovich duality for general transport costs and applications `_. Journal of Functional Analysis, 273(11), 3327-3405. .. 
[40] Forrow, A., Hütter, J. C., Nitzan, M., Rigollet, P., Schiebinger, G., & Weed, J. (2019, April). `Statistical optimal transport via factored couplings `_. In The 22nd International Conference on Artificial Intelligence and Statistics (pp. 2454-2465). PMLR. python-pot-0.9.3+dfsg/docs/source/releases.rst000066400000000000000000000001461455713015700213610ustar00rootroot00000000000000Releases ======== .. include:: ../../RELEASES.md :parser: myst_parser.sphinx_ :start-line: 2 python-pot-0.9.3+dfsg/examples/000077500000000000000000000000001455713015700164115ustar00rootroot00000000000000python-pot-0.9.3+dfsg/examples/README.txt000066400000000000000000000002011455713015700201000ustar00rootroot00000000000000Examples gallery ================ This is a gallery of all the POT example files. OT and regularized OT ---------------------python-pot-0.9.3+dfsg/examples/backends/000077500000000000000000000000001455713015700201635ustar00rootroot00000000000000python-pot-0.9.3+dfsg/examples/backends/README.txt000066400000000000000000000000531455713015700216570ustar00rootroot00000000000000 POT backend examples --------------------python-pot-0.9.3+dfsg/examples/backends/plot_dual_ot_pytorch.py000066400000000000000000000072371455713015700250030ustar00rootroot00000000000000# -*- coding: utf-8 -*- r""" ====================================================================== Dual OT solvers for entropic and quadratic regularized OT with Pytorch ====================================================================== """ # Author: Remi Flamary # # License: MIT License # sphinx_gallery_thumbnail_number = 3 import numpy as np import matplotlib.pyplot as pl import torch import ot import ot.plot # %% # Data generation # --------------- torch.manual_seed(1) n_source_samples = 100 n_target_samples = 100 theta = 2 * np.pi / 20 noise_level = 0.1 Xs, ys = ot.datasets.make_data_classif( 'gaussrot', n_source_samples, nz=noise_level) Xt, yt = ot.datasets.make_data_classif( 'gaussrot', n_target_samples, theta=theta, nz=noise_level) # one of the target mode changes its variance (no linear mapping) Xt[yt == 2] *= 3 Xt = Xt + 4 # %% # Plot data # --------- pl.figure(1, (10, 5)) pl.clf() pl.scatter(Xs[:, 0], Xs[:, 1], marker='+', label='Source samples') pl.scatter(Xt[:, 0], Xt[:, 1], marker='o', label='Target samples') pl.legend(loc=0) pl.title('Source and target distributions') # %% # Convert data to torch tensors # ----------------------------- xs = torch.tensor(Xs) xt = torch.tensor(Xt) # %% # Estimating dual variables for entropic OT # ----------------------------------------- u = torch.randn(n_source_samples, requires_grad=True) v = torch.randn(n_source_samples, requires_grad=True) reg = 0.5 optimizer = torch.optim.Adam([u, v], lr=1) # number of iteration n_iter = 200 losses = [] for i in range(n_iter): # generate noise samples # minus because we maximize te dual loss loss = -ot.stochastic.loss_dual_entropic(u, v, xs, xt, reg=reg) losses.append(float(loss.detach())) if i % 10 == 0: print("Iter: {:3d}, loss={}".format(i, losses[-1])) loss.backward() optimizer.step() optimizer.zero_grad() pl.figure(2) pl.plot(losses) pl.grid() pl.title('Dual objective (negative)') pl.xlabel("Iterations") Ge = ot.stochastic.plan_dual_entropic(u, v, xs, xt, reg=reg) # %% # Plot the estimated entropic OT plan # ----------------------------------- pl.figure(3, (10, 5)) pl.clf() ot.plot.plot2D_samples_mat(Xs, Xt, Ge.detach().numpy(), alpha=0.1) pl.scatter(Xs[:, 0], Xs[:, 1], marker='+', label='Source samples', zorder=2) pl.scatter(Xt[:, 0], Xt[:, 1], 
marker='o', label='Target samples', zorder=2) pl.legend(loc=0) pl.title('Source and target distributions') # %% # Estimating dual variables for quadratic OT # ------------------------------------------ u = torch.randn(n_source_samples, requires_grad=True) v = torch.randn(n_source_samples, requires_grad=True) reg = 0.01 optimizer = torch.optim.Adam([u, v], lr=1) # number of iteration n_iter = 200 losses = [] for i in range(n_iter): # generate noise samples # minus because we maximize te dual loss loss = -ot.stochastic.loss_dual_quadratic(u, v, xs, xt, reg=reg) losses.append(float(loss.detach())) if i % 10 == 0: print("Iter: {:3d}, loss={}".format(i, losses[-1])) loss.backward() optimizer.step() optimizer.zero_grad() pl.figure(4) pl.plot(losses) pl.grid() pl.title('Dual objective (negative)') pl.xlabel("Iterations") Gq = ot.stochastic.plan_dual_quadratic(u, v, xs, xt, reg=reg) # %% # Plot the estimated quadratic OT plan # ------------------------------------ pl.figure(5, (10, 5)) pl.clf() ot.plot.plot2D_samples_mat(Xs, Xt, Gq.detach().numpy(), alpha=0.1) pl.scatter(Xs[:, 0], Xs[:, 1], marker='+', label='Source samples', zorder=2) pl.scatter(Xt[:, 0], Xt[:, 1], marker='o', label='Target samples', zorder=2) pl.legend(loc=0) pl.title('OT plan with quadratic regularization') python-pot-0.9.3+dfsg/examples/backends/plot_optim_gromov_pytorch.py000066400000000000000000000170761455713015700260770ustar00rootroot00000000000000r""" ======================================================= Optimizing the Gromov-Wasserstein distance with PyTorch ======================================================= In this example, we use the pytorch backend to optimize the Gromov-Wasserstein (GW) loss between two graphs expressed as empirical distribution. In the first part, we optimize the weights on the node of a simple template graph so that it minimizes the GW with a given Stochastic Block Model graph. We can see that this actually recovers the proportion of classes in the SBM and allows for an accurate clustering of the nodes using the GW optimal plan. In the second part, we optimize simultaneously the weights and the structure of the template graph which allows us to perform graph compression and to recover other properties of the SBM. The backend actually uses the gradients expressed in [38] to optimize the weights. [38] C. Vincent-Cuaz, T. Vayer, R. Flamary, M. Corneli, N. Courty, Online Graph Dictionary Learning, International Conference on Machine Learning (ICML), 2021. 
""" # Author: Rémi Flamary # # License: MIT License # sphinx_gallery_thumbnail_number = 3 from sklearn.manifold import MDS import numpy as np import matplotlib.pylab as pl import torch import ot from ot.gromov import gromov_wasserstein2 # %% # Graph generation # ---------------- rng = np.random.RandomState(42) def get_sbm(n, nc, ratio, P): nbpc = np.round(n * ratio).astype(int) n = np.sum(nbpc) C = np.zeros((n, n)) for c1 in range(nc): for c2 in range(c1 + 1): if c1 == c2: for i in range(np.sum(nbpc[:c1]), np.sum(nbpc[:c1 + 1])): for j in range(np.sum(nbpc[:c2]), i): if rng.rand() <= P[c1, c2]: C[i, j] = 1 else: for i in range(np.sum(nbpc[:c1]), np.sum(nbpc[:c1 + 1])): for j in range(np.sum(nbpc[:c2]), np.sum(nbpc[:c2 + 1])): if rng.rand() <= P[c1, c2]: C[i, j] = 1 return C + C.T n = 100 nc = 3 ratio = np.array([.5, .3, .2]) P = np.array(0.6 * np.eye(3) + 0.05 * np.ones((3, 3))) C1 = get_sbm(n, nc, ratio, P) # get 2d position for nodes x1 = MDS(dissimilarity='precomputed', random_state=0).fit_transform(1 - C1) def plot_graph(x, C, color='C0', s=None): for j in range(C.shape[0]): for i in range(j): if C[i, j] > 0: pl.plot([x[i, 0], x[j, 0]], [x[i, 1], x[j, 1]], alpha=0.2, color='k') pl.scatter(x[:, 0], x[:, 1], c=color, s=s, zorder=10, edgecolors='k', cmap='tab10', vmax=9) pl.figure(1, (10, 5)) pl.clf() pl.subplot(1, 2, 1) plot_graph(x1, C1, color='C0') pl.title("SBM Graph") pl.axis("off") pl.subplot(1, 2, 2) pl.imshow(C1, interpolation='nearest') pl.title("Adjacency matrix") pl.axis("off") # %% # Optimizing GW w.r.t. the weights on a template structure # -------------------------------------------------------- # The adjacency matrix C1 is block diagonal with 3 blocks. We want to # optimize the weights of a simple template C0=eye(3) and see if we can # recover the proportion of classes from the SBM (up to a permutation). C0 = np.eye(3) def min_weight_gw(C1, C2, a2, nb_iter_max=100, lr=1e-2): """ solve min_a GW(C1,C2,a, a2) by gradient descent""" # use pyTorch for our data C1_torch = torch.tensor(C1) C2_torch = torch.tensor(C2) a0 = rng.rand(C1.shape[0]) # random_init a0 /= a0.sum() # on simplex a1_torch = torch.tensor(a0).requires_grad_(True) a2_torch = torch.tensor(a2) loss_iter = [] for i in range(nb_iter_max): loss = gromov_wasserstein2(C1_torch, C2_torch, a1_torch, a2_torch) loss_iter.append(loss.clone().detach().cpu().numpy()) loss.backward() #print("{:03d} | {}".format(i, loss_iter[-1])) # performs a step of projected gradient descent with torch.no_grad(): grad = a1_torch.grad a1_torch -= grad * lr # step a1_torch.grad.zero_() a1_torch.data = ot.utils.proj_simplex(a1_torch) a1 = a1_torch.clone().detach().cpu().numpy() return a1, loss_iter a0_est, loss_iter0 = min_weight_gw(C0, C1, ot.unif(n), nb_iter_max=100, lr=1e-2) pl.figure(2) pl.plot(loss_iter0) pl.title("Loss along iterations") print("Estimated weights : ", a0_est) print("True proportions : ", ratio) # %% # It is clear that the optimization has converged and that we recover the # ratio of the different classes in the SBM graph up to a permutation. # %% # Community clustering with uniform and estimated weights # ------------------------------------------------------- # The GW OT plan can be used to perform a clustering of the nodes of a graph # when computing the GW with a simple template like C0 by labeling nodes in # the original graph using by the index of the noe in the template receiving # the most mass. 
#
# We show here the result of such a clustering when using uniform weights on
# the template C0 and when using the optimal weights previously estimated.


T_unif = ot.gromov_wasserstein(C1, C0, ot.unif(n), ot.unif(3))
label_unif = T_unif.argmax(1)

T_est = ot.gromov_wasserstein(C1, C0, ot.unif(n), a0_est)
label_est = T_est.argmax(1)

pl.figure(3, (10, 5))
pl.clf()
pl.subplot(1, 2, 1)
plot_graph(x1, C1, color=label_unif)
pl.title("Graph clustering unif. weights")
pl.axis("off")
pl.subplot(1, 2, 2)
plot_graph(x1, C1, color=label_est)
pl.title("Graph clustering est. weights")
pl.axis("off")


# %%
# Graph compression with GW
# -------------------------
# Now we optimize both the weights and the structure of a small graph that
# minimizes the GW distance w.r.t. our data graph. This can be seen as graph
# compression, but it can also recover important properties of the SBM, such
# as its class proportions and its matrix of link probabilities between
# classes.


def graph_compression_gw(nb_nodes, C2, a2, nb_iter_max=100, lr=1e-2):
    """ solve min_a GW(C1, C2, a, a2) by gradient descent"""

    # use pyTorch for our data
    C2_torch = torch.tensor(C2)
    a2_torch = torch.tensor(a2)

    a0 = rng.rand(nb_nodes)  # random init
    a0 /= a0.sum()  # on simplex
    a1_torch = torch.tensor(a0).requires_grad_(True)
    C0 = np.eye(nb_nodes)
    C1_torch = torch.tensor(C0).requires_grad_(True)

    loss_iter = []

    for i in range(nb_iter_max):

        loss = gromov_wasserstein2(C1_torch, C2_torch, a1_torch, a2_torch)

        loss_iter.append(loss.clone().detach().cpu().numpy())
        loss.backward()

        # print("{:03d} | {}".format(i, loss_iter[-1]))

        # performs a step of projected gradient descent
        with torch.no_grad():

            grad = a1_torch.grad
            a1_torch -= grad * lr  # step
            a1_torch.grad.zero_()
            a1_torch.data = ot.utils.proj_simplex(a1_torch)

            grad = C1_torch.grad
            C1_torch -= grad * lr  # step
            C1_torch.grad.zero_()
            C1_torch.data = torch.clamp(C1_torch, 0, 1)

    a1 = a1_torch.clone().detach().cpu().numpy()
    C1 = C1_torch.clone().detach().cpu().numpy()

    return a1, C1, loss_iter


nb_nodes = 3
a0_est2, C0_est2, loss_iter2 = graph_compression_gw(nb_nodes, C1, ot.unif(n),
                                                    nb_iter_max=100, lr=5e-2)

pl.figure(4)
pl.plot(loss_iter2)
pl.title("Loss along iterations")


print("Estimated weights : ", a0_est2)
print("True proportions : ", ratio)

pl.figure(6, (10, 3.5))
pl.clf()
pl.subplot(1, 2, 1)
pl.imshow(P, vmin=0, vmax=1)
pl.title('True SBM P matrix')
pl.subplot(1, 2, 2)
pl.imshow(C0_est2, vmin=0, vmax=1)
pl.title('Estimated C0 matrix')
pl.colorbar()
python-pot-0.9.3+dfsg/examples/backends/plot_sliced_wass_grad_flow_pytorch.py000066400000000000000000000130311455713015700276650ustar00rootroot00000000000000r"""
============================================================
Sliced Wasserstein barycenter and gradient flow with PyTorch
============================================================

In this example we use the pytorch backend to optimize the sliced Wasserstein
loss between two empirical distributions [31].

In the first example we perform a gradient flow on the support of a
distribution that minimizes the sliced Wasserstein distance, as proposed
in [36].

In the second example we optimize the sliced Wasserstein barycenter between
two distributions with a gradient descent, as in [31].

[31] Bonneel, Nicolas, et al. "Sliced and radon wasserstein barycenters of
measures." Journal of Mathematical Imaging and Vision 51.1 (2015): 22-45

[36] Liutkus, A., Simsekli, U., Majewski, S., Durmus, A., & Stöter, F. R.
(2019, May).
Sliced-Wasserstein flows: Nonparametric generative modeling via optimal transport and diffusions. In International Conference on Machine Learning (pp. 4104-4113). PMLR. """ # Author: Rémi Flamary # # License: MIT License # sphinx_gallery_thumbnail_number = 4 # %% # Loading the data import numpy as np import matplotlib.pylab as pl import torch import ot import matplotlib.animation as animation I1 = pl.imread('../../data/redcross.png').astype(np.float64)[::5, ::5, 2] I2 = pl.imread('../../data/tooth.png').astype(np.float64)[::5, ::5, 2] sz = I2.shape[0] XX, YY = np.meshgrid(np.arange(sz), np.arange(sz)) x1 = np.stack((XX[I1 == 0], YY[I1 == 0]), 1) * 1.0 x2 = np.stack((XX[I2 == 0] + 60, -YY[I2 == 0] + 32), 1) * 1.0 x3 = np.stack((XX[I2 == 0], -YY[I2 == 0] + 32), 1) * 1.0 pl.figure(1, (8, 4)) pl.scatter(x1[:, 0], x1[:, 1], alpha=0.5) pl.scatter(x2[:, 0], x2[:, 1], alpha=0.5) # %% # Sliced Wasserstein gradient flow with Pytorch # --------------------------------------------- device = "cuda" if torch.cuda.is_available() else "cpu" # use pyTorch for our data x1_torch = torch.tensor(x1).to(device=device).requires_grad_(True) x2_torch = torch.tensor(x2).to(device=device) lr = 1e3 nb_iter_max = 50 x_all = np.zeros((nb_iter_max, x1.shape[0], 2)) loss_iter = [] # generator for random permutations gen = torch.Generator(device=device) gen.manual_seed(42) for i in range(nb_iter_max): loss = ot.sliced_wasserstein_distance(x1_torch, x2_torch, n_projections=20, seed=gen) loss_iter.append(loss.clone().detach().cpu().numpy()) loss.backward() # performs a step of projected gradient descent with torch.no_grad(): grad = x1_torch.grad x1_torch -= grad * lr / (1 + i / 5e1) # step x1_torch.grad.zero_() x_all[i, :, :] = x1_torch.clone().detach().cpu().numpy() xb = x1_torch.clone().detach().cpu().numpy() pl.figure(2, (8, 4)) pl.scatter(x1[:, 0], x1[:, 1], alpha=0.5, label='$\mu^{(0)}$') pl.scatter(x2[:, 0], x2[:, 1], alpha=0.5, label=r'$\nu$') pl.scatter(xb[:, 0], xb[:, 1], alpha=0.5, label='$\mu^{(100)}$') pl.title('Sliced Wasserstein gradient flow') pl.legend() ax = pl.axis() # %% # Animate trajectories of the gradient flow along iteration # --------------------------------------------------------- pl.figure(3, (8, 4)) def _update_plot(i): pl.clf() pl.scatter(x1[:, 0], x1[:, 1], alpha=0.5, label='$\mu^{(0)}$') pl.scatter(x2[:, 0], x2[:, 1], alpha=0.5, label=r'$\nu$') pl.scatter(x_all[i, :, 0], x_all[i, :, 1], alpha=0.5, label='$\mu^{(100)}$') pl.title('Sliced Wasserstein gradient flow Iter. 
{}'.format(i)) pl.axis(ax) return 1 ani = animation.FuncAnimation(pl.gcf(), _update_plot, nb_iter_max, interval=100, repeat_delay=2000) # %% # Compute the Sliced Wasserstein Barycenter # ----------------------------------------- x1_torch = torch.tensor(x1).to(device=device) x3_torch = torch.tensor(x3).to(device=device) xbinit = np.random.randn(500, 2) * 10 + 16 xbary_torch = torch.tensor(xbinit).to(device=device).requires_grad_(True) lr = 1e3 nb_iter_max = 50 x_all = np.zeros((nb_iter_max, xbary_torch.shape[0], 2)) loss_iter = [] # generator for random permutations gen = torch.Generator(device=device) gen.manual_seed(42) alpha = 0.5 for i in range(nb_iter_max): loss = alpha * ot.sliced_wasserstein_distance(xbary_torch, x3_torch, n_projections=50, seed=gen) \ + (1 - alpha) * ot.sliced_wasserstein_distance(xbary_torch, x1_torch, n_projections=50, seed=gen) loss_iter.append(loss.clone().detach().cpu().numpy()) loss.backward() # performs a step of projected gradient descent with torch.no_grad(): grad = xbary_torch.grad xbary_torch -= grad * lr # / (1 + i / 5e1) # step xbary_torch.grad.zero_() x_all[i, :, :] = xbary_torch.clone().detach().cpu().numpy() xb = xbary_torch.clone().detach().cpu().numpy() pl.figure(4, (8, 4)) pl.scatter(x1[:, 0], x1[:, 1], alpha=0.5, label='$\mu$') pl.scatter(x2[:, 0], x2[:, 1], alpha=0.5, label=r'$\nu$') pl.scatter(xb[:, 0] + 30, xb[:, 1], alpha=0.5, label='Barycenter') pl.title('Sliced Wasserstein barycenter') pl.legend() ax = pl.axis() # %% # Animate trajectories of the barycenter along gradient descent # ------------------------------------------------------------- pl.figure(5, (8, 4)) def _update_plot(i): pl.clf() pl.scatter(x1[:, 0], x1[:, 1], alpha=0.5, label='$\mu^{(0)}$') pl.scatter(x2[:, 0], x2[:, 1], alpha=0.5, label=r'$\nu$') pl.scatter(x_all[i, :, 0] + 30, x_all[i, :, 1], alpha=0.5, label='$\mu^{(100)}$') pl.title('Sliced Wasserstein barycenter Iter. {}'.format(i)) pl.axis(ax) return 1 ani = animation.FuncAnimation(pl.gcf(), _update_plot, nb_iter_max, interval=100, repeat_delay=2000) python-pot-0.9.3+dfsg/examples/backends/plot_ssw_unif_torch.py000066400000000000000000000065601455713015700246360ustar00rootroot00000000000000# -*- coding: utf-8 -*- r""" ================================================ Spherical Sliced-Wasserstein Embedding on Sphere ================================================ Here, we aim at transforming samples into a uniform distribution on the sphere by minimizing SSW: .. math:: \min_{x} SSW_2(\nu, \frac{1}{n}\sum_{i=1}^n \delta_{x_i}) where :math:`\nu=\mathrm{Unif}(S^1)`. 
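
The objective minimized below is available as a single differentiable
function in POT. A minimal sketch of its use with the torch backend (sizes
are illustrative), mirroring the descent step performed in the script::

    import torch
    import torch.nn.functional as F
    import ot

    x = F.normalize(torch.randn(100, 3), dim=-1)  # points on the 2-sphere
    x.requires_grad_(True)
    ssw = ot.sliced_wasserstein_sphere_unif(x, n_projections=100)
    grad_x = torch.autograd.grad(ssw, x)[0]  # used for the descent on the sphere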
""" # Author: Clément Bonet # # License: MIT License # sphinx_gallery_thumbnail_number = 3 import numpy as np import matplotlib.pyplot as pl import matplotlib.animation as animation import torch import torch.nn.functional as F import ot # %% # Data generation # --------------- torch.manual_seed(1) N = 500 x0 = torch.rand(N, 3) x0 = F.normalize(x0, dim=-1) # %% # Plot data # --------- def plot_sphere(ax): xlist = np.linspace(-1.0, 1.0, 50) ylist = np.linspace(-1.0, 1.0, 50) r = np.linspace(1.0, 1.0, 50) X, Y = np.meshgrid(xlist, ylist) Z = np.sqrt(np.maximum(r**2 - X**2 - Y**2, 0)) ax.plot_wireframe(X, Y, Z, color="gray", alpha=.3) ax.plot_wireframe(X, Y, -Z, color="gray", alpha=.3) # Now plot the bottom half # plot the distributions pl.figure(1) ax = pl.axes(projection='3d') plot_sphere(ax) ax.scatter(x0[:, 0], x0[:, 1], x0[:, 2], label='Data samples', alpha=0.5) ax.set_title('Data distribution') ax.legend() # %% # Gradient descent # ---------------- x = x0.clone() x.requires_grad_(True) n_iter = 100 lr = 150 losses = [] xvisu = torch.zeros(n_iter, N, 3) for i in range(n_iter): sw = ot.sliced_wasserstein_sphere_unif(x, n_projections=500) grad_x = torch.autograd.grad(sw, x)[0] x = x - lr * grad_x / np.sqrt(i / 10 + 1) x = F.normalize(x, p=2, dim=1) losses.append(sw.item()) xvisu[i, :, :] = x.detach().clone() if i % 100 == 0: print("Iter: {:3d}, loss={}".format(i, losses[-1])) pl.figure(1) pl.semilogy(losses) pl.grid() pl.title('SSW') pl.xlabel("Iterations") # %% # Plot trajectories of generated samples along iterations # ------------------------------------------------------- ivisu = [0, 10, 20, 30, 40, 50, 60, 70, 80] fig = pl.figure(3, (10, 10)) for i in range(9): # pl.subplot(3, 3, i + 1) # ax = pl.axes(projection='3d') ax = fig.add_subplot(3, 3, i + 1, projection='3d') plot_sphere(ax) ax.scatter(xvisu[ivisu[i], :, 0], xvisu[ivisu[i], :, 1], xvisu[ivisu[i], :, 2], label='Data samples', alpha=0.5) ax.set_title('Iter. {}'.format(ivisu[i])) #ax.axis("off") if i == 0: ax.legend() # %% # Animate trajectories of generated samples along iteration # --------------------------------------------------------- pl.figure(4, (8, 8)) def _update_plot(i): i = 3 * i pl.clf() ax = pl.axes(projection='3d') plot_sphere(ax) ax.scatter(xvisu[i, :, 0], xvisu[i, :, 1], xvisu[i, :, 2], label='Data samples$', alpha=0.5) ax.axis("off") ax.set_xlim((-1.5, 1.5)) ax.set_ylim((-1.5, 1.5)) ax.set_title('Iter. {}'.format(i)) return 1 print(xvisu.shape) i = 0 ax = pl.axes(projection='3d') plot_sphere(ax) ax.scatter(xvisu[i, :, 0], xvisu[i, :, 1], xvisu[i, :, 2], label='Data samples from $G\#\mu_n$', alpha=0.5) ax.axis("off") ax.set_xlim((-1.5, 1.5)) ax.set_ylim((-1.5, 1.5)) ax.set_title('Iter. 
{}'.format(ivisu[i]))


ani = animation.FuncAnimation(pl.gcf(), _update_plot, n_iter // 5, interval=200, repeat_delay=2000)

# %%
python-pot-0.9.3+dfsg/examples/backends/plot_stoch_continuous_ot_pytorch.py000066400000000000000000000116611455713015700274600ustar00rootroot00000000000000# -*- coding: utf-8 -*-
r"""
======================================================================
Continuous OT plan estimation with Pytorch
======================================================================

"""

# Author: Remi Flamary
#
# License: MIT License

# sphinx_gallery_thumbnail_number = 3

import numpy as np
import matplotlib.pyplot as pl
import torch
from torch import nn
import ot
import ot.plot


# %%
# Data generation
# ---------------

torch.manual_seed(42)
np.random.seed(42)

n_source_samples = 1000
n_target_samples = 1000
theta = 2 * np.pi / 20
noise_level = 0.1

Xs = np.random.randn(n_source_samples, 2) * 0.5
Xt = np.random.randn(n_target_samples, 2) * 2

# shift the target distribution
Xt = Xt + 4


# %%
# Plot data
# ---------
nvisu = 300

pl.figure(1, (5, 5))
pl.clf()
pl.scatter(Xs[:nvisu, 0], Xs[:nvisu, 1], marker='+', label='Source samples', alpha=0.5)
pl.scatter(Xt[:nvisu, 0], Xt[:nvisu, 1], marker='o', label='Target samples', alpha=0.5)
pl.legend(loc=0)
ax_bounds = pl.axis()
pl.title('Source and target distributions')

# %%
# Convert data to torch tensors
# -----------------------------

xs = torch.tensor(Xs)
xt = torch.tensor(Xt)

# %%
# Estimating deep dual variables for entropic OT
# ----------------------------------------------

torch.manual_seed(42)

# define the MLP model


class Potential(torch.nn.Module):
    def __init__(self):
        super(Potential, self).__init__()
        self.fc1 = nn.Linear(2, 200)
        self.fc2 = nn.Linear(200, 1)
        self.relu = torch.nn.ReLU()  # instead of Heaviside step fn

    def forward(self, x):
        output = self.fc1(x)
        output = self.relu(output)  # instead of Heaviside step fn
        output = self.fc2(output)
        return output.ravel()


u = Potential().double()
v = Potential().double()

reg = 1

optimizer = torch.optim.Adam(list(u.parameters()) + list(v.parameters()), lr=.005)

# number of iterations
n_iter = 500
n_batch = 500


losses = []

for i in range(n_iter):

    # draw random minibatches from the source and target samples
    iperms = torch.randint(0, n_source_samples, (n_batch,))
    ipermt = torch.randint(0, n_target_samples, (n_batch,))

    xsi = xs[iperms]
    xti = xt[ipermt]

    # minus because we maximize the dual loss
    loss = -ot.stochastic.loss_dual_entropic(u(xsi), v(xti), xsi, xti, reg=reg)
    losses.append(float(loss.detach()))

    if i % 10 == 0:
        print("Iter: {:3d}, loss={}".format(i, losses[-1]))

    loss.backward()
    optimizer.step()
    optimizer.zero_grad()


pl.figure(2)
pl.plot(losses)
pl.grid()
pl.title('Dual objective (negative)')
pl.xlabel("Iterations")


# %%
# Plot the density on target for a given source sample
# -----------------------------------------------------

nv = 100
xl = np.linspace(ax_bounds[0], ax_bounds[1], nv)
yl = np.linspace(ax_bounds[2], ax_bounds[3], nv)

XX, YY = np.meshgrid(xl, yl)

xg = np.concatenate((XX.ravel()[:, None], YY.ravel()[:, None]), axis=1)

wxg = np.exp(-((xg[:, 0] - 4)**2 + (xg[:, 1] - 4)**2) / (2 * 2))
wxg = wxg / np.sum(wxg)

xg = torch.tensor(xg)
wxg = torch.tensor(wxg)


pl.figure(4, (12, 4))
pl.clf()

pl.subplot(1, 3, 1)

iv = 2
Gg = ot.stochastic.plan_dual_entropic(u(xs[iv:iv + 1, :]), v(xg), xs[iv:iv + 1, :], xg, reg=reg, wt=wxg)
Gg = Gg.reshape((nv, nv)).detach().numpy()

pl.scatter(Xs[:nvisu, 0], Xs[:nvisu, 1], marker='+', zorder=2, alpha=0.05)
pl.scatter(Xt[:nvisu, 0], Xt[:nvisu, 1], marker='o',
zorder=2, alpha=0.05) pl.scatter(Xs[iv:iv + 1, 0], Xs[iv:iv + 1, 1], s=100, marker='+', label='Source sample', zorder=2, alpha=1, color='C0') pl.pcolormesh(XX, YY, Gg, cmap='Greens', label='Density of transported source sample') pl.legend(loc=0) ax_bounds = pl.axis() pl.title('Density of transported source sample') pl.subplot(1, 3, 2) iv = 3 Gg = ot.stochastic.plan_dual_entropic(u(xs[iv:iv + 1, :]), v(xg), xs[iv:iv + 1, :], xg, reg=reg, wt=wxg) Gg = Gg.reshape((nv, nv)).detach().numpy() pl.scatter(Xs[:nvisu, 0], Xs[:nvisu, 1], marker='+', zorder=2, alpha=0.05) pl.scatter(Xt[:nvisu, 0], Xt[:nvisu, 1], marker='o', zorder=2, alpha=0.05) pl.scatter(Xs[iv:iv + 1, 0], Xs[iv:iv + 1, 1], s=100, marker='+', label='Source sample', zorder=2, alpha=1, color='C0') pl.pcolormesh(XX, YY, Gg, cmap='Greens', label='Density of transported source sample') pl.legend(loc=0) ax_bounds = pl.axis() pl.title('Density of transported source sample') pl.subplot(1, 3, 3) iv = 6 Gg = ot.stochastic.plan_dual_entropic(u(xs[iv:iv + 1, :]), v(xg), xs[iv:iv + 1, :], xg, reg=reg, wt=wxg) Gg = Gg.reshape((nv, nv)).detach().numpy() pl.scatter(Xs[:nvisu, 0], Xs[:nvisu, 1], marker='+', zorder=2, alpha=0.05) pl.scatter(Xt[:nvisu, 0], Xt[:nvisu, 1], marker='o', zorder=2, alpha=0.05) pl.scatter(Xs[iv:iv + 1, 0], Xs[iv:iv + 1, 1], s=100, marker='+', label='Source sample', zorder=2, alpha=1, color='C0') pl.pcolormesh(XX, YY, Gg, cmap='Greens', label='Density of transported source sample') pl.legend(loc=0) ax_bounds = pl.axis() pl.title('Density of transported source sample') python-pot-0.9.3+dfsg/examples/backends/plot_unmix_optim_torch.py000066400000000000000000000100761455713015700253460ustar00rootroot00000000000000# -*- coding: utf-8 -*- r""" ================================= Wasserstein unmixing with PyTorch ================================= In this example we estimate mixing parameters from distributions that minimize the Wasserstein distance. In other words we suppose that a target distribution :math:`\mu^t` can be expressed as a weighted sum of source distributions :math:`\mu^s_k` with the following model: .. math:: \mu^t = \sum_{k=1}^K w_k\mu^s_k where :math:`\mathbf{w}` is a vector of size :math:`K` and belongs in the distribution simplex :math:`\Delta_K`. In order to estimate this weight vector we propose to optimize the Wasserstein distance between the model and the observed :math:`\mu^t` with respect to the vector. This leads to the following optimization problem: .. math:: \min_{\mathbf{w}\in\Delta_K} \quad W \left(\mu^t,\sum_{k=1}^K w_k\mu^s_k\right) This minimization is done in this example with a simple projected gradient descent in PyTorch. We use the automatic backend of POT that allows us to compute the Wasserstein distance with :any:`ot.emd2` with differentiable losses. 
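
A minimal sketch of this differentiability (toy shapes, values purely
illustrative)::

    import torch
    import ot

    a = torch.tensor([0.5, 0.5], requires_grad=True)  # source weights
    b = torch.ones(3) / 3                             # target weights
    M = torch.rand(2, 3)                              # cost matrix
    loss = ot.emd2(a, b, M)  # exact OT loss, differentiable w.r.t. a
    loss.backward()          # a.grad now drives the simplex-projected descent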
""" # Author: Remi Flamary # # License: MIT License # sphinx_gallery_thumbnail_number = 2 import numpy as np import matplotlib.pylab as pl import ot import torch ############################################################################## # Generate data # ------------- #%% Data nt = 100 nt1 = 10 # ns1 = 50 ns = 2 * ns1 rng = np.random.RandomState(2) xt = rng.randn(nt, 2) * 0.2 xt[:nt1, 0] += 1 xt[nt1:, 1] += 1 xs1 = rng.randn(ns1, 2) * 0.2 xs1[:, 0] += 1 xs2 = rng.randn(ns1, 2) * 0.2 xs2[:, 1] += 1 xs = np.concatenate((xs1, xs2)) # Sample reweighting matrix H H = np.zeros((ns, 2)) H[:ns1, 0] = 1 / ns1 H[ns1:, 1] = 1 / ns1 # each columns sums to 1 and has weights only for samples form the # corresponding source distribution M = ot.dist(xs, xt) ############################################################################## # Plot data # --------- #%% plot the distributions pl.figure(1) pl.scatter(xt[:, 0], xt[:, 1], label='Target $\mu^t$', alpha=0.5) pl.scatter(xs1[:, 0], xs1[:, 1], label='Source $\mu^s_1$', alpha=0.5) pl.scatter(xs2[:, 0], xs2[:, 1], label='Source $\mu^s_2$', alpha=0.5) pl.title('Sources and Target distributions') pl.legend() ############################################################################## # Optimization of the model wrt the Wasserstein distance # ------------------------------------------------------ #%% Weights optimization with gradient descent # convert numpy arrays to torch tensors H2 = torch.tensor(H) M2 = torch.tensor(M) # weights for the source distributions w = torch.tensor(ot.unif(2), requires_grad=True) # uniform weights for target b = torch.tensor(ot.unif(nt)) lr = 2e-3 # learning rate niter = 500 # number of iterations losses = [] # loss along the iterations # loss for the minimal Wasserstein estimator def get_loss(w): a = torch.mv(H2, w) # distribution reweighting return ot.emd2(a, b, M2) # squared Wasserstein 2 for i in range(niter): loss = get_loss(w) losses.append(float(loss)) loss.backward() with torch.no_grad(): w -= lr * w.grad # gradient step w[:] = ot.utils.proj_simplex(w) # projection on the simplex w.grad.zero_() ############################################################################## # Estimated weights and convergence of the objective # -------------------------------------------------- we = w.detach().numpy() print('Estimated mixture:', we) pl.figure(2) pl.semilogy(losses) pl.grid() pl.title('Wasserstein distance') pl.xlabel("Iterations") ############################################################################## # Plotting the reweighted source distribution # ------------------------------------------- pl.figure(3) # compute source weights ws = H.dot(we) pl.scatter(xt[:, 0], xt[:, 1], label='Target $\mu^t$', alpha=0.5) pl.scatter(xs[:, 0], xs[:, 1], color='C3', s=ws * 20 * ns, label='Weighted sources $\sum_{k} w_k\mu^s_k$', alpha=0.5) pl.title('Target and reweighted source distributions') pl.legend() python-pot-0.9.3+dfsg/examples/backends/plot_wass1d_torch.py000066400000000000000000000112321455713015700241730ustar00rootroot00000000000000r""" ================================================= Wasserstein 1D (flow and barycenter) with PyTorch ================================================= In this small example, we consider the following minimization problem: .. math:: \mu^* = \min_\mu W(\mu,\nu) where :math:`\nu` is a reference 1D measure. The problem is handled by a projected gradient descent method, where the gradient is computed by pyTorch automatic differentiation. 
The projection on the simplex ensures that the iterate will remain on the probability simplex. This example illustrates both `wasserstein_1d` function and backend use within the POT framework. """ # Author: Nicolas Courty # Rémi Flamary # # License: MIT License import numpy as np import matplotlib.pylab as pl import matplotlib as mpl import torch from ot.lp import wasserstein_1d from ot.datasets import make_1D_gauss as gauss from ot.utils import proj_simplex red = np.array(mpl.colors.to_rgb('red')) blue = np.array(mpl.colors.to_rgb('blue')) n = 100 # nb bins # bin positions x = np.arange(n, dtype=np.float64) # Gaussian distributions a = gauss(n, m=20, s=5) # m= mean, s= std b = gauss(n, m=60, s=10) # enforce sum to one on the support a = a / a.sum() b = b / b.sum() device = "cuda" if torch.cuda.is_available() else "cpu" # use pyTorch for our data x_torch = torch.tensor(x).to(device=device) a_torch = torch.tensor(a).to(device=device).requires_grad_(True) b_torch = torch.tensor(b).to(device=device) lr = 1e-6 nb_iter_max = 800 loss_iter = [] pl.figure(1, figsize=(8, 4)) pl.plot(x, a, 'b', label='Source distribution') pl.plot(x, b, 'r', label='Target distribution') for i in range(nb_iter_max): # Compute the Wasserstein 1D with torch backend loss = wasserstein_1d(x_torch, x_torch, a_torch, b_torch, p=2) # record the corresponding loss value loss_iter.append(loss.clone().detach().cpu().numpy()) loss.backward() # performs a step of projected gradient descent with torch.no_grad(): grad = a_torch.grad a_torch -= a_torch.grad * lr # step a_torch.grad.zero_() a_torch.data = proj_simplex(a_torch) # projection onto the simplex # plot one curve every 10 iterations if i % 10 == 0: mix = float(i) / nb_iter_max pl.plot(x, a_torch.clone().detach().cpu().numpy(), c=(1 - mix) * blue + mix * red) pl.legend() pl.title('Distribution along the iterations of the projected gradient descent') pl.show() pl.figure(2) pl.plot(range(nb_iter_max), loss_iter, lw=3) pl.title('Evolution of the loss along iterations', fontsize=16) pl.show() # %% # Wasserstein barycenter # ---------------------- # In this example, we consider the following Wasserstein barycenter problem # $$ \\eta^* = \\min_\\eta\;\;\; (1-t)W(\\mu,\\eta) + tW(\\eta,\\nu)$$ # where :math:`\\mu` and :math:`\\nu` are reference 1D measures, and :math:`t` # is a parameter :math:`\in [0,1]`. The problem is handled by a project gradient # descent method, where the gradient is computed by pyTorch automatic differentiation. # The projection on the simplex ensures that the iterate will remain on the # probability simplex. # # This example illustrates both `wasserstein_1d` function and backend use within the # POT framework. 
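
# %%
# Closed-form sanity check (an added sketch, not part of the original
# example): in 1D with p=2 the barycenter admits a closed form, its quantile
# function is the pointwise interpolation (1 - t) * F_a^{-1} + t * F_b^{-1}
# of the input quantile functions, which the gradient descent iterate below
# should approach. Approximate quantiles can be obtained from the discrete
# CDFs (values kept for comparison, not plotted here):

qs = np.linspace(0.01, 0.99, 99)
q_a = np.interp(qs, np.cumsum(a), x)  # approximate quantile function of a
q_b = np.interp(qs, np.cumsum(b), x)  # approximate quantile function of b
q_bary_closed_form = 0.5 * q_a + 0.5 * q_b  # quantiles of the t = 0.5 barycenter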
device = "cuda" if torch.cuda.is_available() else "cpu" # use pyTorch for our data x_torch = torch.tensor(x).to(device=device) a_torch = torch.tensor(a).to(device=device) b_torch = torch.tensor(b).to(device=device) bary_torch = torch.tensor((a + b).copy() / 2).to(device=device).requires_grad_(True) lr = 1e-6 nb_iter_max = 2000 loss_iter = [] # instant of the interpolation t = 0.5 for i in range(nb_iter_max): # Compute the Wasserstein 1D with torch backend loss = (1 - t) * wasserstein_1d(x_torch, x_torch, a_torch.detach(), bary_torch, p=2) + t * wasserstein_1d(x_torch, x_torch, b_torch, bary_torch, p=2) # record the corresponding loss value loss_iter.append(loss.clone().detach().cpu().numpy()) loss.backward() # performs a step of projected gradient descent with torch.no_grad(): grad = bary_torch.grad bary_torch -= bary_torch.grad * lr # step bary_torch.grad.zero_() bary_torch.data = proj_simplex(bary_torch) # projection onto the simplex pl.figure(3, figsize=(8, 4)) pl.plot(x, a, 'b', label='Source distribution') pl.plot(x, b, 'r', label='Target distribution') pl.plot(x, bary_torch.clone().detach().cpu().numpy(), c='green', label='W barycenter') pl.legend() pl.title('Wasserstein barycenter computed by gradient descent') pl.show() pl.figure(4) pl.plot(range(nb_iter_max), loss_iter, lw=3) pl.title('Evolution of the loss along iterations', fontsize=16) pl.show() python-pot-0.9.3+dfsg/examples/backends/plot_wass2_gan_torch.py000066400000000000000000000141251455713015700246610ustar00rootroot00000000000000# -*- coding: utf-8 -*- r""" ======================================== Wasserstein 2 Minibatch GAN with PyTorch ======================================== In this example we train a Wasserstein GAN using Wasserstein 2 on minibatches as a distribution fitting term. We want to train a generator :math:`G_\theta` that generates realistic data from random noise drawn form a Gaussian :math:`\mu_n` distribution so that the data is indistinguishable from true data in the data distribution :math:`\mu_d`. To this end Wasserstein GAN [Arjovsky2017] aim at optimizing the parameters :math:`\theta` of the generator with the following optimization problem: .. math:: \min_{\theta} W(\mu_d,G_\theta\#\mu_n) In practice we do not have access to the full distribution :math:`\mu_d` but samples and we cannot compute the Wasserstein distance for large dataset. [Arjovsky2017] proposed to approximate the dual potential of Wasserstein 1 with a neural network recovering an optimization problem similar to GAN. In this example we will optimize the expectation of the Wasserstein distance over minibatches at each iterations as proposed in [Genevay2018]. Optimizing the Minibatches of the Wasserstein distance has been studied in [Fatras2019]. [Arjovsky2017] Arjovsky, M., Chintala, S., & Bottou, L. (2017, July). Wasserstein generative adversarial networks. In International conference on machine learning (pp. 214-223). PMLR. [Genevay2018] Genevay, Aude, Gabriel Peyré, and Marco Cuturi. "Learning generative models with sinkhorn divergences." International Conference on Artificial Intelligence and Statistics. PMLR, 2018. [Fatras2019] Fatras, K., Zine, Y., Flamary, R., Gribonval, R., & Courty, N. (2020, June). Learning with minibatch Wasserstein: asymptotic and gradient properties. In the 23nd International Conference on Artificial Intelligence and Statistics (Vol. 108). 
""" # Author: Remi Flamary # # License: MIT License # sphinx_gallery_thumbnail_number = 3 import numpy as np import matplotlib.pyplot as pl import matplotlib.animation as animation import torch from torch import nn import ot # %% # Data generation # --------------- torch.manual_seed(1) sigma = 0.1 n_dims = 2 n_features = 2 def get_data(n_samples): c = torch.rand(size=(n_samples, 1)) angle = c * 2 * np.pi x = torch.cat((torch.cos(angle), torch.sin(angle)), 1) x += torch.randn(n_samples, 2) * sigma return x # %% # Plot data # --------- # plot the distributions x = get_data(500) pl.figure(1) pl.scatter(x[:, 0], x[:, 1], label='Data samples from $\mu_d$', alpha=0.5) pl.title('Data distribution') pl.legend() # %% # Generator Model # --------------- # define the MLP model class Generator(torch.nn.Module): def __init__(self): super(Generator, self).__init__() self.fc1 = nn.Linear(n_features, 200) self.fc2 = nn.Linear(200, 500) self.fc3 = nn.Linear(500, n_dims) self.relu = torch.nn.ReLU() # instead of Heaviside step fn def forward(self, x): output = self.fc1(x) output = self.relu(output) # instead of Heaviside step fn output = self.fc2(output) output = self.relu(output) output = self.fc3(output) return output # %% # Training the model # ------------------ G = Generator() optimizer = torch.optim.RMSprop(G.parameters(), lr=0.00019, eps=1e-5) # number of iteration and size of the batches n_iter = 200 # set to 200 for doc build but 1000 is better ;) size_batch = 500 # generate statis samples to see their trajectory along training n_visu = 100 xnvisu = torch.randn(n_visu, n_features) xvisu = torch.zeros(n_iter, n_visu, n_dims) ab = torch.ones(size_batch) / size_batch losses = [] for i in range(n_iter): # generate noise samples xn = torch.randn(size_batch, n_features) # generate data samples xd = get_data(size_batch) # generate sample along iterations xvisu[i, :, :] = G(xnvisu).detach() # generate smaples and compte distance matrix xg = G(xn) M = ot.dist(xg, xd) loss = ot.emd2(ab, ab, M) losses.append(float(loss.detach())) if i % 10 == 0: print("Iter: {:3d}, loss={}".format(i, losses[-1])) loss.backward() optimizer.step() optimizer.zero_grad() del M pl.figure(2) pl.semilogy(losses) pl.grid() pl.title('Wasserstein distance') pl.xlabel("Iterations") # %% # Plot trajectories of generated samples along iterations # ------------------------------------------------------- pl.figure(3, (10, 10)) ivisu = [0, 10, 25, 50, 75, 125, 15, 175, 199] for i in range(9): pl.subplot(3, 3, i + 1) pl.scatter(xd[:, 0], xd[:, 1], label='Data samples from $\mu_d$', alpha=0.1) pl.scatter(xvisu[ivisu[i], :, 0], xvisu[ivisu[i], :, 1], label='Data samples from $G\#\mu_n$', alpha=0.5) pl.xticks(()) pl.yticks(()) pl.title('Iter. {}'.format(ivisu[i])) if i == 0: pl.legend() # %% # Animate trajectories of generated samples along iteration # --------------------------------------------------------- pl.figure(4, (8, 8)) def _update_plot(i): pl.clf() pl.scatter(xd[:, 0], xd[:, 1], label='Data samples from $\mu_d$', alpha=0.1) pl.scatter(xvisu[i, :, 0], xvisu[i, :, 1], label='Data samples from $G\#\mu_n$', alpha=0.5) pl.xticks(()) pl.yticks(()) pl.xlim((-1.5, 1.5)) pl.ylim((-1.5, 1.5)) pl.title('Iter. {}'.format(i)) return 1 i = 0 pl.scatter(xd[:, 0], xd[:, 1], label='Data samples from $\mu_d$', alpha=0.1) pl.scatter(xvisu[i, :, 0], xvisu[i, :, 1], label='Data samples from $G\#\mu_n$', alpha=0.5) pl.xticks(()) pl.yticks(()) pl.xlim((-1.5, 1.5)) pl.ylim((-1.5, 1.5)) pl.title('Iter. 
{}'.format(ivisu[i])) ani = animation.FuncAnimation(pl.gcf(), _update_plot, n_iter, interval=100, repeat_delay=2000) # %% # Generate and visualize data # --------------------------- size_batch = 500 xd = get_data(size_batch) xn = torch.randn(size_batch, 2) x = G(xn).detach().numpy() pl.figure(5) pl.scatter(xd[:, 0], xd[:, 1], label='Data samples from $\mu_d$', alpha=0.5) pl.scatter(x[:, 0], x[:, 1], label='Data samples from $G\#\mu_n$', alpha=0.5) pl.title('Sources and Target distributions') pl.legend() python-pot-0.9.3+dfsg/examples/barycenters/000077500000000000000000000000001455713015700207325ustar00rootroot00000000000000python-pot-0.9.3+dfsg/examples/barycenters/README.txt000066400000000000000000000000611455713015700224250ustar00rootroot00000000000000 Wasserstein barycenters -----------------------python-pot-0.9.3+dfsg/examples/barycenters/plot_barycenter_1D.py000066400000000000000000000065761455713015700250420ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ ============================== 1D Wasserstein barycenter demo ============================== This example illustrates the computation of regularized Wasserstein Barycenter as proposed in [3]. [3] Benamou, J. D., Carlier, G., Cuturi, M., Nenna, L., & Peyré, G. (2015). Iterative Bregman projections for regularized transportation problems SIAM Journal on Scientific Computing, 37(2), A1111-A1138. """ # Author: Remi Flamary # # License: MIT License # sphinx_gallery_thumbnail_number = 1 import numpy as np import matplotlib.pyplot as plt import ot # necessary for 3d plot even if not used from mpl_toolkits.mplot3d import Axes3D # noqa from matplotlib.collections import PolyCollection ############################################################################## # Generate data # ------------- #%% parameters n = 100 # nb bins # bin positions x = np.arange(n, dtype=np.float64) # Gaussian distributions a1 = ot.datasets.make_1D_gauss(n, m=20, s=5) # m= mean, s= std a2 = ot.datasets.make_1D_gauss(n, m=60, s=8) # creating matrix A containing all distributions A = np.vstack((a1, a2)).T n_distributions = A.shape[1] # loss matrix + normalization M = ot.utils.dist0(n) M /= M.max() ############################################################################## # Barycenter computation # ---------------------- #%% barycenter computation alpha = 0.2 # 0<=alpha<=1 weights = np.array([1 - alpha, alpha]) # l2bary bary_l2 = A.dot(weights) # wasserstein reg = 1e-3 bary_wass = ot.bregman.barycenter(A, M, reg, weights) f, (ax1, ax2) = plt.subplots(2, 1, tight_layout=True, num=1) ax1.plot(x, A, color="black") ax1.set_title('Distributions') ax2.plot(x, bary_l2, 'r', label='l2') ax2.plot(x, bary_wass, 'g', label='Wasserstein') ax2.set_title('Barycenters') plt.legend() plt.show() ############################################################################## # Barycentric interpolation # ------------------------- #%% barycenter interpolation n_alpha = 11 alpha_list = np.linspace(0, 1, n_alpha) B_l2 = np.zeros((n, n_alpha)) B_wass = np.copy(B_l2) for i in range(n_alpha): alpha = alpha_list[i] weights = np.array([1 - alpha, alpha]) B_l2[:, i] = A.dot(weights) B_wass[:, i] = ot.bregman.barycenter(A, M, reg, weights) #%% plot interpolation plt.figure(2) cmap = plt.cm.get_cmap('viridis') verts = [] zs = alpha_list for i, z in enumerate(zs): ys = B_l2[:, i] verts.append(list(zip(x, ys))) ax = plt.gcf().add_subplot(projection='3d') poly = PolyCollection(verts, facecolors=[cmap(a) for a in alpha_list]) poly.set_alpha(0.7) ax.add_collection3d(poly, zs=zs, zdir='y') 
ax.set_xlabel('x') ax.set_xlim3d(0, n) ax.set_ylabel('$\\alpha$') ax.set_ylim3d(0, 1) ax.set_zlabel('') ax.set_zlim3d(0, B_l2.max() * 1.01) plt.title('Barycenter interpolation with l2') plt.tight_layout() plt.figure(3) cmap = plt.cm.get_cmap('viridis') verts = [] zs = alpha_list for i, z in enumerate(zs): ys = B_wass[:, i] verts.append(list(zip(x, ys))) ax = plt.gcf().add_subplot(projection='3d') poly = PolyCollection(verts, facecolors=[cmap(a) for a in alpha_list]) poly.set_alpha(0.7) ax.add_collection3d(poly, zs=zs, zdir='y') ax.set_xlabel('x') ax.set_xlim3d(0, n) ax.set_ylabel('$\\alpha$') ax.set_ylim3d(0, 1) ax.set_zlabel('') ax.set_zlim3d(0, B_l2.max() * 1.01) plt.title('Barycenter interpolation with Wasserstein') plt.tight_layout() plt.show() python-pot-0.9.3+dfsg/examples/barycenters/plot_barycenter_lp_vs_entropic.py000066400000000000000000000134701455713015700276130ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ ================================================================================= 1D Wasserstein barycenter: exact LP vs entropic regularization ================================================================================= This example illustrates the computation of regularized Wasserstein Barycenter as proposed in [3] and exact LP barycenters using standard LP solver. It reproduces approximately Figure 3.1 and 3.2 from the following paper: Cuturi, M., & Peyré, G. (2016). A smoothed dual approach for variational Wasserstein problems. SIAM Journal on Imaging Sciences, 9(1), 320-343. [3] Benamou, J. D., Carlier, G., Cuturi, M., Nenna, L., & Peyré, G. (2015). Iterative Bregman projections for regularized transportation problems SIAM Journal on Scientific Computing, 37(2), A1111-A1138. """ # Author: Remi Flamary # # License: MIT License # sphinx_gallery_thumbnail_number = 4 import numpy as np import matplotlib.pylab as pl import ot # necessary for 3d plot even if not used from mpl_toolkits.mplot3d import Axes3D # noqa from matplotlib.collections import PolyCollection # noqa #import ot.lp.cvx as cvx ############################################################################## # Gaussian Data # ------------- #%% parameters problems = [] n = 100 # nb bins # bin positions x = np.arange(n, dtype=np.float64) # Gaussian distributions # Gaussian distributions a1 = ot.datasets.make_1D_gauss(n, m=20, s=5) # m= mean, s= std a2 = ot.datasets.make_1D_gauss(n, m=60, s=8) # creating matrix A containing all distributions A = np.vstack((a1, a2)).T n_distributions = A.shape[1] # loss matrix + normalization M = ot.utils.dist0(n) M /= M.max() #%% plot the distributions pl.figure(1, figsize=(6.4, 3)) for i in range(n_distributions): pl.plot(x, A[:, i]) pl.title('Distributions') pl.tight_layout() #%% barycenter computation alpha = 0.5 # 0<=alpha<=1 weights = np.array([1 - alpha, alpha]) # l2bary bary_l2 = A.dot(weights) # wasserstein reg = 1e-3 ot.tic() bary_wass = ot.bregman.barycenter(A, M, reg, weights) ot.toc() ot.tic() bary_wass2 = ot.lp.barycenter(A, M, weights) ot.toc() pl.figure(2) pl.clf() pl.subplot(2, 1, 1) for i in range(n_distributions): pl.plot(x, A[:, i]) pl.title('Distributions') pl.subplot(2, 1, 2) pl.plot(x, bary_l2, 'r', label='l2') pl.plot(x, bary_wass, 'g', label='Reg Wasserstein') pl.plot(x, bary_wass2, 'b', label='LP Wasserstein') pl.legend() pl.title('Barycenters') pl.tight_layout() problems.append([A, [bary_l2, bary_wass, bary_wass2]]) ############################################################################## # Stair Data # ---------- #%% parameters a1 = 
1.0 * (x > 10) * (x < 50) a2 = 1.0 * (x > 60) * (x < 80) a1 /= a1.sum() a2 /= a2.sum() # creating matrix A containing all distributions A = np.vstack((a1, a2)).T n_distributions = A.shape[1] # loss matrix + normalization M = ot.utils.dist0(n) M /= M.max() #%% plot the distributions pl.figure(1, figsize=(6.4, 3)) for i in range(n_distributions): pl.plot(x, A[:, i]) pl.title('Distributions') pl.tight_layout() #%% barycenter computation alpha = 0.5 # 0<=alpha<=1 weights = np.array([1 - alpha, alpha]) # l2bary bary_l2 = A.dot(weights) # wasserstein reg = 1e-3 ot.tic() bary_wass = ot.bregman.barycenter(A, M, reg, weights) ot.toc() ot.tic() bary_wass2 = ot.lp.barycenter(A, M, weights) ot.toc() problems.append([A, [bary_l2, bary_wass, bary_wass2]]) pl.figure(2) pl.clf() pl.subplot(2, 1, 1) for i in range(n_distributions): pl.plot(x, A[:, i]) pl.title('Distributions') pl.subplot(2, 1, 2) pl.plot(x, bary_l2, 'r', label='l2') pl.plot(x, bary_wass, 'g', label='Reg Wasserstein') pl.plot(x, bary_wass2, 'b', label='LP Wasserstein') pl.legend() pl.title('Barycenters') pl.tight_layout() ############################################################################## # Dirac Data # ---------- #%% parameters a1 = np.zeros(n) a2 = np.zeros(n) a1[10] = .25 a1[20] = .5 a1[30] = .25 a2[80] = 1 a1 /= a1.sum() a2 /= a2.sum() # creating matrix A containing all distributions A = np.vstack((a1, a2)).T n_distributions = A.shape[1] # loss matrix + normalization M = ot.utils.dist0(n) M /= M.max() #%% plot the distributions pl.figure(1, figsize=(6.4, 3)) for i in range(n_distributions): pl.plot(x, A[:, i]) pl.title('Distributions') pl.tight_layout() #%% barycenter computation alpha = 0.5 # 0<=alpha<=1 weights = np.array([1 - alpha, alpha]) # l2bary bary_l2 = A.dot(weights) # wasserstein reg = 1e-3 ot.tic() bary_wass = ot.bregman.barycenter(A, M, reg, weights) ot.toc() ot.tic() bary_wass2 = ot.lp.barycenter(A, M, weights) ot.toc() problems.append([A, [bary_l2, bary_wass, bary_wass2]]) pl.figure(2) pl.clf() pl.subplot(2, 1, 1) for i in range(n_distributions): pl.plot(x, A[:, i]) pl.title('Distributions') pl.subplot(2, 1, 2) pl.plot(x, bary_l2, 'r', label='l2') pl.plot(x, bary_wass, 'g', label='Reg Wasserstein') pl.plot(x, bary_wass2, 'b', label='LP Wasserstein') pl.legend() pl.title('Barycenters') pl.tight_layout() ############################################################################## # Final figure # ------------ # #%% plot nbm = len(problems) nbm2 = (nbm // 2) pl.figure(2, (20, 6)) pl.clf() for i in range(nbm): A = problems[i][0] bary_l2 = problems[i][1][0] bary_wass = problems[i][1][1] bary_wass2 = problems[i][1][2] pl.subplot(2, nbm, 1 + i) for j in range(n_distributions): pl.plot(x, A[:, j]) if i == nbm2: pl.title('Distributions') pl.xticks(()) pl.yticks(()) pl.subplot(2, nbm, 1 + i + nbm) pl.plot(x, bary_l2, 'r', label='L2 (Euclidean)') pl.plot(x, bary_wass, 'g', label='Reg Wasserstein') pl.plot(x, bary_wass2, 'b', label='LP Wasserstein') if i == nbm - 1: pl.legend() if i == nbm2: pl.title('Barycenters') pl.xticks(()) pl.yticks(()) python-pot-0.9.3+dfsg/examples/barycenters/plot_convolutional_barycenter.py000066400000000000000000000051351455713015700274600ustar00rootroot00000000000000 #%% # -*- coding: utf-8 -*- """ ============================================ Convolutional Wasserstein Barycenter example ============================================ This example is designed to illustrate how the Convolutional Wasserstein Barycenter function of POT works. 
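
The whole computation in this example reduces to a single POT call per
barycenter. A minimal sketch with random inputs (shapes and the
regularization value are illustrative)::

    import numpy as np
    import ot

    A = np.random.rand(2, 32, 32)           # two images stacked as one 3D array
    A /= A.sum(axis=(1, 2), keepdims=True)  # normalize each to a distribution
    bary = ot.bregman.convolutional_barycenter2d(A, reg=0.004)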
""" # Author: Nicolas Courty # # License: MIT License import os from pathlib import Path import numpy as np import matplotlib.pyplot as plt import ot ############################################################################## # Data preparation # ---------------- # # The four distributions are constructed from 4 simple images this_file = os.path.realpath('__file__') data_path = os.path.join(Path(this_file).parent.parent.parent, 'data') f1 = 1 - plt.imread(os.path.join(data_path, 'redcross.png'))[::2, ::2, 2] f2 = 1 - plt.imread(os.path.join(data_path, 'tooth.png'))[::2, ::2, 2] f3 = 1 - plt.imread(os.path.join(data_path, 'heart.png'))[::2, ::2, 2] f4 = 1 - plt.imread(os.path.join(data_path, 'duck.png'))[::2, ::2, 2] f1 = f1 / np.sum(f1) f2 = f2 / np.sum(f2) f3 = f3 / np.sum(f3) f4 = f4 / np.sum(f4) A = np.array([f1, f2, f3, f4]) nb_images = 5 # those are the four corners coordinates that will be interpolated by bilinear # interpolation v1 = np.array((1, 0, 0, 0)) v2 = np.array((0, 1, 0, 0)) v3 = np.array((0, 0, 1, 0)) v4 = np.array((0, 0, 0, 1)) ############################################################################## # Barycenter computation and visualization # ---------------------------------------- # fig, axes = plt.subplots(nb_images, nb_images, figsize=(7, 7)) plt.suptitle('Convolutional Wasserstein Barycenters in POT') cm = 'Blues' # regularization parameter reg = 0.004 for i in range(nb_images): for j in range(nb_images): tx = float(i) / (nb_images - 1) ty = float(j) / (nb_images - 1) # weights are constructed by bilinear interpolation tmp1 = (1 - tx) * v1 + tx * v2 tmp2 = (1 - tx) * v3 + tx * v4 weights = (1 - ty) * tmp1 + ty * tmp2 if i == 0 and j == 0: axes[i, j].imshow(f1, cmap=cm) elif i == 0 and j == (nb_images - 1): axes[i, j].imshow(f3, cmap=cm) elif i == (nb_images - 1) and j == 0: axes[i, j].imshow(f2, cmap=cm) elif i == (nb_images - 1) and j == (nb_images - 1): axes[i, j].imshow(f4, cmap=cm) else: # call to barycenter computation axes[i, j].imshow( ot.bregman.convolutional_barycenter2d(A, reg, weights), cmap=cm ) axes[i, j].axis('off') plt.tight_layout() plt.show() python-pot-0.9.3+dfsg/examples/barycenters/plot_debiased_barycenter.py000066400000000000000000000100261455713015700263170ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ ================================= Debiased Sinkhorn barycenter demo ================================= This example illustrates the computation of the debiased Sinkhorn barycenter as proposed in [37]_. .. [37] Janati, H., Cuturi, M., Gramfort, A. 
Proceedings of the 37th International Conference on Machine Learning, PMLR 119:4692-4701, 2020 """ # Author: Hicham Janati # # License: MIT License # sphinx_gallery_thumbnail_number = 3 import os from pathlib import Path import numpy as np import matplotlib.pyplot as plt import ot from ot.bregman import (barycenter, barycenter_debiased, convolutional_barycenter2d, convolutional_barycenter2d_debiased) ############################################################################## # Debiased barycenter of 1D Gaussians # ------------------------------------ #%% parameters n = 100 # nb bins # bin positions x = np.arange(n, dtype=np.float64) # Gaussian distributions a1 = ot.datasets.make_1D_gauss(n, m=20, s=5) # m= mean, s= std a2 = ot.datasets.make_1D_gauss(n, m=60, s=8) # creating matrix A containing all distributions A = np.vstack((a1, a2)).T n_distributions = A.shape[1] # loss matrix + normalization M = ot.utils.dist0(n) M /= M.max() #%% barycenter computation alpha = 0.2 # 0<=alpha<=1 weights = np.array([1 - alpha, alpha]) epsilons = [5e-3, 1e-2, 5e-2] bars = [barycenter(A, M, reg, weights) for reg in epsilons] bars_debiased = [barycenter_debiased(A, M, reg, weights) for reg in epsilons] labels = ["Sinkhorn barycenter", "Debiased barycenter"] colors = ["indianred", "gold"] f, axes = plt.subplots(1, len(epsilons), tight_layout=True, sharey=True, figsize=(12, 4), num=1) for ax, eps, bar, bar_debiased in zip(axes, epsilons, bars, bars_debiased): ax.plot(A[:, 0], color="k", ls="--", label="Input data", alpha=0.3) ax.plot(A[:, 1], color="k", ls="--", alpha=0.3) for data, label, color in zip([bar, bar_debiased], labels, colors): ax.plot(data, color=color, label=label, lw=2) ax.set_title(r"$\varepsilon = %.3f$" % eps) plt.legend() plt.show() ############################################################################## # Debiased barycenter of 2D images # --------------------------------- this_file = os.path.realpath('__file__') data_path = os.path.join(Path(this_file).parent.parent.parent, 'data') f1 = 1 - plt.imread(os.path.join(data_path, 'heart.png'))[:, :, 2] f2 = 1 - plt.imread(os.path.join(data_path, 'duck.png'))[:, :, 2] A = np.asarray([f1, f2]) + 1e-2 A /= A.sum(axis=(1, 2))[:, None, None] ############################################################################## # Display the input images fig, axes = plt.subplots(1, 2, figsize=(7, 4), num=2) for ax, img in zip(axes, A): ax.imshow(img, cmap="Greys") ax.axis("off") fig.tight_layout() plt.show() ############################################################################## # Barycenter computation and visualization # ---------------------------------------- # bars_sinkhorn, bars_debiased = [], [] epsilons = [5e-3, 7e-3, 1e-2] for eps in epsilons: bar = convolutional_barycenter2d(A, eps) bar_debiased, log = convolutional_barycenter2d_debiased(A, eps, log=True) bars_sinkhorn.append(bar) bars_debiased.append(bar_debiased) titles = ["Sinkhorn", "Debiased"] all_bars = [bars_sinkhorn, bars_debiased] fig, axes = plt.subplots(2, 3, figsize=(8, 6), num=3) for jj, (method, ax_row, bars) in enumerate(zip(titles, axes, all_bars)): for ii, (ax, img, eps) in enumerate(zip(ax_row, bars, epsilons)): ax.imshow(img, cmap="Greys") if jj == 0: ax.set_title(r"$\varepsilon = %.3f$" % eps, fontsize=13) ax.set_xticks([]) ax.set_yticks([]) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) ax.spines['bottom'].set_visible(False) ax.spines['left'].set_visible(False) if ii == 0: ax.set_ylabel(method, fontsize=15) fig.tight_layout() 
plt.show() python-pot-0.9.3+dfsg/examples/barycenters/plot_free_support_barycenter.py000066400000000000000000000054101455713015700272750ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ ======================================================== 2D free support Wasserstein barycenters of distributions ======================================================== Illustration of 2D Wasserstein and Sinkhorn barycenters if distributions are weighted sum of Diracs. """ # Authors: Vivien Seguy # Rémi Flamary # Eduardo Fernandes Montesuma # # License: MIT License # sphinx_gallery_thumbnail_number = 2 import numpy as np import matplotlib.pylab as pl import ot # %% # Generate data # ------------- N = 2 d = 2 I1 = pl.imread('../../data/redcross.png').astype(np.float64)[::4, ::4, 2] I2 = pl.imread('../../data/duck.png').astype(np.float64)[::4, ::4, 2] sz = I2.shape[0] XX, YY = np.meshgrid(np.arange(sz), np.arange(sz)) x1 = np.stack((XX[I1 == 0], YY[I1 == 0]), 1) * 1.0 x2 = np.stack((XX[I2 == 0] + 80, -YY[I2 == 0] + 32), 1) * 1.0 x3 = np.stack((XX[I2 == 0], -YY[I2 == 0] + 32), 1) * 1.0 measures_locations = [x1, x2] measures_weights = [ot.unif(x1.shape[0]), ot.unif(x2.shape[0])] pl.figure(1, (12, 4)) pl.scatter(x1[:, 0], x1[:, 1], alpha=0.5) pl.scatter(x2[:, 0], x2[:, 1], alpha=0.5) pl.title('Distributions') # %% # Compute free support Wasserstein barycenter # ------------------------------------------- k = 200 # number of Diracs of the barycenter X_init = np.random.normal(0., 1., (k, d)) # initial Dirac locations b = np.ones((k,)) / k # weights of the barycenter (it will not be optimized, only the locations are optimized) X = ot.lp.free_support_barycenter(measures_locations, measures_weights, X_init, b) # %% # Plot the Wasserstein barycenter # ------------------------------- pl.figure(2, (8, 3)) pl.scatter(x1[:, 0], x1[:, 1], alpha=0.5) pl.scatter(x2[:, 0], x2[:, 1], alpha=0.5) pl.scatter(X[:, 0], X[:, 1], s=b * 1000, marker='s', label='2-Wasserstein barycenter') pl.title('Data measures and their barycenter') pl.legend(loc="lower right") pl.show() # %% # Compute free support Sinkhorn barycenter k = 200 # number of Diracs of the barycenter X_init = np.random.normal(0., 1., (k, d)) # initial Dirac locations b = np.ones((k,)) / k # weights of the barycenter (it will not be optimized, only the locations are optimized) X = ot.bregman.free_support_sinkhorn_barycenter(measures_locations, measures_weights, X_init, 20, b, numItermax=15) # %% # Plot the Wasserstein barycenter # ------------------------------- pl.figure(2, (8, 3)) pl.scatter(x1[:, 0], x1[:, 1], alpha=0.5) pl.scatter(x2[:, 0], x2[:, 1], alpha=0.5) pl.scatter(X[:, 0], X[:, 1], s=b * 1000, marker='s', label='2-Wasserstein barycenter') pl.title('Data measures and their barycenter') pl.legend(loc="lower right") pl.show() python-pot-0.9.3+dfsg/examples/barycenters/plot_free_support_sinkhorn_barycenter.py000066400000000000000000000106111455713015700312070ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ ======================================================== 2D free support Sinkhorn barycenters of distributions ======================================================== Illustration of Sinkhorn barycenter calculation between empirical distributions understood as point clouds """ # Authors: Eduardo Fernandes Montesuma # # License: MIT License import numpy as np import matplotlib.pyplot as plt import ot # %% # General Parameters # ------------------ reg = 1e-2 # Entropic Regularization numItermax = 20 # Maximum number of iterations for the 
Barycenter algorithm numInnerItermax = 50 # Maximum number of sinkhorn iterations n_samples = 200 # %% # Generate Data # ------------- X1 = np.random.randn(200, 2) X2 = 2 * np.concatenate([ np.concatenate([- np.ones([50, 1]), np.linspace(-1, 1, 50)[:, None]], axis=1), np.concatenate([np.linspace(-1, 1, 50)[:, None], np.ones([50, 1])], axis=1), np.concatenate([np.ones([50, 1]), np.linspace(1, -1, 50)[:, None]], axis=1), np.concatenate([np.linspace(1, -1, 50)[:, None], - np.ones([50, 1])], axis=1), ], axis=0) X3 = np.random.randn(200, 2) X3 = 2 * (X3 / np.linalg.norm(X3, axis=1)[:, None]) X4 = np.random.multivariate_normal(np.array([0, 0]), np.array([[1., 0.5], [0.5, 1.]]), size=200) a1, a2, a3, a4 = ot.unif(len(X1)), ot.unif(len(X1)), ot.unif(len(X1)), ot.unif(len(X1)) # %% # Inspect generated distributions # ------------------------------- fig, axes = plt.subplots(1, 4, figsize=(16, 4)) axes[0].scatter(x=X1[:, 0], y=X1[:, 1], c='steelblue', edgecolor='k') axes[1].scatter(x=X2[:, 0], y=X2[:, 1], c='steelblue', edgecolor='k') axes[2].scatter(x=X3[:, 0], y=X3[:, 1], c='steelblue', edgecolor='k') axes[3].scatter(x=X4[:, 0], y=X4[:, 1], c='steelblue', edgecolor='k') axes[0].set_xlim([-3, 3]) axes[0].set_ylim([-3, 3]) axes[0].set_title('Distribution 1') axes[1].set_xlim([-3, 3]) axes[1].set_ylim([-3, 3]) axes[1].set_title('Distribution 2') axes[2].set_xlim([-3, 3]) axes[2].set_ylim([-3, 3]) axes[2].set_title('Distribution 3') axes[3].set_xlim([-3, 3]) axes[3].set_ylim([-3, 3]) axes[3].set_title('Distribution 4') plt.tight_layout() plt.show() # %% # Interpolating Empirical Distributions # ------------------------------------- fig = plt.figure(figsize=(10, 10)) weights = np.array([ [3 / 3, 0 / 3], [2 / 3, 1 / 3], [1 / 3, 2 / 3], [0 / 3, 3 / 3], ]).astype(np.float32) for k in range(4): XB_init = np.random.randn(n_samples, 2) XB = ot.bregman.free_support_sinkhorn_barycenter( measures_locations=[X1, X2], measures_weights=[a1, a2], weights=weights[k], X_init=XB_init, reg=reg, numItermax=numItermax, numInnerItermax=numInnerItermax ) ax = plt.subplot2grid((4, 4), (0, k)) ax.scatter(XB[:, 0], XB[:, 1], color='steelblue', edgecolor='k') ax.set_xlim([-3, 3]) ax.set_ylim([-3, 3]) for k in range(1, 4, 1): XB_init = np.random.randn(n_samples, 2) XB = ot.bregman.free_support_sinkhorn_barycenter( measures_locations=[X1, X3], measures_weights=[a1, a2], weights=weights[k], X_init=XB_init, reg=reg, numItermax=numItermax, numInnerItermax=numInnerItermax ) ax = plt.subplot2grid((4, 4), (k, 0)) ax.scatter(XB[:, 0], XB[:, 1], color='steelblue', edgecolor='k') ax.set_xlim([-3, 3]) ax.set_ylim([-3, 3]) for k in range(1, 4, 1): XB_init = np.random.randn(n_samples, 2) XB = ot.bregman.free_support_sinkhorn_barycenter( measures_locations=[X3, X4], measures_weights=[a1, a2], weights=weights[k], X_init=XB_init, reg=reg, numItermax=numItermax, numInnerItermax=numInnerItermax ) ax = plt.subplot2grid((4, 4), (3, k)) ax.scatter(XB[:, 0], XB[:, 1], color='steelblue', edgecolor='k') ax.set_xlim([-3, 3]) ax.set_ylim([-3, 3]) for k in range(1, 3, 1): XB_init = np.random.randn(n_samples, 2) XB = ot.bregman.free_support_sinkhorn_barycenter( measures_locations=[X2, X4], measures_weights=[a1, a2], weights=weights[k], X_init=XB_init, reg=reg, numItermax=numItermax, numInnerItermax=numInnerItermax ) ax = plt.subplot2grid((4, 4), (k, 3)) ax.scatter(XB[:, 0], XB[:, 1], color='steelblue', edgecolor='k') ax.set_xlim([-3, 3]) ax.set_ylim([-3, 3]) plt.show() 
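
# %%
# Four-way barycenter (an added sketch, not part of the figure above): the
# same routine accepts any number of input measures, e.g. the equal-weight
# Sinkhorn barycenter of the four distributions generated earlier.

XB_init = np.random.randn(n_samples, 2)
XB = ot.bregman.free_support_sinkhorn_barycenter(
    measures_locations=[X1, X2, X3, X4],
    measures_weights=[a1, a2, a3, a4],
    weights=np.ones(4) / 4,
    X_init=XB_init,
    reg=reg,
    numItermax=numItermax,
    numInnerItermax=numInnerItermax
)

plt.figure(figsize=(4, 4))
plt.scatter(XB[:, 0], XB[:, 1], color='steelblue', edgecolor='k')
plt.xlim([-3, 3])
plt.ylim([-3, 3])
plt.title('Equal-weight barycenter of the 4 distributions')
plt.show()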
python-pot-0.9.3+dfsg/examples/barycenters/plot_gaussian_barycenter.py000066400000000000000000000057171455713015700264040ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ ======================================================== Gaussian Bures-Wasserstein barycenters ======================================================== Illustration of Gaussian Bures-Wasserstein barycenters. """ # Authors: Rémi Flamary # # License: MIT License # sphinx_gallery_thumbnail_number = 2 # %% from matplotlib import colors from matplotlib.patches import Ellipse import numpy as np import matplotlib.pylab as pl import ot # %% # Define Gaussian Covariances and distributions # --------------------------------------------- C1 = np.array([[0.5, -0.4], [-0.4, 0.5]]) C2 = np.array([[1, 0.3], [0.3, 1]]) C3 = np.array([[1.5, 0], [0, 0.5]]) C4 = np.array([[0.5, 0], [0, 1.5]]) C = np.stack((C1, C2, C3, C4)) m1 = np.array([0, 0]) m2 = np.array([0, 4]) m3 = np.array([4, 0]) m4 = np.array([4, 4]) m = np.stack((m1, m2, m3, m4)) # %% # Plot the distributions # ---------------------- def draw_cov(mu, C, color=None, label=None, nstd=1): def eigsorted(cov): vals, vecs = np.linalg.eigh(cov) order = vals.argsort()[::-1] return vals[order], vecs[:, order] vals, vecs = eigsorted(C) theta = np.degrees(np.arctan2(*vecs[:, 0][::-1])) w, h = 2 * nstd * np.sqrt(vals) ell = Ellipse(xy=(mu[0], mu[1]), width=w, height=h, alpha=0.5, angle=theta, facecolor=color, edgecolor=color, label=label, fill=True) pl.gca().add_artist(ell) #pl.scatter(mu[0],mu[1],color=color, marker='x') axis = [-1.5, 5.5, -1.5, 5.5] pl.figure(1, (8, 2)) pl.clf() pl.subplot(1, 4, 1) draw_cov(m1, C1, color='C0') pl.axis(axis) pl.title('$\mathcal{N}(m_1,\Sigma_1)$') pl.subplot(1, 4, 2) draw_cov(m2, C2, color='C1') pl.axis(axis) pl.title('$\mathcal{N}(m_2,\Sigma_2)$') pl.subplot(1, 4, 3) draw_cov(m3, C3, color='C2') pl.axis(axis) pl.title('$\mathcal{N}(m_3,\Sigma_3)$') pl.subplot(1, 4, 4) draw_cov(m4, C4, color='C3') pl.axis(axis) pl.title('$\mathcal{N}(m_4,\Sigma_4)$') # %% # Compute Bures-Wasserstein barycenters and plot them # ------------------------------------------- # basis for bilinear interpolation v1 = np.array((1, 0, 0, 0)) v2 = np.array((0, 1, 0, 0)) v3 = np.array((0, 0, 1, 0)) v4 = np.array((0, 0, 0, 1)) colors = np.stack((colors.to_rgb('C0'), colors.to_rgb('C1'), colors.to_rgb('C2'), colors.to_rgb('C3'))) pl.figure(2, (8, 8)) nb_interp = 6 for i in range(nb_interp): for j in range(nb_interp): tx = float(i) / (nb_interp - 1) ty = float(j) / (nb_interp - 1) # weights are constructed by bilinear interpolation tmp1 = (1 - tx) * v1 + tx * v2 tmp2 = (1 - tx) * v3 + tx * v4 weights = (1 - ty) * tmp1 + ty * tmp2 color = np.dot(colors.T, weights) mb, Cb = ot.gaussian.bures_wasserstein_barycenter(m, C, weights) draw_cov(mb, Cb, color=color, label=None, nstd=0.3) pl.axis(axis) pl.axis('off') pl.tight_layout() python-pot-0.9.3+dfsg/examples/barycenters/plot_generalized_free_support_barycenter.py000066400000000000000000000103771455713015700316560ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ ======================================= Generalized Wasserstein Barycenter Demo ======================================= This example illustrates the computation of Generalized Wasserstein Barycenter as proposed in [42]. [42] Delon, J., Gozlan, N., and Saint-Dizier, A.. Generalized Wasserstein barycenters between probability measures living on different subspaces. arXiv preprint arXiv:2105.09755, 2021. 
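
In the script below, each input measure lives in its own 2D space and
``P_list`` collects the linear operators tying the common 3D space to those
subspaces; the barycenter support ``Y`` is then obtained in 3D with a single
call (a sketch of the call made below)::

    Y = ot.lp.generalized_free_support_barycenter(X_list, a_list, P_list, n_samples_bary)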
""" # Author: Eloi Tanguy # # License: MIT License # sphinx_gallery_thumbnail_number = 2 import numpy as np import matplotlib.pyplot as plt import matplotlib.pylab as pl import ot import matplotlib.animation as animation ######################## # Generate and plot data # ---------------------- # Input measures sub_sample_factor = 8 I1 = pl.imread('../../data/redcross.png').astype(np.float64)[::sub_sample_factor, ::sub_sample_factor, 2] I2 = pl.imread('../../data/tooth.png').astype(np.float64)[::-sub_sample_factor, ::sub_sample_factor, 2] I3 = pl.imread('../../data/heart.png').astype(np.float64)[::-sub_sample_factor, ::sub_sample_factor, 2] sz = I1.shape[0] UU, VV = np.meshgrid(np.arange(sz), np.arange(sz)) # Input measure locations in their respective 2D spaces X_list = [np.stack((UU[im == 0], VV[im == 0]), 1) * 1.0 for im in [I1, I2, I3]] # Input measure weights a_list = [ot.unif(x.shape[0]) for x in X_list] # Projections 3D -> 2D P1 = np.array([[1, 0, 0], [0, 1, 0]]) P2 = np.array([[0, 1, 0], [0, 0, 1]]) P3 = np.array([[1, 0, 0], [0, 0, 1]]) P_list = [P1, P2, P3] # Barycenter weights weights = np.array([1 / 3, 1 / 3, 1 / 3]) # Number of barycenter points to compute n_samples_bary = 150 # Send the input measures into 3D space for visualization X_visu = [Xi @ Pi for (Xi, Pi) in zip(X_list, P_list)] # Plot the input data fig = plt.figure(figsize=(3, 3)) axis = fig.add_subplot(1, 1, 1, projection="3d") for Xi in X_visu: axis.scatter(Xi[:, 0], Xi[:, 1], Xi[:, 2], marker='o', alpha=.6) axis.view_init(azim=45) axis.set_xticks([]) axis.set_yticks([]) axis.set_zticks([]) plt.show() ################################# # Barycenter computation and plot # ------------------------------- Y = ot.lp.generalized_free_support_barycenter(X_list, a_list, P_list, n_samples_bary) fig = plt.figure(figsize=(3, 3)) axis = fig.add_subplot(1, 1, 1, projection="3d") for Xi in X_visu: axis.scatter(Xi[:, 0], Xi[:, 1], Xi[:, 2], marker='o', alpha=.6) axis.scatter(Y[:, 0], Y[:, 1], Y[:, 2], marker='o', alpha=.6) axis.view_init(azim=45) axis.set_xticks([]) axis.set_yticks([]) axis.set_zticks([]) plt.show() ############################# # Plotting projection matches # --------------------------- fig = plt.figure(figsize=(9, 3)) ax = fig.add_subplot(1, 3, 1, projection='3d') for Xi in X_visu: ax.scatter(Xi[:, 0], Xi[:, 1], Xi[:, 2], marker='o', alpha=.6) ax.scatter(Y[:, 0], Y[:, 1], Y[:, 2], marker='o', alpha=.6) ax.view_init(elev=0, azim=0) ax.set_xticks([]) ax.set_yticks([]) ax.set_zticks([]) ax = fig.add_subplot(1, 3, 2, projection='3d') for Xi in X_visu: ax.scatter(Xi[:, 0], Xi[:, 1], Xi[:, 2], marker='o', alpha=.6) ax.scatter(Y[:, 0], Y[:, 1], Y[:, 2], marker='o', alpha=.6) ax.view_init(elev=0, azim=90) ax.set_xticks([]) ax.set_yticks([]) ax.set_zticks([]) ax = fig.add_subplot(1, 3, 3, projection='3d') for Xi in X_visu: ax.scatter(Xi[:, 0], Xi[:, 1], Xi[:, 2], marker='o', alpha=.6) ax.scatter(Y[:, 0], Y[:, 1], Y[:, 2], marker='o', alpha=.6) ax.view_init(elev=90, azim=0) ax.set_xticks([]) ax.set_yticks([]) ax.set_zticks([]) plt.tight_layout() plt.show() ############################################## # Rotation animation # -------------------------------------------- fig = plt.figure(figsize=(7, 7)) ax = fig.add_subplot(1, 1, 1, projection="3d") def _init(): for Xi in X_visu: ax.scatter(Xi[:, 0], Xi[:, 1], Xi[:, 2], marker='o', alpha=.6) ax.scatter(Y[:, 0], Y[:, 1], Y[:, 2], marker='o', alpha=.6) ax.view_init(elev=0, azim=0) ax.set_xticks([]) ax.set_yticks([]) ax.set_zticks([]) return fig, def _update_plot(i): if i < 
45: ax.view_init(elev=0, azim=4 * i) else: ax.view_init(elev=i - 45, azim=4 * i) return fig, ani = animation.FuncAnimation(fig, _update_plot, init_func=_init, frames=136, interval=50, blit=True, repeat_delay=2000) python-pot-0.9.3+dfsg/examples/domain-adaptation/000077500000000000000000000000001455713015700220025ustar00rootroot00000000000000python-pot-0.9.3+dfsg/examples/domain-adaptation/README.txt000066400000000000000000000000701455713015700234750ustar00rootroot00000000000000 Domain adaptation examples --------------------------python-pot-0.9.3+dfsg/examples/domain-adaptation/plot_otda_classes.py000066400000000000000000000103551455713015700260620ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ ======================== OT for domain adaptation ======================== This example introduces a domain adaptation in a 2D setting and the 4 OTDA approaches currently supported in POT. """ # Authors: Remi Flamary # Stanislas Chambon # # License: MIT License import matplotlib.pylab as pl import ot ############################################################################## # Generate data # ------------- n_source_samples = 150 n_target_samples = 150 Xs, ys = ot.datasets.make_data_classif('3gauss', n_source_samples) Xt, yt = ot.datasets.make_data_classif('3gauss2', n_target_samples) ############################################################################## # Instantiate the different transport algorithms and fit them # ----------------------------------------------------------- # EMD Transport ot_emd = ot.da.EMDTransport() ot_emd.fit(Xs=Xs, Xt=Xt) # Sinkhorn Transport ot_sinkhorn = ot.da.SinkhornTransport(reg_e=1e-1) ot_sinkhorn.fit(Xs=Xs, Xt=Xt) # Sinkhorn Transport with Group lasso regularization ot_lpl1 = ot.da.SinkhornLpl1Transport(reg_e=1e-1, reg_cl=1e0) ot_lpl1.fit(Xs=Xs, ys=ys, Xt=Xt) # Sinkhorn Transport with Group lasso regularization l1l2 ot_l1l2 = ot.da.SinkhornL1l2Transport(reg_e=1e-1, reg_cl=2e0, max_iter=20, verbose=True) ot_l1l2.fit(Xs=Xs, ys=ys, Xt=Xt) # transport source samples onto target samples transp_Xs_emd = ot_emd.transform(Xs=Xs) transp_Xs_sinkhorn = ot_sinkhorn.transform(Xs=Xs) transp_Xs_lpl1 = ot_lpl1.transform(Xs=Xs) transp_Xs_l1l2 = ot_l1l2.transform(Xs=Xs) ############################################################################## # Fig 1 : plots source and target samples # --------------------------------------- pl.figure(1, figsize=(10, 5)) pl.subplot(1, 2, 1) pl.scatter(Xs[:, 0], Xs[:, 1], c=ys, marker='+', label='Source samples') pl.xticks([]) pl.yticks([]) pl.legend(loc=0) pl.title('Source samples') pl.subplot(1, 2, 2) pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples') pl.xticks([]) pl.yticks([]) pl.legend(loc=0) pl.title('Target samples') pl.tight_layout() ############################################################################## # Fig 2 : plot optimal couplings and transported samples # ------------------------------------------------------ param_img = {'interpolation': 'nearest'} pl.figure(2, figsize=(15, 8)) pl.subplot(2, 4, 1) pl.imshow(ot_emd.coupling_, **param_img) pl.xticks([]) pl.yticks([]) pl.title('Optimal coupling\nEMDTransport') pl.subplot(2, 4, 2) pl.imshow(ot_sinkhorn.coupling_, **param_img) pl.xticks([]) pl.yticks([]) pl.title('Optimal coupling\nSinkhornTransport') pl.subplot(2, 4, 3) pl.imshow(ot_lpl1.coupling_, **param_img) pl.xticks([]) pl.yticks([]) pl.title('Optimal coupling\nSinkhornLpl1Transport') pl.subplot(2, 4, 4) pl.imshow(ot_l1l2.coupling_, **param_img) pl.xticks([]) pl.yticks([]) 
pl.title('Optimal coupling\nSinkhornL1l2Transport') pl.subplot(2, 4, 5) pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples', alpha=0.3) pl.scatter(transp_Xs_emd[:, 0], transp_Xs_emd[:, 1], c=ys, marker='+', label='Transp samples', s=30) pl.xticks([]) pl.yticks([]) pl.title('Transported samples\nEmdTransport') pl.legend(loc="lower left") pl.subplot(2, 4, 6) pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples', alpha=0.3) pl.scatter(transp_Xs_sinkhorn[:, 0], transp_Xs_sinkhorn[:, 1], c=ys, marker='+', label='Transp samples', s=30) pl.xticks([]) pl.yticks([]) pl.title('Transported samples\nSinkhornTransport') pl.subplot(2, 4, 7) pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples', alpha=0.3) pl.scatter(transp_Xs_lpl1[:, 0], transp_Xs_lpl1[:, 1], c=ys, marker='+', label='Transp samples', s=30) pl.xticks([]) pl.yticks([]) pl.title('Transported samples\nSinkhornLpl1Transport') pl.subplot(2, 4, 8) pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples', alpha=0.3) pl.scatter(transp_Xs_l1l2[:, 0], transp_Xs_l1l2[:, 1], c=ys, marker='+', label='Transp samples', s=30) pl.xticks([]) pl.yticks([]) pl.title('Transported samples\nSinkhornL1l2Transport') pl.tight_layout() pl.show() python-pot-0.9.3+dfsg/examples/domain-adaptation/plot_otda_color_images.py000066400000000000000000000074371455713015700270770ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ ============================= OT for image color adaptation ============================= This example presents a way of transferring colors between two images with Optimal Transport as introduced in [6] [6] Ferradans, S., Papadakis, N., Peyre, G., & Aujol, J. F. (2014). Regularized discrete optimal transport. SIAM Journal on Imaging Sciences, 7(3), 1853-1882. 
""" # Authors: Remi Flamary # Stanislas Chambon # # License: MIT License # sphinx_gallery_thumbnail_number = 2 import os from pathlib import Path import numpy as np from matplotlib import pyplot as plt import ot rng = np.random.RandomState(42) def im2mat(img): """Converts an image to matrix (one pixel per line)""" return img.reshape((img.shape[0] * img.shape[1], img.shape[2])) def mat2im(X, shape): """Converts back a matrix to an image""" return X.reshape(shape) def minmax(img): return np.clip(img, 0, 1) ############################################################################## # Generate data # ------------- # Loading images this_file = os.path.realpath('__file__') data_path = os.path.join(Path(this_file).parent.parent.parent, 'data') I1 = plt.imread(os.path.join(data_path, 'ocean_day.jpg')).astype(np.float64) / 256 I2 = plt.imread(os.path.join(data_path, 'ocean_sunset.jpg')).astype(np.float64) / 256 X1 = im2mat(I1) X2 = im2mat(I2) # training samples nb = 500 idx1 = rng.randint(X1.shape[0], size=(nb,)) idx2 = rng.randint(X2.shape[0], size=(nb,)) Xs = X1[idx1, :] Xt = X2[idx2, :] ############################################################################## # Plot original image # ------------------- plt.figure(1, figsize=(6.4, 3)) plt.subplot(1, 2, 1) plt.imshow(I1) plt.axis('off') plt.title('Image 1') plt.subplot(1, 2, 2) plt.imshow(I2) plt.axis('off') plt.title('Image 2') ############################################################################## # Scatter plot of colors # ---------------------- plt.figure(2, figsize=(6.4, 3)) plt.subplot(1, 2, 1) plt.scatter(Xs[:, 0], Xs[:, 2], c=Xs) plt.axis([0, 1, 0, 1]) plt.xlabel('Red') plt.ylabel('Blue') plt.title('Image 1') plt.subplot(1, 2, 2) plt.scatter(Xt[:, 0], Xt[:, 2], c=Xt) plt.axis([0, 1, 0, 1]) plt.xlabel('Red') plt.ylabel('Blue') plt.title('Image 2') plt.tight_layout() ############################################################################## # Instantiate the different transport algorithms and fit them # ----------------------------------------------------------- # EMDTransport ot_emd = ot.da.EMDTransport() ot_emd.fit(Xs=Xs, Xt=Xt) # SinkhornTransport ot_sinkhorn = ot.da.SinkhornTransport(reg_e=1e-1) ot_sinkhorn.fit(Xs=Xs, Xt=Xt) # prediction between images (using out of sample prediction as in [6]) transp_Xs_emd = ot_emd.transform(Xs=X1) transp_Xt_emd = ot_emd.inverse_transform(Xt=X2) transp_Xs_sinkhorn = ot_sinkhorn.transform(Xs=X1) transp_Xt_sinkhorn = ot_sinkhorn.inverse_transform(Xt=X2) I1t = minmax(mat2im(transp_Xs_emd, I1.shape)) I2t = minmax(mat2im(transp_Xt_emd, I2.shape)) I1te = minmax(mat2im(transp_Xs_sinkhorn, I1.shape)) I2te = minmax(mat2im(transp_Xt_sinkhorn, I2.shape)) ############################################################################## # Plot new images # --------------- plt.figure(3, figsize=(8, 4)) plt.subplot(2, 3, 1) plt.imshow(I1) plt.axis('off') plt.title('Image 1') plt.subplot(2, 3, 2) plt.imshow(I1t) plt.axis('off') plt.title('Image 1 Adapt') plt.subplot(2, 3, 3) plt.imshow(I1te) plt.axis('off') plt.title('Image 1 Adapt (reg)') plt.subplot(2, 3, 4) plt.imshow(I2) plt.axis('off') plt.title('Image 2') plt.subplot(2, 3, 5) plt.imshow(I2t) plt.axis('off') plt.title('Image 2 Adapt') plt.subplot(2, 3, 6) plt.imshow(I2te) plt.axis('off') plt.title('Image 2 Adapt (reg)') plt.tight_layout() plt.show() python-pot-0.9.3+dfsg/examples/domain-adaptation/plot_otda_d2.py000066400000000000000000000124631455713015700247340ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ 
=================================================== OT for domain adaptation on empirical distributions =================================================== This example introduces a domain adaptation in a 2D setting. It explicits the problem of domain adaptation and introduces some optimal transport approaches to solve it. Quantities such as optimal couplings, greater coupling coefficients and transported samples are represented in order to give a visual understanding of what the transport methods are doing. """ # Authors: Remi Flamary # Stanislas Chambon # # License: MIT License # sphinx_gallery_thumbnail_number = 2 import matplotlib.pylab as pl import ot import ot.plot ############################################################################## # Generate data # ------------- n_samples_source = 150 n_samples_target = 150 Xs, ys = ot.datasets.make_data_classif('3gauss', n_samples_source) Xt, yt = ot.datasets.make_data_classif('3gauss2', n_samples_target) # Cost matrix M = ot.dist(Xs, Xt, metric='sqeuclidean') ############################################################################## # Instantiate the different transport algorithms and fit them # ----------------------------------------------------------- # EMD Transport ot_emd = ot.da.EMDTransport() ot_emd.fit(Xs=Xs, Xt=Xt) # Sinkhorn Transport ot_sinkhorn = ot.da.SinkhornTransport(reg_e=1e-1) ot_sinkhorn.fit(Xs=Xs, Xt=Xt) # Sinkhorn Transport with Group lasso regularization ot_lpl1 = ot.da.SinkhornLpl1Transport(reg_e=1e-1, reg_cl=1e0) ot_lpl1.fit(Xs=Xs, ys=ys, Xt=Xt) # transport source samples onto target samples transp_Xs_emd = ot_emd.transform(Xs=Xs) transp_Xs_sinkhorn = ot_sinkhorn.transform(Xs=Xs) transp_Xs_lpl1 = ot_lpl1.transform(Xs=Xs) ############################################################################## # Fig 1 : plots source and target samples + matrix of pairwise distance # --------------------------------------------------------------------- pl.figure(1, figsize=(10, 10)) pl.subplot(2, 2, 1) pl.scatter(Xs[:, 0], Xs[:, 1], c=ys, marker='+', label='Source samples') pl.xticks([]) pl.yticks([]) pl.legend(loc=0) pl.title('Source samples') pl.subplot(2, 2, 2) pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples') pl.xticks([]) pl.yticks([]) pl.legend(loc=0) pl.title('Target samples') pl.subplot(2, 2, 3) pl.imshow(M, interpolation='nearest') pl.xticks([]) pl.yticks([]) pl.title('Matrix of pairwise distances') pl.tight_layout() ############################################################################## # Fig 2 : plots optimal couplings for the different methods # --------------------------------------------------------- pl.figure(2, figsize=(10, 6)) pl.subplot(2, 3, 1) pl.imshow(ot_emd.coupling_, interpolation='nearest') pl.xticks([]) pl.yticks([]) pl.title('Optimal coupling\nEMDTransport') pl.subplot(2, 3, 2) pl.imshow(ot_sinkhorn.coupling_, interpolation='nearest') pl.xticks([]) pl.yticks([]) pl.title('Optimal coupling\nSinkhornTransport') pl.subplot(2, 3, 3) pl.imshow(ot_lpl1.coupling_, interpolation='nearest') pl.xticks([]) pl.yticks([]) pl.title('Optimal coupling\nSinkhornLpl1Transport') pl.subplot(2, 3, 4) ot.plot.plot2D_samples_mat(Xs, Xt, ot_emd.coupling_, c=[.5, .5, 1]) pl.scatter(Xs[:, 0], Xs[:, 1], c=ys, marker='+', label='Source samples') pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples') pl.xticks([]) pl.yticks([]) pl.title('Main coupling coefficients\nEMDTransport') pl.subplot(2, 3, 5) ot.plot.plot2D_samples_mat(Xs, Xt, ot_sinkhorn.coupling_, c=[.5, .5, 1]) 
pl.scatter(Xs[:, 0], Xs[:, 1], c=ys, marker='+', label='Source samples')
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples')
pl.xticks([])
pl.yticks([])
pl.title('Main coupling coefficients\nSinkhornTransport')

pl.subplot(2, 3, 6)
ot.plot.plot2D_samples_mat(Xs, Xt, ot_lpl1.coupling_, c=[.5, .5, 1])
pl.scatter(Xs[:, 0], Xs[:, 1], c=ys, marker='+', label='Source samples')
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples')
pl.xticks([])
pl.yticks([])
pl.title('Main coupling coefficients\nSinkhornLpl1Transport')
pl.tight_layout()

##############################################################################
# Fig 3 : plot transported samples
# --------------------------------

# display transported samples
pl.figure(4, figsize=(10, 4))
pl.subplot(1, 3, 1)
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',
           label='Target samples', alpha=0.5)
pl.scatter(transp_Xs_emd[:, 0], transp_Xs_emd[:, 1], c=ys,
           marker='+', label='Transp samples', s=30)
pl.title('Transported samples\nEmdTransport')
pl.legend(loc=0)
pl.xticks([])
pl.yticks([])

pl.subplot(1, 3, 2)
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',
           label='Target samples', alpha=0.5)
pl.scatter(transp_Xs_sinkhorn[:, 0], transp_Xs_sinkhorn[:, 1], c=ys,
           marker='+', label='Transp samples', s=30)
pl.title('Transported samples\nSinkhornTransport')
pl.xticks([])
pl.yticks([])

pl.subplot(1, 3, 3)
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',
           label='Target samples', alpha=0.5)
pl.scatter(transp_Xs_lpl1[:, 0], transp_Xs_lpl1[:, 1], c=ys,
           marker='+', label='Transp samples', s=30)
pl.title('Transported samples\nSinkhornLpl1Transport')
pl.xticks([])
pl.yticks([])
pl.tight_layout()

pl.show()
python-pot-0.9.3+dfsg/examples/domain-adaptation/plot_otda_jcpot.py000066400000000000000000000130441455713015700255420ustar00rootroot00000000000000# -*- coding: utf-8 -*-
"""
================================
OT for multi-source target shift
================================

This example introduces a target shift problem with two 2D source domains and
one target domain.
""" # Authors: Remi Flamary # Ievgen Redko # # License: MIT License import pylab as pl import numpy as np import ot from ot.datasets import make_data_classif ############################################################################## # Generate data # ------------- n = 50 sigma = 0.3 np.random.seed(1985) p1 = .2 dec1 = [0, 2] p2 = .9 dec2 = [0, -2] pt = .4 dect = [4, 0] xs1, ys1 = make_data_classif('2gauss_prop', n, nz=sigma, p=p1, bias=dec1) xs2, ys2 = make_data_classif('2gauss_prop', n + 1, nz=sigma, p=p2, bias=dec2) xt, yt = make_data_classif('2gauss_prop', n, nz=sigma, p=pt, bias=dect) all_Xr = [xs1, xs2] all_Yr = [ys1, ys2] # %% da = 1.5 def plot_ax(dec, name): pl.plot([dec[0], dec[0]], [dec[1] - da, dec[1] + da], 'k', alpha=0.5) pl.plot([dec[0] - da, dec[0] + da], [dec[1], dec[1]], 'k', alpha=0.5) pl.text(dec[0] - .5, dec[1] + 2, name) ############################################################################## # Fig 1 : plots source and target samples # --------------------------------------- pl.figure(1) pl.clf() plot_ax(dec1, 'Source 1') plot_ax(dec2, 'Source 2') plot_ax(dect, 'Target') pl.scatter(xs1[:, 0], xs1[:, 1], c=ys1, s=35, marker='x', cmap='Set1', vmax=9, label='Source 1 ({:1.2f}, {:1.2f})'.format(1 - p1, p1)) pl.scatter(xs2[:, 0], xs2[:, 1], c=ys2, s=35, marker='+', cmap='Set1', vmax=9, label='Source 2 ({:1.2f}, {:1.2f})'.format(1 - p2, p2)) pl.scatter(xt[:, 0], xt[:, 1], c=yt, s=35, marker='o', cmap='Set1', vmax=9, label='Target ({:1.2f}, {:1.2f})'.format(1 - pt, pt)) pl.title('Data') pl.legend() pl.axis('equal') pl.axis('off') ############################################################################## # Instantiate Sinkhorn transport algorithm and fit them for all source domains # ---------------------------------------------------------------------------- ot_sinkhorn = ot.da.SinkhornTransport(reg_e=1e-1, metric='sqeuclidean') def print_G(G, xs, ys, xt): for i in range(G.shape[0]): for j in range(G.shape[1]): if G[i, j] > 5e-4: if ys[i]: c = 'b' else: c = 'r' pl.plot([xs[i, 0], xt[j, 0]], [xs[i, 1], xt[j, 1]], c, alpha=.2) ############################################################################## # Fig 2 : plot optimal couplings and transported samples # ------------------------------------------------------ pl.figure(2) pl.clf() plot_ax(dec1, 'Source 1') plot_ax(dec2, 'Source 2') plot_ax(dect, 'Target') print_G(ot_sinkhorn.fit(Xs=xs1, Xt=xt).coupling_, xs1, ys1, xt) print_G(ot_sinkhorn.fit(Xs=xs2, Xt=xt).coupling_, xs2, ys2, xt) pl.scatter(xs1[:, 0], xs1[:, 1], c=ys1, s=35, marker='x', cmap='Set1', vmax=9) pl.scatter(xs2[:, 0], xs2[:, 1], c=ys2, s=35, marker='+', cmap='Set1', vmax=9) pl.scatter(xt[:, 0], xt[:, 1], c=yt, s=35, marker='o', cmap='Set1', vmax=9) pl.plot([], [], 'r', alpha=.2, label='Mass from Class 1') pl.plot([], [], 'b', alpha=.2, label='Mass from Class 2') pl.title('Independent OT') pl.legend() pl.axis('equal') pl.axis('off') ############################################################################## # Instantiate JCPOT adaptation algorithm and fit it # ---------------------------------------------------------------------------- otda = ot.da.JCPOTTransport(reg_e=1, max_iter=1000, metric='sqeuclidean', tol=1e-9, verbose=True, log=True) otda.fit(all_Xr, all_Yr, xt) ws1 = otda.proportions_.dot(otda.log_['D2'][0]) ws2 = otda.proportions_.dot(otda.log_['D2'][1]) pl.figure(3) pl.clf() plot_ax(dec1, 'Source 1') plot_ax(dec2, 'Source 2') plot_ax(dect, 'Target') print_G(ot.bregman.sinkhorn(ws1, [], otda.log_['M'][0], reg=1e-1), xs1, ys1, xt) 
print_G(ot.bregman.sinkhorn(ws2, [], otda.log_['M'][1], reg=1e-1), xs2, ys2, xt) pl.scatter(xs1[:, 0], xs1[:, 1], c=ys1, s=35, marker='x', cmap='Set1', vmax=9) pl.scatter(xs2[:, 0], xs2[:, 1], c=ys2, s=35, marker='+', cmap='Set1', vmax=9) pl.scatter(xt[:, 0], xt[:, 1], c=yt, s=35, marker='o', cmap='Set1', vmax=9) pl.plot([], [], 'r', alpha=.2, label='Mass from Class 1') pl.plot([], [], 'b', alpha=.2, label='Mass from Class 2') pl.title('OT with prop estimation ({:1.3f},{:1.3f})'.format(otda.proportions_[0], otda.proportions_[1])) pl.legend() pl.axis('equal') pl.axis('off') ############################################################################## # Run oracle transport algorithm with known proportions # ---------------------------------------------------------------------------- h_res = np.array([1 - pt, pt]) ws1 = h_res.dot(otda.log_['D2'][0]) ws2 = h_res.dot(otda.log_['D2'][1]) pl.figure(4) pl.clf() plot_ax(dec1, 'Source 1') plot_ax(dec2, 'Source 2') plot_ax(dect, 'Target') print_G(ot.bregman.sinkhorn(ws1, [], otda.log_['M'][0], reg=1e-1), xs1, ys1, xt) print_G(ot.bregman.sinkhorn(ws2, [], otda.log_['M'][1], reg=1e-1), xs2, ys2, xt) pl.scatter(xs1[:, 0], xs1[:, 1], c=ys1, s=35, marker='x', cmap='Set1', vmax=9) pl.scatter(xs2[:, 0], xs2[:, 1], c=ys2, s=35, marker='+', cmap='Set1', vmax=9) pl.scatter(xt[:, 0], xt[:, 1], c=yt, s=35, marker='o', cmap='Set1', vmax=9) pl.plot([], [], 'r', alpha=.2, label='Mass from Class 1') pl.plot([], [], 'b', alpha=.2, label='Mass from Class 2') pl.title('OT with known proportion ({:1.1f},{:1.1f})'.format(h_res[0], h_res[1])) pl.legend() pl.axis('equal') pl.axis('off') pl.show() python-pot-0.9.3+dfsg/examples/domain-adaptation/plot_otda_laplacian.py000066400000000000000000000072011455713015700263450ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ ====================================================== OT with Laplacian regularization for domain adaptation ====================================================== This example introduces a domain adaptation in a 2D setting and OTDA approach with Laplacian regularization. 
""" # Authors: Ievgen Redko # License: MIT License import matplotlib.pylab as pl import ot ############################################################################## # Generate data # ------------- n_source_samples = 150 n_target_samples = 150 Xs, ys = ot.datasets.make_data_classif('3gauss', n_source_samples) Xt, yt = ot.datasets.make_data_classif('3gauss2', n_target_samples) ############################################################################## # Instantiate the different transport algorithms and fit them # ----------------------------------------------------------- # EMD Transport ot_emd = ot.da.EMDTransport() ot_emd.fit(Xs=Xs, Xt=Xt) # Sinkhorn Transport ot_sinkhorn = ot.da.SinkhornTransport(reg_e=.01) ot_sinkhorn.fit(Xs=Xs, Xt=Xt) # EMD Transport with Laplacian regularization ot_emd_laplace = ot.da.EMDLaplaceTransport(reg_lap=100, reg_src=1) ot_emd_laplace.fit(Xs=Xs, Xt=Xt) # transport source samples onto target samples transp_Xs_emd = ot_emd.transform(Xs=Xs) transp_Xs_sinkhorn = ot_sinkhorn.transform(Xs=Xs) transp_Xs_emd_laplace = ot_emd_laplace.transform(Xs=Xs) ############################################################################## # Fig 1 : plots source and target samples # --------------------------------------- pl.figure(1, figsize=(10, 5)) pl.subplot(1, 2, 1) pl.scatter(Xs[:, 0], Xs[:, 1], c=ys, marker='+', label='Source samples') pl.xticks([]) pl.yticks([]) pl.legend(loc=0) pl.title('Source samples') pl.subplot(1, 2, 2) pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples') pl.xticks([]) pl.yticks([]) pl.legend(loc=0) pl.title('Target samples') pl.tight_layout() ############################################################################## # Fig 2 : plot optimal couplings and transported samples # ------------------------------------------------------ param_img = {'interpolation': 'nearest'} pl.figure(2, figsize=(15, 8)) pl.subplot(2, 3, 1) pl.imshow(ot_emd.coupling_, **param_img) pl.xticks([]) pl.yticks([]) pl.title('Optimal coupling\nEMDTransport') pl.figure(2, figsize=(15, 8)) pl.subplot(2, 3, 2) pl.imshow(ot_sinkhorn.coupling_, **param_img) pl.xticks([]) pl.yticks([]) pl.title('Optimal coupling\nSinkhornTransport') pl.subplot(2, 3, 3) pl.imshow(ot_emd_laplace.coupling_, **param_img) pl.xticks([]) pl.yticks([]) pl.title('Optimal coupling\nEMDLaplaceTransport') pl.subplot(2, 3, 4) pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples', alpha=0.3) pl.scatter(transp_Xs_emd[:, 0], transp_Xs_emd[:, 1], c=ys, marker='+', label='Transp samples', s=30) pl.xticks([]) pl.yticks([]) pl.title('Transported samples\nEmdTransport') pl.legend(loc="lower left") pl.subplot(2, 3, 5) pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples', alpha=0.3) pl.scatter(transp_Xs_sinkhorn[:, 0], transp_Xs_sinkhorn[:, 1], c=ys, marker='+', label='Transp samples', s=30) pl.xticks([]) pl.yticks([]) pl.title('Transported samples\nSinkhornTransport') pl.subplot(2, 3, 6) pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples', alpha=0.3) pl.scatter(transp_Xs_emd_laplace[:, 0], transp_Xs_emd_laplace[:, 1], c=ys, marker='+', label='Transp samples', s=30) pl.xticks([]) pl.yticks([]) pl.title('Transported samples\nEMDLaplaceTransport') pl.tight_layout() pl.show() python-pot-0.9.3+dfsg/examples/domain-adaptation/plot_otda_linear_mapping.py000066400000000000000000000104451455713015700274120ustar00rootroot00000000000000#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ ============================ Linear OT mapping estimation 
============================ """ # Author: Remi Flamary # # License: MIT License # sphinx_gallery_thumbnail_number = 2 #%% import os from pathlib import Path import numpy as np from matplotlib import pyplot as plt import ot ############################################################################## # Generate data # ------------- n = 1000 d = 2 sigma = .1 rng = np.random.RandomState(42) # source samples angles = rng.rand(n, 1) * 2 * np.pi xs = np.concatenate((np.sin(angles), np.cos(angles)), axis=1) + sigma * rng.randn(n, 2) xs[:n // 2, 1] += 2 # target samples anglet = rng.rand(n, 1) * 2 * np.pi xt = np.concatenate((np.sin(anglet), np.cos(anglet)), axis=1) + sigma * rng.randn(n, 2) xt[:n // 2, 1] += 2 A = np.array([[1.5, .7], [.7, 1.5]]) b = np.array([[4, 2]]) xt = xt.dot(A) + b ############################################################################## # Plot data # --------- plt.figure(1, (5, 5)) plt.plot(xs[:, 0], xs[:, 1], '+') plt.plot(xt[:, 0], xt[:, 1], 'o') plt.legend(('Source', 'Target')) plt.title('Source and target distributions') plt.show() ############################################################################## # Estimate linear mapping and transport # ------------------------------------- # Gaussian (linear) Monge mapping estimation Ae, be = ot.gaussian.empirical_bures_wasserstein_mapping(xs, xt) xst = xs.dot(Ae) + be # Gaussian (linear) GW mapping estimation Agw, bgw = ot.gaussian.empirical_gaussian_gromov_wasserstein_mapping(xs, xt) xstgw = xs.dot(Agw) + bgw ############################################################################## # Plot transported samples # ------------------------ plt.figure(2, (10, 5)) plt.clf() plt.subplot(1, 2, 1) plt.plot(xs[:, 0], xs[:, 1], '+') plt.plot(xt[:, 0], xt[:, 1], 'o') plt.plot(xst[:, 0], xst[:, 1], '+') plt.legend(('Source', 'Target', 'Transp. Monge'), loc=0) plt.title('Transported samples with Monge') plt.subplot(1, 2, 2) plt.plot(xs[:, 0], xs[:, 1], '+') plt.plot(xt[:, 0], xt[:, 1], 'o') plt.plot(xstgw[:, 0], xstgw[:, 1], '+') plt.legend(('Source', 'Target', 'Transp. 
GW'), loc=0) plt.title('Transported samples with Gaussian GW') plt.show() ############################################################################## # Load image data # --------------- def im2mat(img): """Converts and image to matrix (one pixel per line)""" return img.reshape((img.shape[0] * img.shape[1], img.shape[2])) def mat2im(X, shape): """Converts back a matrix to an image""" return X.reshape(shape) def minmax(img): return np.clip(img, 0, 1) # Loading images this_file = os.path.realpath('__file__') data_path = os.path.join(Path(this_file).parent.parent.parent, 'data') I1 = plt.imread(os.path.join(data_path, 'ocean_day.jpg')).astype(np.float64) / 256 I2 = plt.imread(os.path.join(data_path, 'ocean_sunset.jpg')).astype(np.float64) / 256 X1 = im2mat(I1) X2 = im2mat(I2) ############################################################################## # Estimate mapping and adapt # ---------------------------- # Monge mapping mapping = ot.da.LinearTransport() mapping.fit(Xs=X1, Xt=X2) xst = mapping.transform(Xs=X1) xts = mapping.inverse_transform(Xt=X2) I1t = minmax(mat2im(xst, I1.shape)) I2t = minmax(mat2im(xts, I2.shape)) # gaussian GW mapping mapping = ot.da.LinearGWTransport() mapping.fit(Xs=X1, Xt=X2) xstgw = mapping.transform(Xs=X1) xtsgw = mapping.inverse_transform(Xt=X2) I1tgw = minmax(mat2im(xstgw, I1.shape)) I2tgw = minmax(mat2im(xtsgw, I2.shape)) # %% ############################################################################## # Plot transformed images # ----------------------- plt.figure(3, figsize=(14, 7)) plt.subplot(2, 3, 1) plt.imshow(I1) plt.axis('off') plt.title('Im. 1') plt.subplot(2, 3, 4) plt.imshow(I2) plt.axis('off') plt.title('Im. 2') plt.subplot(2, 3, 2) plt.imshow(I1t) plt.axis('off') plt.title('Monge mapping Im. 1') plt.subplot(2, 3, 5) plt.imshow(I2t) plt.axis('off') plt.title('Inverse Monge mapping Im. 2') plt.subplot(2, 3, 3) plt.imshow(I1tgw) plt.axis('off') plt.title('Gaussian GW mapping Im. 1') plt.subplot(2, 3, 6) plt.imshow(I2tgw) plt.axis('off') plt.title('Inverse Gaussian GW mapping Im. 2') python-pot-0.9.3+dfsg/examples/domain-adaptation/plot_otda_mapping.py000066400000000000000000000100231455713015700260500ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ =========================================== OT mapping estimation for domain adaptation =========================================== This example presents how to use MappingTransport to estimate at the same time both the coupling transport and approximate the transport map with either a linear or a kernelized mapping as introduced in [8]. [8] M. Perrot, N. Courty, R. Flamary, A. Habrard, "Mapping estimation for discrete optimal transport", Neural Information Processing Systems (NIPS), 2016. 
""" # Authors: Remi Flamary # Stanislas Chambon # # License: MIT License # sphinx_gallery_thumbnail_number = 2 import numpy as np import matplotlib.pylab as pl import ot ############################################################################## # Generate data # ------------- n_source_samples = 100 n_target_samples = 100 theta = 2 * np.pi / 20 noise_level = 0.1 Xs, ys = ot.datasets.make_data_classif( 'gaussrot', n_source_samples, nz=noise_level) Xs_new, _ = ot.datasets.make_data_classif( 'gaussrot', n_source_samples, nz=noise_level) Xt, yt = ot.datasets.make_data_classif( 'gaussrot', n_target_samples, theta=theta, nz=noise_level) # one of the target mode changes its variance (no linear mapping) Xt[yt == 2] *= 3 Xt = Xt + 4 ############################################################################## # Plot data # --------- pl.figure(1, (10, 5)) pl.clf() pl.scatter(Xs[:, 0], Xs[:, 1], c=ys, marker='+', label='Source samples') pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples') pl.legend(loc=0) pl.title('Source and target distributions') ############################################################################## # Instantiate the different transport algorithms and fit them # ----------------------------------------------------------- # MappingTransport with linear kernel ot_mapping_linear = ot.da.MappingTransport( kernel="linear", mu=1e0, eta=1e-8, bias=True, max_iter=20, verbose=True) ot_mapping_linear.fit(Xs=Xs, Xt=Xt) # for original source samples, transform applies barycentric mapping transp_Xs_linear = ot_mapping_linear.transform(Xs=Xs) # for out of source samples, transform applies the linear mapping transp_Xs_linear_new = ot_mapping_linear.transform(Xs=Xs_new) # MappingTransport with gaussian kernel ot_mapping_gaussian = ot.da.MappingTransport( kernel="gaussian", eta=1e-5, mu=1e-1, bias=True, sigma=1, max_iter=10, verbose=True) ot_mapping_gaussian.fit(Xs=Xs, Xt=Xt) # for original source samples, transform applies barycentric mapping transp_Xs_gaussian = ot_mapping_gaussian.transform(Xs=Xs) # for out of source samples, transform applies the gaussian mapping transp_Xs_gaussian_new = ot_mapping_gaussian.transform(Xs=Xs_new) ############################################################################## # Plot transported samples # ------------------------ pl.figure(2) pl.clf() pl.subplot(2, 2, 1) pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples', alpha=.2) pl.scatter(transp_Xs_linear[:, 0], transp_Xs_linear[:, 1], c=ys, marker='+', label='Mapped source samples') pl.title("Bary. mapping (linear)") pl.legend(loc=0) pl.subplot(2, 2, 2) pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples', alpha=.2) pl.scatter(transp_Xs_linear_new[:, 0], transp_Xs_linear_new[:, 1], c=ys, marker='+', label='Learned mapping') pl.title("Estim. mapping (linear)") pl.subplot(2, 2, 3) pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples', alpha=.2) pl.scatter(transp_Xs_gaussian[:, 0], transp_Xs_gaussian[:, 1], c=ys, marker='+', label='barycentric mapping') pl.title("Bary. mapping (kernel)") pl.subplot(2, 2, 4) pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples', alpha=.2) pl.scatter(transp_Xs_gaussian_new[:, 0], transp_Xs_gaussian_new[:, 1], c=ys, marker='+', label='Learned mapping') pl.title("Estim. 
mapping (kernel)") pl.tight_layout() pl.show() python-pot-0.9.3+dfsg/examples/domain-adaptation/plot_otda_mapping_colors_images.py000066400000000000000000000105141455713015700307630ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ ===================================================== OT for image color adaptation with mapping estimation ===================================================== OT for domain adaptation with image color adaptation [6] with mapping estimation [8]. [6] Ferradans, S., Papadakis, N., Peyre, G., & Aujol, J. F. (2014). Regularized discrete optimal transport. SIAM Journal on Imaging Sciences, 7(3), 1853-1882. [8] M. Perrot, N. Courty, R. Flamary, A. Habrard, "Mapping estimation for discrete optimal transport", Neural Information Processing Systems (NIPS), 2016. """ # Authors: Remi Flamary # Stanislas Chambon # # License: MIT License # sphinx_gallery_thumbnail_number = 3 import os from pathlib import Path import numpy as np from matplotlib import pyplot as plt import ot rng = np.random.RandomState(42) def im2mat(img): """Converts and image to matrix (one pixel per line)""" return img.reshape((img.shape[0] * img.shape[1], img.shape[2])) def mat2im(X, shape): """Converts back a matrix to an image""" return X.reshape(shape) def minmax(img): return np.clip(img, 0, 1) ############################################################################## # Generate data # ------------- # Loading images this_file = os.path.realpath('__file__') data_path = os.path.join(Path(this_file).parent.parent.parent, 'data') I1 = plt.imread(os.path.join(data_path, 'ocean_day.jpg')).astype(np.float64) / 256 I2 = plt.imread(os.path.join(data_path, 'ocean_sunset.jpg')).astype(np.float64) / 256 X1 = im2mat(I1) X2 = im2mat(I2) # training samples nb = 500 idx1 = rng.randint(X1.shape[0], size=(nb,)) idx2 = rng.randint(X2.shape[0], size=(nb,)) Xs = X1[idx1, :] Xt = X2[idx2, :] ############################################################################## # Domain adaptation for pixel distribution transfer # ------------------------------------------------- # EMDTransport ot_emd = ot.da.EMDTransport() ot_emd.fit(Xs=Xs, Xt=Xt) transp_Xs_emd = ot_emd.transform(Xs=X1) Image_emd = minmax(mat2im(transp_Xs_emd, I1.shape)) # SinkhornTransport ot_sinkhorn = ot.da.SinkhornTransport(reg_e=1e-1) ot_sinkhorn.fit(Xs=Xs, Xt=Xt) transp_Xs_sinkhorn = ot_sinkhorn.transform(Xs=X1) Image_sinkhorn = minmax(mat2im(transp_Xs_sinkhorn, I1.shape)) ot_mapping_linear = ot.da.MappingTransport( mu=1e0, eta=1e-8, bias=True, max_iter=20, verbose=True) ot_mapping_linear.fit(Xs=Xs, Xt=Xt) X1tl = ot_mapping_linear.transform(Xs=X1) Image_mapping_linear = minmax(mat2im(X1tl, I1.shape)) ot_mapping_gaussian = ot.da.MappingTransport( mu=1e0, eta=1e-2, sigma=1, bias=False, max_iter=10, verbose=True) ot_mapping_gaussian.fit(Xs=Xs, Xt=Xt) X1tn = ot_mapping_gaussian.transform(Xs=X1) # use the estimated mapping Image_mapping_gaussian = minmax(mat2im(X1tn, I1.shape)) ############################################################################## # Plot original images # -------------------- plt.figure(1, figsize=(6.4, 3)) plt.subplot(1, 2, 1) plt.imshow(I1) plt.axis('off') plt.title('Image 1') plt.subplot(1, 2, 2) plt.imshow(I2) plt.axis('off') plt.title('Image 2') plt.tight_layout() ############################################################################## # Plot pixel values distribution # ------------------------------ plt.figure(2, figsize=(6.4, 5)) plt.subplot(1, 2, 1) plt.scatter(Xs[:, 0], Xs[:, 2], c=Xs) plt.axis([0, 1, 0, 1]) 
plt.xlabel('Red')
plt.ylabel('Blue')
plt.title('Image 1')

plt.subplot(1, 2, 2)
plt.scatter(Xt[:, 0], Xt[:, 2], c=Xt)
plt.axis([0, 1, 0, 1])
plt.xlabel('Red')
plt.ylabel('Blue')
plt.title('Image 2')
plt.tight_layout()

##############################################################################
# Plot transformed images
# -----------------------

plt.figure(3, figsize=(10, 5))

plt.subplot(2, 3, 1)
plt.imshow(I1)
plt.axis('off')
plt.title('Im. 1')

plt.subplot(2, 3, 4)
plt.imshow(I2)
plt.axis('off')
plt.title('Im. 2')

plt.subplot(2, 3, 2)
plt.imshow(Image_emd)
plt.axis('off')
plt.title('EmdTransport')

plt.subplot(2, 3, 5)
plt.imshow(Image_sinkhorn)
plt.axis('off')
plt.title('SinkhornTransport')

plt.subplot(2, 3, 3)
plt.imshow(Image_mapping_linear)
plt.axis('off')
plt.title('MappingTransport (linear)')

plt.subplot(2, 3, 6)
plt.imshow(Image_mapping_gaussian)
plt.axis('off')
plt.title('MappingTransport (gaussian)')
plt.tight_layout()

plt.show()
python-pot-0.9.3+dfsg/examples/domain-adaptation/plot_otda_semi_supervised.py000066400000000000000000000111131455713015700276250ustar00rootroot00000000000000# -*- coding: utf-8 -*-
"""
============================================
OTDA unsupervised vs semi-supervised setting
============================================

This example introduces semi-supervised domain adaptation in a 2D setting.
It spells out the semi-supervised domain adaptation problem and introduces
some optimal transport approaches to solve it.

Quantities such as optimal couplings, the largest coupling coefficients and
transported samples are represented in order to give a visual understanding
of what the transport methods are doing.
"""

# Authors: Remi Flamary
#          Stanislas Chambon
#
# License: MIT License

# sphinx_gallery_thumbnail_number = 3

import matplotlib.pylab as pl
import ot


##############################################################################
# Generate data
# -------------

n_samples_source = 150
n_samples_target = 150

Xs, ys = ot.datasets.make_data_classif('3gauss', n_samples_source)
Xt, yt = ot.datasets.make_data_classif('3gauss2', n_samples_target)


##############################################################################
# Transport source samples onto target samples
# --------------------------------------------

# unsupervised domain adaptation
ot_sinkhorn_un = ot.da.SinkhornTransport(reg_e=1e-1)
ot_sinkhorn_un.fit(Xs=Xs, Xt=Xt)
transp_Xs_sinkhorn_un = ot_sinkhorn_un.transform(Xs=Xs)

# semi-supervised domain adaptation
ot_sinkhorn_semi = ot.da.SinkhornTransport(reg_e=1e-1)
ot_sinkhorn_semi.fit(Xs=Xs, Xt=Xt, ys=ys, yt=yt)
transp_Xs_sinkhorn_semi = ot_sinkhorn_semi.transform(Xs=Xs)

# Semi-supervised DA uses the available labeled target samples to modify the
# cost matrix involved in the OT problem. The cost of transporting a source
# sample of class A onto a target sample of class B != A is set to infinity,
# or to a very large value.

# Note that in the present case we consider that all the target samples are
# labeled. In real applications, some target samples might not have labels;
# in this case the elements of yt corresponding to these samples should be
# filled with -1.
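# As a hedged illustration of this convention (an added snippet, not part of
# the original example), one can hide the labels of the last 30 target
# samples and fit a transport object on the partially labeled data:

yt_partial = yt.copy()
yt_partial[-30:] = -1  # -1 marks these target samples as unlabeled
ot_sinkhorn_partial = ot.da.SinkhornTransport(reg_e=1e-1)
ot_sinkhorn_partial.fit(Xs=Xs, ys=ys, Xt=Xt, yt=yt_partial)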
# Warning: we recall that -1 cannot be used as a class label


##############################################################################
# Fig 1 : plots source and target samples + matrix of pairwise distance
# ---------------------------------------------------------------------

pl.figure(1, figsize=(10, 10))
pl.subplot(2, 2, 1)
pl.scatter(Xs[:, 0], Xs[:, 1], c=ys, marker='+', label='Source samples')
pl.xticks([])
pl.yticks([])
pl.legend(loc=0)
pl.title('Source samples')

pl.subplot(2, 2, 2)
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples')
pl.xticks([])
pl.yticks([])
pl.legend(loc=0)
pl.title('Target samples')

pl.subplot(2, 2, 3)
pl.imshow(ot_sinkhorn_un.cost_, interpolation='nearest')
pl.xticks([])
pl.yticks([])
pl.title('Cost matrix - unsupervised DA')

pl.subplot(2, 2, 4)
pl.imshow(ot_sinkhorn_semi.cost_, interpolation='nearest')
pl.xticks([])
pl.yticks([])
pl.title('Cost matrix - semi-supervised DA')

pl.tight_layout()

# The optimal coupling in the semi-supervised DA case will exhibit a
# structure similar to the cost matrix (a block-diagonal matrix).


##############################################################################
# Fig 2 : plots optimal couplings for the different methods
# ---------------------------------------------------------

pl.figure(2, figsize=(8, 4))

pl.subplot(1, 2, 1)
pl.imshow(ot_sinkhorn_un.coupling_, interpolation='nearest')
pl.xticks([])
pl.yticks([])
pl.title('Optimal coupling\nUnsupervised DA')

pl.subplot(1, 2, 2)
pl.imshow(ot_sinkhorn_semi.coupling_, interpolation='nearest')
pl.xticks([])
pl.yticks([])
pl.title('Optimal coupling\nSemi-supervised DA')

pl.tight_layout()


##############################################################################
# Fig 3 : plot transported samples
# --------------------------------

# display transported samples
pl.figure(4, figsize=(8, 4))
pl.subplot(1, 2, 1)
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',
           label='Target samples', alpha=0.5)
pl.scatter(transp_Xs_sinkhorn_un[:, 0], transp_Xs_sinkhorn_un[:, 1], c=ys,
           marker='+', label='Transp samples', s=30)
pl.title('Transported samples\nUnsupervised DA')
pl.legend(loc=0)
pl.xticks([])
pl.yticks([])

pl.subplot(1, 2, 2)
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',
           label='Target samples', alpha=0.5)
pl.scatter(transp_Xs_sinkhorn_semi[:, 0], transp_Xs_sinkhorn_semi[:, 1], c=ys,
           marker='+', label='Transp samples', s=30)
pl.title('Transported samples\nSemi-supervised DA')
pl.xticks([])
pl.yticks([])
pl.tight_layout()

pl.show()
python-pot-0.9.3+dfsg/examples/gromov/000077500000000000000000000000001455713015700177225ustar00rootroot00000000000000python-pot-0.9.3+dfsg/examples/gromov/README.txt000066400000000000000000000001111455713015700214110ustar00rootroot00000000000000
Gromov and Fused-Gromov-Wasserstein
-----------------------------------python-pot-0.9.3+dfsg/examples/gromov/plot_barycenter_fgw.py000066400000000000000000000143301455713015700243340ustar00rootroot00000000000000# -*- coding: utf-8 -*-
"""
=================================
Plot graphs barycenter using FGW
=================================

This example illustrates the computation of a barycenter of labeled graphs
using FGW [18].

Requires networkx >=2

[18] Vayer Titouan, Chapel Laetitia, Flamary Rémi, Tavenard Romain
    and Courty Nicolas
    "Optimal Transport for structured data with application on graphs"
    International Conference on Machine Learning (ICML). 2019.
""" # Author: Titouan Vayer # # License: MIT License #%% load libraries import numpy as np import matplotlib.pyplot as plt import networkx as nx import math from scipy.sparse.csgraph import shortest_path import matplotlib.colors as mcol from matplotlib import cm from ot.gromov import fgw_barycenters #%% Graph functions def find_thresh(C, inf=0.5, sup=3, step=10): """ Trick to find the adequate thresholds from where value of the C matrix are considered close enough to say that nodes are connected The threshold is found by a linesearch between values "inf" and "sup" with "step" thresholds tested. The optimal threshold is the one which minimizes the reconstruction error between the shortest_path matrix coming from the thresholded adjacency matrix and the original matrix. Parameters ---------- C : ndarray, shape (n_nodes,n_nodes) The structure matrix to threshold inf : float The beginning of the linesearch sup : float The end of the linesearch step : integer Number of thresholds tested """ dist = [] search = np.linspace(inf, sup, step) for thresh in search: Cprime = sp_to_adjacency(C, 0, thresh) SC = shortest_path(Cprime, method='D') SC[SC == float('inf')] = 100 dist.append(np.linalg.norm(SC - C)) return search[np.argmin(dist)], dist def sp_to_adjacency(C, threshinf=0.2, threshsup=1.8): """ Thresholds the structure matrix in order to compute an adjacency matrix. All values between threshinf and threshsup are considered representing connected nodes and set to 1. Else are set to 0 Parameters ---------- C : ndarray, shape (n_nodes,n_nodes) The structure matrix to threshold threshinf : float The minimum value of distance from which the new value is set to 1 threshsup : float The maximum value of distance from which the new value is set to 1 Returns ------- C : ndarray, shape (n_nodes,n_nodes) The threshold matrix. Each element is in {0,1} """ H = np.zeros_like(C) np.fill_diagonal(H, np.diagonal(C)) C = C - H C = np.minimum(np.maximum(C, threshinf), threshsup) C[C == threshsup] = 0 C[C != 0] = 1 return C def build_noisy_circular_graph(N=20, mu=0, sigma=0.3, with_noise=False, structure_noise=False, p=None): """ Create a noisy circular graph """ g = nx.Graph() g.add_nodes_from(list(range(N))) for i in range(N): noise = float(np.random.normal(mu, sigma, 1)) if with_noise: g.add_node(i, attr_name=math.sin((2 * i * math.pi / N)) + noise) else: g.add_node(i, attr_name=math.sin(2 * i * math.pi / N)) g.add_edge(i, i + 1) if structure_noise: randomint = np.random.randint(0, p) if randomint == 0: if i <= N - 3: g.add_edge(i, i + 2) if i == N - 2: g.add_edge(i, 0) if i == N - 1: g.add_edge(i, 1) g.add_edge(N, 0) noise = float(np.random.normal(mu, sigma, 1)) if with_noise: g.add_node(N, attr_name=math.sin((2 * N * math.pi / N)) + noise) else: g.add_node(N, attr_name=math.sin(2 * N * math.pi / N)) return g def graph_colors(nx_graph, vmin=0, vmax=7): cnorm = mcol.Normalize(vmin=vmin, vmax=vmax) cpick = cm.ScalarMappable(norm=cnorm, cmap='viridis') cpick.set_array([]) val_map = {} for k, v in nx.get_node_attributes(nx_graph, 'attr_name').items(): val_map[k] = cpick.to_rgba(v) colors = [] for node in nx_graph.nodes(): colors.append(val_map[node]) return colors ############################################################################## # Generate data # ------------- #%% circular dataset # We build a dataset of noisy circular graphs. # Noise is added on the structures by random connections and on the features by gaussian noise. 
np.random.seed(30) X0 = [] for k in range(9): X0.append(build_noisy_circular_graph(np.random.randint(15, 25), with_noise=True, structure_noise=True, p=3)) ############################################################################## # Plot data # --------- #%% Plot graphs plt.figure(figsize=(8, 10)) for i in range(len(X0)): plt.subplot(3, 3, i + 1) g = X0[i] pos = nx.kamada_kawai_layout(g) nx.draw(g, pos=pos, node_color=graph_colors(g, vmin=-1, vmax=1), with_labels=False, node_size=100) plt.suptitle('Dataset of noisy graphs. Color indicates the label', fontsize=20) plt.show() ############################################################################## # Barycenter computation # ---------------------- #%% We compute the barycenter using FGW. Structure matrices are computed using the shortest_path distance in the graph # Features distances are the euclidean distances Cs = [shortest_path(nx.adjacency_matrix(x).todense()) for x in X0] ps = [np.ones(len(x.nodes())) / len(x.nodes()) for x in X0] Ys = [np.array([v for (k, v) in nx.get_node_attributes(x, 'attr_name').items()]).reshape(-1, 1) for x in X0] lambdas = np.array([np.ones(len(Ys)) / len(Ys)]).ravel() sizebary = 15 # we choose a barycenter with 15 nodes A, C, log = fgw_barycenters(sizebary, Ys, Cs, ps, lambdas, alpha=0.95, log=True) ############################################################################## # Plot Barycenter # ------------------------- #%% Create the barycenter bary = nx.from_numpy_array(sp_to_adjacency(C, threshinf=0, threshsup=find_thresh(C, sup=100, step=100)[0])) for i, v in enumerate(A.ravel()): bary.add_node(i, attr_name=v) #%% pos = nx.kamada_kawai_layout(bary) nx.draw(bary, pos=pos, node_color=graph_colors(bary, vmin=-1, vmax=1), with_labels=False) plt.suptitle('Barycenter', fontsize=20) plt.show() python-pot-0.9.3+dfsg/examples/gromov/plot_entropic_semirelaxed_fgw.py000066400000000000000000000256131455713015700264110ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ ========================== Entropic-regularized semi-relaxed (Fused) Gromov-Wasserstein example ========================== This example is designed to show how to use the entropic semi-relaxed Gromov-Wasserstein and the entropic semi-relaxed Fused Gromov-Wasserstein divergences. Entropic-regularized sr(F)GW between two graphs G1 and G2 searches for a reweighing of the nodes of G2 at a minimal entropic-regularized (F)GW distance from G1. First, we generate two graphs following Stochastic Block Models, then show how to compute their srGW matchings and illustrate them. These graphs are then endowed with node features and we follow the same process with srFGW. [48] Cédric Vincent-Cuaz, Rémi Flamary, Marco Corneli, Titouan Vayer, Nicolas Courty. "Semi-relaxed Gromov-Wasserstein divergence and applications on graphs" International Conference on Learning Representations (ICLR), 2021. """ # Author: Cédric Vincent-Cuaz # # License: MIT License # sphinx_gallery_thumbnail_number = 1 import numpy as np import matplotlib.pylab as pl from ot.gromov import entropic_semirelaxed_gromov_wasserstein, entropic_semirelaxed_fused_gromov_wasserstein, gromov_wasserstein, fused_gromov_wasserstein import networkx from networkx.generators.community import stochastic_block_model as sbm ############################################################################# # # Generate two graphs following Stochastic Block models of 2 and 3 clusters. 
# --------------------------------------------- N2 = 20 # 2 communities N3 = 30 # 3 communities p2 = [[1., 0.1], [0.1, 0.9]] p3 = [[1., 0.1, 0.], [0.1, 0.95, 0.1], [0., 0.1, 0.9]] G2 = sbm(seed=0, sizes=[N2 // 2, N2 // 2], p=p2) G3 = sbm(seed=0, sizes=[N3 // 3, N3 // 3, N3 // 3], p=p3) C2 = networkx.to_numpy_array(G2) C3 = networkx.to_numpy_array(G3) h2 = np.ones(C2.shape[0]) / C2.shape[0] h3 = np.ones(C3.shape[0]) / C3.shape[0] # Add weights on the edges for visualization later on weight_intra_G2 = 5 weight_inter_G2 = 0.5 weight_intra_G3 = 1. weight_inter_G3 = 1.5 weightedG2 = networkx.Graph() part_G2 = [G2.nodes[i]['block'] for i in range(N2)] for node in G2.nodes(): weightedG2.add_node(node) for i, j in G2.edges(): if part_G2[i] == part_G2[j]: weightedG2.add_edge(i, j, weight=weight_intra_G2) else: weightedG2.add_edge(i, j, weight=weight_inter_G2) weightedG3 = networkx.Graph() part_G3 = [G3.nodes[i]['block'] for i in range(N3)] for node in G3.nodes(): weightedG3.add_node(node) for i, j in G3.edges(): if part_G3[i] == part_G3[j]: weightedG3.add_edge(i, j, weight=weight_intra_G3) else: weightedG3.add_edge(i, j, weight=weight_inter_G3) ############################################################################# # # Compute their entropic-regularized semi-relaxed Gromov-Wasserstein divergences # --------------------------------------------- # 0) GW(C2, h2, C3, h3) for reference OT, log = gromov_wasserstein(C2, C3, h2, h3, symmetric=True, log=True) gw = log['gw_dist'] # 1) srGW_e(C2, h2, C3) OT_23, log_23 = entropic_semirelaxed_gromov_wasserstein( C2, C3, h2, symmetric=True, epsilon=1., G0=None, log=True) srgw_23 = log_23['srgw_dist'] # 2) srGW_e(C3, h3, C2) OT_32, log_32 = entropic_semirelaxed_gromov_wasserstein( C3, C2, h3, symmetric=None, epsilon=1., G0=None, log=True) srgw_32 = log_32['srgw_dist'] print('GW(C2, C3) = ', gw) print('srGW_e(C2, h2, C3) = ', srgw_23) print('srGW_e(C3, h3, C2) = ', srgw_32) ############################################################################# # # Visualization of the entropic-regularized semi-relaxed Gromov-Wasserstein matchings # --------------------------------------------- # # We color nodes of the graph on the right - then project its node colors # based on the optimal transport plan from the entropic srGW matching. # We adjust the intensity of links across domains proportionaly to the mass # sent, adding a minimal intensity of 0.1 if mass sent is not zero. 
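# Added sanity check (a hedged sketch, not part of the original example): in
# the semi-relaxed problem only the first marginal is constrained, so the
# rows of the plan should still sum to h2, while the column sums give the
# reweighted target distribution used in the plots below.
print('max row-marginal error:', np.abs(OT_23.sum(axis=1) - h2).max())
print('reweighted target weights:', OT_23.sum(axis=0))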
def draw_graph(G, C, nodes_color_part, Gweights=None, pos=None, edge_color='black', node_size=None, shiftx=0, seed=0): if (pos is None): pos = networkx.spring_layout(G, scale=1., seed=seed) if shiftx != 0: for k, v in pos.items(): v[0] = v[0] + shiftx alpha_edge = 0.7 width_edge = 1.8 if Gweights is None: networkx.draw_networkx_edges(G, pos, width=width_edge, alpha=alpha_edge, edge_color=edge_color) else: # We make more visible connections between activated nodes n = len(Gweights) edgelist_activated = [] edgelist_deactivated = [] for i in range(n): for j in range(n): if Gweights[i] * Gweights[j] * C[i, j] > 0: edgelist_activated.append((i, j)) elif C[i, j] > 0: edgelist_deactivated.append((i, j)) networkx.draw_networkx_edges(G, pos, edgelist=edgelist_activated, width=width_edge, alpha=alpha_edge, edge_color=edge_color) networkx.draw_networkx_edges(G, pos, edgelist=edgelist_deactivated, width=width_edge, alpha=0.1, edge_color=edge_color) if Gweights is None: for node, node_color in enumerate(nodes_color_part): networkx.draw_networkx_nodes(G, pos, nodelist=[node], node_size=node_size, alpha=1, node_color=node_color) else: scaled_Gweights = Gweights / (0.5 * Gweights.max()) nodes_size = node_size * scaled_Gweights for node, node_color in enumerate(nodes_color_part): networkx.draw_networkx_nodes(G, pos, nodelist=[node], node_size=nodes_size[node], alpha=1, node_color=node_color) return pos def draw_transp_colored_srGW(G1, C1, G2, C2, part_G1, p1, p2, T, pos1=None, pos2=None, shiftx=4, switchx=False, node_size=70, seed_G1=0, seed_G2=0): starting_color = 0 # get graphs partition and their coloring part1 = part_G1.copy() unique_colors = ['C%s' % (starting_color + i) for i in np.unique(part1)] nodes_color_part1 = [] for cluster in part1: nodes_color_part1.append(unique_colors[cluster]) nodes_color_part2 = [] # T: getting colors assignment from argmin of columns for i in range(len(G2.nodes())): j = np.argmax(T[:, i]) nodes_color_part2.append(nodes_color_part1[j]) pos1 = draw_graph(G1, C1, nodes_color_part1, Gweights=p1, pos=pos1, node_size=node_size, shiftx=0, seed=seed_G1) pos2 = draw_graph(G2, C2, nodes_color_part2, Gweights=p2, pos=pos2, node_size=node_size, shiftx=shiftx, seed=seed_G2) for k1, v1 in pos1.items(): max_Tk1 = np.max(T[k1, :]) for k2, v2 in pos2.items(): if (T[k1, k2] > 0): pl.plot([pos1[k1][0], pos2[k2][0]], [pos1[k1][1], pos2[k2][1]], '-', lw=0.6, alpha=min(T[k1, k2] / max_Tk1 + 0.1, 1.), color=nodes_color_part1[k1]) return pos1, pos2 node_size = 40 fontsize = 10 seed_G2 = 0 seed_G3 = 4 pl.figure(1, figsize=(8, 2.5)) pl.clf() pl.subplot(121) pl.axis('off') pl.axis pl.title(r'$srGW_e(\mathbf{C_2},\mathbf{h_2},\mathbf{C_3}) =%s$' % (np.round(srgw_23, 3)), fontsize=fontsize) hbar2 = OT_23.sum(axis=0) pos1, pos2 = draw_transp_colored_srGW( weightedG2, C2, weightedG3, C3, part_G2, p1=None, p2=hbar2, T=OT_23, shiftx=1.5, node_size=node_size, seed_G1=seed_G2, seed_G2=seed_G3) pl.subplot(122) pl.axis('off') hbar3 = OT_32.sum(axis=0) pl.title(r'$srGW_e(\mathbf{C_3}, \mathbf{h_3},\mathbf{C_2}) =%s$' % (np.round(srgw_32, 3)), fontsize=fontsize) pos1, pos2 = draw_transp_colored_srGW( weightedG3, C3, weightedG2, C2, part_G3, p1=None, p2=hbar3, T=OT_32, pos1=pos2, pos2=pos1, shiftx=3., node_size=node_size, seed_G1=0, seed_G2=0) pl.tight_layout() pl.show() ############################################################################# # # Add node features # --------------------------------------------- # We add node features with given mean - by clusters # and inversely proportional to clusters' 
intra-connectivity F2 = np.zeros((N2, 1)) for i, c in enumerate(part_G2): F2[i, 0] = np.random.normal(loc=c, scale=0.01) F3 = np.zeros((N3, 1)) for i, c in enumerate(part_G3): F3[i, 0] = np.random.normal(loc=2. - c, scale=0.01) ############################################################################# # # Compute their semi-relaxed Fused Gromov-Wasserstein divergences # --------------------------------------------- alpha = 0.5 # Compute pairwise euclidean distance between node features M = (F2 ** 2).dot(np.ones((1, N3))) + np.ones((N2, 1)).dot((F3 ** 2).T) - 2 * F2.dot(F3.T) # 0) FGW_alpha(C2, F2, h2, C3, F3, h3) for reference OT, log = fused_gromov_wasserstein( M, C2, C3, h2, h3, symmetric=True, alpha=alpha, log=True) fgw = log['fgw_dist'] # 1) srFGW_e(C2, F2, h2, C3, F3) OT_23, log_23 = entropic_semirelaxed_fused_gromov_wasserstein( M, C2, C3, h2, symmetric=True, epsilon=1., alpha=0.5, log=True, G0=None) srfgw_23 = log_23['srfgw_dist'] # 2) srFGW_e(C3, F3, h3, C2, F2) OT_32, log_32 = entropic_semirelaxed_fused_gromov_wasserstein( M.T, C3, C2, h3, symmetric=None, epsilon=1., alpha=alpha, log=True, G0=None) srfgw_32 = log_32['srfgw_dist'] print('FGW(C2, F2, C3, F3) = ', fgw) print(r'$srFGW_e$(C2, F2, h2, C3, F3) = ', srfgw_23) print(r'$srFGW_e$(C3, F3, h3, C2, F2) = ', srfgw_32) ############################################################################# # # Visualization of the entropic semi-relaxed Fused Gromov-Wasserstein matchings # --------------------------------------------- # # We color nodes of the graph on the right - then project its node colors # based on the optimal transport plan from the srFGW matching # NB: colors refer to clusters - not to node features pl.figure(2, figsize=(8, 2.5)) pl.clf() pl.subplot(121) pl.axis('off') pl.title(r'$srFGW_e(\mathbf{C_2},\mathbf{F_2},\mathbf{h_2},\mathbf{C_3},\mathbf{F_3}) =%s$' % (np.round(srfgw_23, 3)), fontsize=fontsize) hbar2 = OT_23.sum(axis=0) pos1, pos2 = draw_transp_colored_srGW( weightedG2, C2, weightedG3, C3, part_G2, p1=None, p2=hbar2, T=OT_23, shiftx=1.5, node_size=node_size, seed_G1=seed_G2, seed_G2=seed_G3) pl.subplot(122) pl.axis('off') hbar3 = OT_32.sum(axis=0) pl.title(r'$srFGW_e(\mathbf{C_3}, \mathbf{F_3}, \mathbf{h_3}, \mathbf{C_2}, \mathbf{F_2}) =%s$' % (np.round(srfgw_32, 3)), fontsize=fontsize) pos1, pos2 = draw_transp_colored_srGW( weightedG3, C3, weightedG2, C2, part_G3, p1=None, p2=hbar3, T=OT_32, pos1=pos2, pos2=pos1, shiftx=3., node_size=node_size, seed_G1=0, seed_G2=0) pl.tight_layout() pl.show() python-pot-0.9.3+dfsg/examples/gromov/plot_fgw.py000066400000000000000000000104611455713015700221170ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ ============================== Plot Fused-Gromov-Wasserstein ============================== This example illustrates the computation of FGW for 1D measures estimated using a Conditional Gradient solver [24]. [24] Vayer Titouan, Chapel Laetitia, Flamary Rémi, Tavenard Romain and Courty Nicolas "Optimal Transport for structured data with application on graphs" International Conference on Machine Learning (ICML). 2019.
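For reference, the FGW objective with trade-off parameter :math:`\alpha` and square loss used in this example reads

.. math::
    \mathrm{FGW}_{\alpha} = \min_{T \in \Pi(p, q)} \ (1 - \alpha) \sum_{i,j} M_{i,j} T_{i,j}
    + \alpha \sum_{i,j,k,l} \left( C_1[i, k] - C_2[j, l] \right)^2 T_{i,j} T_{k,l}

where :math:`M` is the feature distance matrix and :math:`C_1, C_2` the structure matrices built below.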
""" # Author: Titouan Vayer # # License: MIT License # sphinx_gallery_thumbnail_number = 3 import matplotlib.pyplot as pl import numpy as np import ot from ot.gromov import gromov_wasserstein, fused_gromov_wasserstein ############################################################################## # Generate data # ------------- # parameters # We create two 1D random measures n = 20 # number of points in the first distribution n2 = 30 # number of points in the second distribution sig = 1 # std of first distribution sig2 = 0.1 # std of second distribution np.random.seed(0) phi = np.arange(n)[:, None] xs = phi + sig * np.random.randn(n, 1) ys = np.vstack((np.ones((n // 2, 1)), 0 * np.ones((n // 2, 1)))) + sig2 * np.random.randn(n, 1) phi2 = np.arange(n2)[:, None] xt = phi2 + sig * np.random.randn(n2, 1) yt = np.vstack((np.ones((n2 // 2, 1)), 0 * np.ones((n2 // 2, 1)))) + sig2 * np.random.randn(n2, 1) yt = yt[::-1, :] p = ot.unif(n) q = ot.unif(n2) ############################################################################## # Plot data # --------- # plot the distributions pl.figure(1, (7, 7)) pl.subplot(2, 1, 1) pl.scatter(ys, xs, c=phi, s=70) pl.ylabel('Feature value a', fontsize=20) pl.title('$\mu=\sum_i \delta_{x_i,a_i}$', fontsize=25, y=1) pl.xticks(()) pl.yticks(()) pl.subplot(2, 1, 2) pl.scatter(yt, xt, c=phi2, s=70) pl.xlabel('coordinates x/y', fontsize=25) pl.ylabel('Feature value b', fontsize=20) pl.title('$\\nu=\sum_j \delta_{y_j,b_j}$', fontsize=25, y=1) pl.yticks(()) pl.tight_layout() pl.show() ############################################################################## # Create structure matrices and across-feature distance matrix # ------------------------------------------------------------ # Structure matrices and across-features distance matrix C1 = ot.dist(xs) C2 = ot.dist(xt) M = ot.dist(ys, yt) w1 = ot.unif(C1.shape[0]) w2 = ot.unif(C2.shape[0]) Got = ot.emd([], [], M) ############################################################################## # Plot matrices # ------------- cmap = 'Reds' pl.figure(2, (5, 5)) fs = 15 l_x = [0, 5, 10, 15] l_y = [0, 5, 10, 15, 20, 25] gs = pl.GridSpec(5, 5) ax1 = pl.subplot(gs[3:, :2]) pl.imshow(C1, cmap=cmap, interpolation='nearest') pl.title("$C_1$", fontsize=fs) pl.xlabel("$k$", fontsize=fs) pl.ylabel("$i$", fontsize=fs) pl.xticks(l_x) pl.yticks(l_x) ax2 = pl.subplot(gs[:3, 2:]) pl.imshow(C2, cmap=cmap, interpolation='nearest') pl.title("$C_2$", fontsize=fs) pl.ylabel("$l$", fontsize=fs) pl.xticks(()) pl.yticks(l_y) ax2.set_aspect('auto') ax3 = pl.subplot(gs[3:, 2:], sharex=ax2, sharey=ax1) pl.imshow(M, cmap=cmap, interpolation='nearest') pl.yticks(l_x) pl.xticks(l_y) pl.ylabel("$i$", fontsize=fs) pl.title("$M_{AB}$", fontsize=fs) pl.xlabel("$j$", fontsize=fs) pl.tight_layout() ax3.set_aspect('auto') pl.show() ############################################################################## # Compute FGW/GW # -------------- # Computing FGW and GW alpha = 1e-3 ot.tic() Gwg, logw = fused_gromov_wasserstein(M, C1, C2, p, q, loss_fun='square_loss', alpha=alpha, verbose=True, log=True) ot.toc() # reload_ext WGW Gg, log = gromov_wasserstein(C1, C2, p, q, loss_fun='square_loss', verbose=True, log=True) ############################################################################## # Visualize transport matrices # ---------------------------- # visu OT matrix cmap = 'Blues' fs = 15 pl.figure(3, (13, 5)) pl.clf() pl.subplot(1, 3, 1) pl.imshow(Got, cmap=cmap, interpolation='nearest') pl.ylabel("$i$", fontsize=fs) pl.xticks(()) 
pl.title('Wasserstein ($M$ only)') pl.subplot(1, 3, 2) pl.imshow(Gg, cmap=cmap, interpolation='nearest') pl.title('Gromov ($C_1,C_2$ only)') pl.xticks(()) pl.subplot(1, 3, 3) pl.imshow(Gwg, cmap=cmap, interpolation='nearest') pl.title('FGW ($M+C_1,C_2$)') pl.xlabel("$j$", fontsize=fs) pl.ylabel("$i$", fontsize=fs) pl.tight_layout() pl.show() python-pot-0.9.3+dfsg/examples/gromov/plot_fgw_solvers.py000066400000000000000000000312611455713015700236750ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ ============================== Comparison of Fused Gromov-Wasserstein solvers ============================== This example illustrates the computation of FGW for attributed graphs using 4 different solvers to estimate the distance based on Conditional Gradient [24], Sinkhorn projections [12, 51] and alternated Bregman projections [63, 64]. We generate two graphs following Stochastic Block Models further endowed with node features and compute their FGW matchings. [12] Gabriel Peyré, Marco Cuturi, and Justin Solomon (2016), "Gromov-Wasserstein averaging of kernel and distance matrices". International Conference on Machine Learning (ICML). [24] Vayer Titouan, Chapel Laetitia, Flamary Rémi, Tavenard Romain and Courty Nicolas "Optimal Transport for structured data with application on graphs" International Conference on Machine Learning (ICML). 2019. [51] Xu, H., Luo, D., Zha, H., & Duke, L. C. (2019). "Gromov-wasserstein learning for graph matching and node embedding". In International Conference on Machine Learning (ICML), 2019. [63] Li, J., Tang, J., Kong, L., Liu, H., Li, J., So, A. M. C., & Blanchet, J. "A Convergent Single-Loop Algorithm for Relaxation of Gromov-Wasserstein in Graph Data". International Conference on Learning Representations (ICLR), 2023. [64] Ma, X., Chu, X., Wang, Y., Lin, Y., Zhao, J., Ma, L., & Zhu, W. "Fused Gromov-Wasserstein Graph Mixup for Graph-level Classifications". In Thirty-seventh Conference on Neural Information Processing Systems (NeurIPS), 2023. """ # Author: Cédric Vincent-Cuaz # # License: MIT License # sphinx_gallery_thumbnail_number = 1 import numpy as np import matplotlib.pylab as pl from ot.gromov import (fused_gromov_wasserstein, entropic_fused_gromov_wasserstein, BAPG_fused_gromov_wasserstein) import networkx from networkx.generators.community import stochastic_block_model as sbm from time import time ############################################################################# # # Generate two graphs following Stochastic Block models of 2 and 3 clusters. # --------------------------------------------- np.random.seed(0) N2 = 20 # 2 communities N3 = 30 # 3 communities p2 = [[1., 0.1], [0.1, 0.9]] p3 = [[1., 0.1, 0.], [0.1, 0.95, 0.1], [0., 0.1, 0.9]] G2 = sbm(seed=0, sizes=[N2 // 2, N2 // 2], p=p2) G3 = sbm(seed=0, sizes=[N3 // 3, N3 // 3, N3 // 3], p=p3) part_G2 = [G2.nodes[i]['block'] for i in range(N2)] part_G3 = [G3.nodes[i]['block'] for i in range(N3)] C2 = networkx.to_numpy_array(G2) C3 = networkx.to_numpy_array(G3) # We add node features with given mean - by clusters # and inversely proportional to clusters' intra-connectivity F2 = np.zeros((N2, 1)) for i, c in enumerate(part_G2): F2[i, 0] = np.random.normal(loc=c, scale=0.01) F3 = np.zeros((N3, 1)) for i, c in enumerate(part_G3): F3[i, 0] = np.random.normal(loc=2. 
- c, scale=0.01) # Compute pairwise euclidean distance between node features M = (F2 ** 2).dot(np.ones((1, N3))) + np.ones((N2, 1)).dot((F3 ** 2).T) - 2 * F2.dot(F3.T) h2 = np.ones(C2.shape[0]) / C2.shape[0] h3 = np.ones(C3.shape[0]) / C3.shape[0] ############################################################################# # # Compute their Fused Gromov-Wasserstein distances # --------------------------------------------- alpha = 0.5 # Conditional Gradient algorithm print('Conditional Gradient \n') start_cg = time() T_cg, log_cg = fused_gromov_wasserstein( M, C2, C3, h2, h3, 'square_loss', alpha=alpha, tol_rel=1e-9, verbose=True, log=True) end_cg = time() time_cg = 1000 * (end_cg - start_cg) # Proximal Point algorithm with Kullback-Leibler as proximal operator print('Proximal Point Algorithm \n') start_ppa = time() T_ppa, log_ppa = entropic_fused_gromov_wasserstein( M, C2, C3, h2, h3, 'square_loss', alpha=alpha, epsilon=1., solver='PPA', tol=1e-9, log=True, verbose=True, warmstart=False, numItermax=10) end_ppa = time() time_ppa = 1000 * (end_ppa - start_ppa) # Projected Gradient algorithm with entropic regularization print('Projected Gradient Descent \n') start_pgd = time() T_pgd, log_pgd = entropic_fused_gromov_wasserstein( M, C2, C3, h2, h3, 'square_loss', alpha=alpha, epsilon=0.01, solver='PGD', tol=1e-9, log=True, verbose=True, warmstart=False, numItermax=10) end_pgd = time() time_pgd = 1000 * (end_pgd - start_pgd) # Alternated Bregman Projected Gradient algorithm with Kullback-Leibler as proximal operator print('Bregman Alternated Projected Gradient \n') start_bapg = time() T_bapg, log_bapg = BAPG_fused_gromov_wasserstein( M, C2, C3, h2, h3, 'square_loss', alpha=alpha, epsilon=1., tol=1e-9, marginal_loss=True, verbose=True, log=True) end_bapg = time() time_bapg = 1000 * (end_bapg - start_bapg) print('Fused Gromov-Wasserstein distance estimated with Conditional Gradient solver: ' + str(log_cg['fgw_dist'])) print('Fused Gromov-Wasserstein distance estimated with Proximal Point solver: ' + str(log_ppa['fgw_dist'])) print('Entropic Fused Gromov-Wasserstein distance estimated with Projected Gradient solver: ' + str(log_pgd['fgw_dist'])) print('Fused Gromov-Wasserstein distance estimated with Bregman Alternated Projected Gradient solver: ' + str(log_bapg['fgw_dist'])) # compute OT sparsity level T_cg_sparsity = 100 * (T_cg == 0.).astype(np.float64).sum() / (N2 * N3) T_ppa_sparsity = 100 * (T_ppa == 0.).astype(np.float64).sum() / (N2 * N3) T_pgd_sparsity = 100 * (T_pgd == 0.).astype(np.float64).sum() / (N2 * N3) T_bapg_sparsity = 100 * (T_bapg == 0.).astype(np.float64).sum() / (N2 * N3) # Methods using Sinkhorn/Bregman projections tend to produce feasibility errors on the # marginal constraints err_cg = np.linalg.norm(T_cg.sum(1) - h2) + np.linalg.norm(T_cg.sum(0) - h3) err_ppa = np.linalg.norm(T_ppa.sum(1) - h2) + np.linalg.norm(T_ppa.sum(0) - h3) err_pgd = np.linalg.norm(T_pgd.sum(1) - h2) + np.linalg.norm(T_pgd.sum(0) - h3) err_bapg = np.linalg.norm(T_bapg.sum(1) - h2) + np.linalg.norm(T_bapg.sum(0) - h3) ############################################################################# # # Visualization of the Fused Gromov-Wasserstein matchings # --------------------------------------------- # # We color nodes of the graph on the right - then project its node colors # based on the optimal transport plan from the FGW matchings # We adjust the intensity of links across domains proportionally to the mass # sent, adding a minimal intensity of 0.1 if mass sent is not zero.
# For each matching, all node sizes are proportional to their mass computed # from marginals of the OT plan to illustrate potential feasibility errors. # NB: colors refer to clusters - not to node features # Add weights on the edges for visualization later on weight_intra_G2 = 5 weight_inter_G2 = 0.5 weight_intra_G3 = 1. weight_inter_G3 = 1.5 weightedG2 = networkx.Graph() part_G2 = [G2.nodes[i]['block'] for i in range(N2)] for node in G2.nodes(): weightedG2.add_node(node) for i, j in G2.edges(): if part_G2[i] == part_G2[j]: weightedG2.add_edge(i, j, weight=weight_intra_G2) else: weightedG2.add_edge(i, j, weight=weight_inter_G2) weightedG3 = networkx.Graph() part_G3 = [G3.nodes[i]['block'] for i in range(N3)] for node in G3.nodes(): weightedG3.add_node(node) for i, j in G3.edges(): if part_G3[i] == part_G3[j]: weightedG3.add_edge(i, j, weight=weight_intra_G3) else: weightedG3.add_edge(i, j, weight=weight_inter_G3) def draw_graph(G, C, nodes_color_part, Gweights=None, pos=None, edge_color='black', node_size=None, shiftx=0, seed=0): if (pos is None): pos = networkx.spring_layout(G, scale=1., seed=seed) if shiftx != 0: for k, v in pos.items(): v[0] = v[0] + shiftx alpha_edge = 0.7 width_edge = 1.8 if Gweights is None: networkx.draw_networkx_edges(G, pos, width=width_edge, alpha=alpha_edge, edge_color=edge_color) else: # We make more visible connections between activated nodes n = len(Gweights) edgelist_activated = [] edgelist_deactivated = [] for i in range(n): for j in range(n): if Gweights[i] * Gweights[j] * C[i, j] > 0: edgelist_activated.append((i, j)) elif C[i, j] > 0: edgelist_deactivated.append((i, j)) networkx.draw_networkx_edges(G, pos, edgelist=edgelist_activated, width=width_edge, alpha=alpha_edge, edge_color=edge_color) networkx.draw_networkx_edges(G, pos, edgelist=edgelist_deactivated, width=width_edge, alpha=0.1, edge_color=edge_color) if Gweights is None: for node, node_color in enumerate(nodes_color_part): networkx.draw_networkx_nodes(G, pos, nodelist=[node], node_size=node_size, alpha=1, node_color=node_color) else: scaled_Gweights = Gweights / (0.5 * Gweights.max()) nodes_size = node_size * scaled_Gweights for node, node_color in enumerate(nodes_color_part): networkx.draw_networkx_nodes(G, pos, nodelist=[node], node_size=nodes_size[node], alpha=1, node_color=node_color) return pos def draw_transp_colored_GW(G1, C1, G2, C2, part_G1, p1, p2, T, pos1=None, pos2=None, shiftx=4, switchx=False, node_size=70, seed_G1=0, seed_G2=0): starting_color = 0 # get graphs partition and their coloring part1 = part_G1.copy() unique_colors = ['C%s' % (starting_color + i) for i in np.unique(part1)] nodes_color_part1 = [] for cluster in part1: nodes_color_part1.append(unique_colors[cluster]) nodes_color_part2 = [] # T: getting colors assignment from argmax of columns for i in range(len(G2.nodes())): j = np.argmax(T[:, i]) nodes_color_part2.append(nodes_color_part1[j]) pos1 = draw_graph(G1, C1, nodes_color_part1, Gweights=p1, pos=pos1, node_size=node_size, shiftx=0, seed=seed_G1) pos2 = draw_graph(G2, C2, nodes_color_part2, Gweights=p2, pos=pos2, node_size=node_size, shiftx=shiftx, seed=seed_G2) for k1, v1 in pos1.items(): max_Tk1 = np.max(T[k1, :]) for k2, v2 in pos2.items(): if (T[k1, k2] > 0): pl.plot([pos1[k1][0], pos2[k2][0]], [pos1[k1][1], pos2[k2][1]], '-', lw=0.7, alpha=min(T[k1, k2] / max_Tk1 + 0.1, 1.), color=nodes_color_part1[k1]) return pos1, pos2 node_size = 40 fontsize = 13 seed_G2 = 0 seed_G3 = 4 pl.figure(2, figsize=(15, 3.5)) pl.clf() pl.subplot(141) pl.axis('off') pl.title('(CG)
FGW=%s\n \n OT sparsity = %s \n marg. error = %s \n runtime = %s' % ( np.round(log_cg['fgw_dist'], 3), str(np.round(T_cg_sparsity, 2)) + ' %', np.round(err_cg, 4), str(np.round(time_cg, 2)) + ' ms'), fontsize=fontsize) pos1, pos2 = draw_transp_colored_GW( weightedG2, C2, weightedG3, C3, part_G2, p1=T_cg.sum(1), p2=T_cg.sum(0), T=T_cg, shiftx=1.5, node_size=node_size, seed_G1=seed_G2, seed_G2=seed_G3) pl.subplot(142) pl.axis('off') pl.title('(PPA) FGW=%s\n \n OT sparsity = %s \n marg. error = %s \n runtime = %s' % ( np.round(log_ppa['fgw_dist'], 3), str(np.round(T_ppa_sparsity, 2)) + ' %', np.round(err_ppa, 4), str(np.round(time_ppa, 2)) + ' ms'), fontsize=fontsize) pos1, pos2 = draw_transp_colored_GW( weightedG2, C2, weightedG3, C3, part_G2, p1=T_ppa.sum(1), p2=T_ppa.sum(0), T=T_ppa, pos1=pos1, pos2=pos2, shiftx=0., node_size=node_size, seed_G1=0, seed_G2=0) pl.subplot(143) pl.axis('off') pl.title('(PGD) Entropic FGW=%s\n \n OT sparsity = %s \n marg. error = %s \n runtime = %s' % ( np.round(log_pgd['fgw_dist'], 3), str(np.round(T_pgd_sparsity, 2)) + ' %', np.round(err_pgd, 4), str(np.round(time_pgd, 2)) + ' ms'), fontsize=fontsize) pos1, pos2 = draw_transp_colored_GW( weightedG2, C2, weightedG3, C3, part_G2, p1=T_pgd.sum(1), p2=T_pgd.sum(0), T=T_pgd, pos1=pos1, pos2=pos2, shiftx=0., node_size=node_size, seed_G1=0, seed_G2=0) pl.subplot(144) pl.axis('off') pl.title('(BAPG) FGW=%s\n \n OT sparsity = %s \n marg. error = %s \n runtime = %s' % ( np.round(log_bapg['fgw_dist'], 3), str(np.round(T_bapg_sparsity, 2)) + ' %', np.round(err_bapg, 4), str(np.round(time_bapg, 2)) + ' ms'), fontsize=fontsize) pos1, pos2 = draw_transp_colored_GW( weightedG2, C2, weightedG3, C3, part_G2, p1=T_bapg.sum(1), p2=T_bapg.sum(0), T=T_bapg, pos1=pos1, pos2=pos2, shiftx=0., node_size=node_size, seed_G1=0, seed_G2=0) pl.tight_layout() pl.show() python-pot-0.9.3+dfsg/examples/gromov/plot_gnn_TFGW.py000066400000000000000000000164141455713015700227510ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ ============================== Graph classification with Template Based Fused Gromov Wasserstein ============================== This example illustrates how to train a graph classification GNN based on the Template Fused Gromov Wasserstein layer as proposed in [53]. [53] C. Vincent-Cuaz, R. Flamary, M. Corneli, T. Vayer, N. Courty (2022). Template based graph neural network with optimal transport distances. Advances in Neural Information Processing Systems, 35. """ # Author: Sonia Mazelet # Rémi Flamary # # License: MIT License # sphinx_gallery_thumbnail_number = 1 #%% import matplotlib.pyplot as pl import torch import networkx as nx from torch.utils.data import random_split from torch_geometric.loader import DataLoader from torch_geometric.utils import to_networkx, one_hot from torch_geometric.utils import stochastic_blockmodel_graph as sbm from torch_geometric.data import Data as GraphData import torch.nn as nn from torch_geometric.nn import Linear, GCNConv from ot.gnn import TFGWPooling from sklearn.manifold import TSNE ############################################################################## # Generate data # ------------- # parameters # We create 2 classes of stochastic block models (SBM) graphs with 1 block and 2 blocks respectively.
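# A quick note on the data layout (added for clarity, not in the original script):
# torch_geometric's stochastic_blockmodel_graph(block_sizes, edge_probs) returns an
# edge_index tensor of shape [2, num_edges] in COO format, which is exactly what
# GraphData consumes below, e.g.:
# example_edge_index = sbm([5, 5], [[0.9, 0.1], [0.1, 0.9]])  # shape [2, n_edges]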
torch.manual_seed(0) n_graphs = 50 n_nodes = 10 n_node_classes = 2 #edge probabilities for the SBMs P1 = [[0.8]] P2 = [[0.9, 0.1], [0.1, 0.9]] #block sizes block_sizes1 = [n_nodes] block_sizes2 = [n_nodes // 2, n_nodes // 2] #node features x1 = torch.tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) x1 = one_hot(x1, num_classes=n_node_classes) x1 = torch.reshape(x1, (n_nodes, n_node_classes)) x2 = torch.tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) x2 = one_hot(x2, num_classes=n_node_classes) x2 = torch.reshape(x2, (n_nodes, n_node_classes)) graphs1 = [GraphData(x=x1, edge_index=sbm(block_sizes1, P1), y=torch.tensor([0])) for i in range(n_graphs)] graphs2 = [GraphData(x=x2, edge_index=sbm(block_sizes2, P2), y=torch.tensor([1])) for i in range(n_graphs)] graphs = graphs1 + graphs2 #split the data into train and test sets train_graphs, test_graphs = random_split(graphs, [n_graphs, n_graphs]) train_loader = DataLoader(train_graphs, batch_size=10, shuffle=True) test_loader = DataLoader(test_graphs, batch_size=10, shuffle=False) #%% ############################################################################## # Plot data # --------- # plot one graph of each class fontsize = 10 pl.figure(0, figsize=(8, 2.5)) pl.clf() pl.subplot(121) pl.axis('off') pl.title('Graph of class 1', fontsize=fontsize) G = to_networkx(graphs1[0], to_undirected=True) pos = nx.spring_layout(G, seed=0) nx.draw_networkx(G, pos, with_labels=False, node_color="tab:blue") pl.subplot(122) pl.axis('off') pl.title('Graph of class 2', fontsize=fontsize) G = to_networkx(graphs2[0], to_undirected=True) pos = nx.spring_layout(G, seed=0) nx.draw_networkx(G, pos, with_labels=False, nodelist=[0, 1, 2, 3, 4], node_color="tab:blue") nx.draw_networkx(G, pos, with_labels=False, nodelist=[5, 6, 7, 8, 9], node_color="tab:red") pl.tight_layout() pl.show() #%% ############################################################################## # Pooling architecture using the TFGW layer # --------- class pooling_TFGW(nn.Module): """ Pooling architecture using the TFGW layer. """ def __init__(self, n_features, n_templates, n_template_nodes, n_classes, n_hidden_layers, feature_init_mean=0., feature_init_std=1.): """ Pooling architecture using the TFGW layer. 
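The parameter descriptions below are inferred from the code of this example and added for convenience.

Parameters
----------
n_features : int
    Number of input node features.
n_templates : int
    Number of graph templates used by the TFGW layer, which is also the dimension of the pooled graph embedding.
n_template_nodes : int
    Number of nodes in each template graph.
n_classes : int
    Number of graph classes to predict.
n_hidden_layers : int
    Dimension of the node embeddings produced by the GCN layer (a feature dimension, despite its name).
feature_init_mean : float, optional
    Mean of the normal law used to initialize the template features.
feature_init_std : float, optional
    Standard deviation of the normal law used to initialize the template features.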
""" super().__init__() self.n_templates = n_templates self.n_template_nodes = n_template_nodes self.n_hidden_layers = n_hidden_layers self.n_features = n_features self.conv = GCNConv(self.n_features, self.n_hidden_layers) self.TFGW = TFGWPooling(self.n_hidden_layers, self.n_templates, self.n_template_nodes, feature_init_mean=feature_init_mean, feature_init_std=feature_init_std) self.linear = Linear(self.n_templates, n_classes) def forward(self, x, edge_index, batch=None): x = self.conv(x, edge_index) x = self.TFGW(x, edge_index, batch) x_latent = x # save latent embeddings for visualization x = self.linear(x) return x, x_latent ############################################################################## # Graph classification training # --------- n_epochs = 25 #store latent embeddings and classes for TSNE visualization embeddings_for_TSNE = [] classes = [] model = pooling_TFGW(n_features=2, n_templates=2, n_template_nodes=2, n_classes=2, n_hidden_layers=2, feature_init_mean=0.5, feature_init_std=0.5) optimizer = torch.optim.Adam(model.parameters(), lr=0.005, weight_decay=0.0005) criterion = torch.nn.CrossEntropyLoss() all_accuracy = [] all_loss = [] for epoch in range(n_epochs): losses = [] accs = [] for data in train_loader: out, latent_embedding = model(data.x, data.edge_index, data.batch) loss = criterion(out, data.y) loss.backward() optimizer.step() pred = out.argmax(dim=1) train_correct = pred == data.y train_acc = int(train_correct.sum()) / len(data) accs.append(train_acc) losses.append(loss.item()) #store last classes and embeddings for TSNE visualization if epoch == n_epochs - 1: embeddings_for_TSNE.append(latent_embedding) classes.append(data.y) print(f'Epoch: {epoch:03d}, Loss: {torch.mean(torch.tensor(losses)):.4f},Train Accuracy: {torch.mean(torch.tensor(accs)):.4f}') all_accuracy.append(torch.mean(torch.tensor(accs))) all_loss.append(torch.mean(torch.tensor(losses))) pl.figure(1, figsize=(8, 2.5)) pl.clf() pl.subplot(121) pl.plot(all_loss) pl.xlabel('epochs') pl.title('Loss') pl.subplot(122) pl.plot(all_accuracy) pl.xlabel('epochs') pl.title('Accuracy') pl.tight_layout() pl.show() #Test test_accs = [] for data in test_loader: out, latent_embedding = model(data.x, data.edge_index, data.batch) pred = out.argmax(dim=1) test_correct = pred == data.y test_acc = int(test_correct.sum()) / len(data) test_accs.append(test_acc) embeddings_for_TSNE.append(latent_embedding) classes.append(data.y) classes = torch.hstack(classes) print(f'Test Accuracy: {torch.mean(torch.tensor(test_acc)):.4f}') #%% ############################################################################## # TSNE visualization of graph classification # --------- indices = torch.randint(2 * n_graphs, (60,)) # select a subset of embeddings for TSNE visualization latent_embeddings = torch.vstack(embeddings_for_TSNE).detach().numpy()[indices, :] TSNE_embeddings = TSNE(n_components=2, perplexity=20, random_state=1).fit_transform(latent_embeddings) class_0 = classes[indices] == 0 class_1 = classes[indices] == 1 TSNE_embeddings_0 = TSNE_embeddings[class_0, :] TSNE_embeddings_1 = TSNE_embeddings[class_1, :] pl.figure(2, figsize=(6, 2.5)) pl.scatter(TSNE_embeddings_0[:, 0], TSNE_embeddings_0[:, 1], alpha=0.5, marker='o', label='class 1') pl.scatter(TSNE_embeddings_1[:, 0], TSNE_embeddings_1[:, 1], alpha=0.5, marker='o', label='class 2') pl.legend() pl.title('TSNE in the latent space after training') pl.show() # %% 
python-pot-0.9.3+dfsg/examples/gromov/plot_gromov.py000066400000000000000000000151241455713015700226460ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ ========================== Gromov-Wasserstein example ========================== This example is designed to show how to use the Gromov-Wasserstein distance computation in POT. We first compare 3 solvers to estimate the distance based on Conditional Gradient [24] or Sinkhorn projections [12, 51]. Then we compare 2 stochastic solvers to estimate the distance with a lower numerical cost [33]. [12] Gabriel Peyré, Marco Cuturi, and Justin Solomon (2016), "Gromov-Wasserstein averaging of kernel and distance matrices". International Conference on Machine Learning (ICML). [24] Vayer Titouan, Chapel Laetitia, Flamary Rémi, Tavenard Romain and Courty Nicolas "Optimal Transport for structured data with application on graphs" International Conference on Machine Learning (ICML). 2019. [33] Kerdoncuff T., Emonet R., Marc S. "Sampled Gromov Wasserstein", Machine Learning Journal (MLJ), 2021. [51] Xu, H., Luo, D., Zha, H., & Duke, L. C. (2019). "Gromov-wasserstein learning for graph matching and node embedding". In International Conference on Machine Learning (ICML), 2019. """ # Author: Erwan Vautier # Nicolas Courty # Cédric Vincent-Cuaz # Tanguy Kerdoncuff # # License: MIT License # sphinx_gallery_thumbnail_number = 1 import scipy as sp import numpy as np import matplotlib.pylab as pl from mpl_toolkits.mplot3d import Axes3D # noqa import ot ############################################################################# # # Sample two Gaussian distributions (2D and 3D) # --------------------------------------------- # # The Gromov-Wasserstein distance makes it possible to compute distances between samples that # do not belong to the same metric space. For demonstration purposes, we sample # two Gaussian distributions in 2- and 3-dimensional spaces.
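# Recall the (square-loss) Gromov-Wasserstein objective between (C1, p) and (C2, q):
#
#     GW = min_{T in Pi(p, q)} sum_{i, j, k, l} |C1[i, k] - C2[j, l]|**2 * T[i, j] * T[k, l]
#
# Only intra-domain distance matrices appear in the objective, which is why the two
# samples below may live in spaces of different dimensions.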
n_samples = 30 # nb samples mu_s = np.array([0, 0]) cov_s = np.array([[1, 0], [0, 1]]) mu_t = np.array([4, 4, 4]) cov_t = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) np.random.seed(0) xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s) P = sp.linalg.sqrtm(cov_t) xt = np.random.randn(n_samples, 3).dot(P) + mu_t ############################################################################# # # Plotting the distributions # -------------------------- fig = pl.figure(1) ax1 = fig.add_subplot(121) ax1.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples') ax2 = fig.add_subplot(122, projection='3d') ax2.scatter(xt[:, 0], xt[:, 1], xt[:, 2], color='r') pl.show() ############################################################################# # # Compute distance kernels, normalize them and then display # --------------------------------------------------------- C1 = sp.spatial.distance.cdist(xs, xs) C2 = sp.spatial.distance.cdist(xt, xt) C1 /= C1.max() C2 /= C2.max() pl.figure(2) pl.subplot(121) pl.imshow(C1) pl.title('C1') pl.subplot(122) pl.imshow(C2) pl.title('C2') pl.show() ############################################################################# # # Compute Gromov-Wasserstein plans and distance # --------------------------------------------- p = ot.unif(n_samples) q = ot.unif(n_samples) # Conditional Gradient algorithm gw0, log0 = ot.gromov.gromov_wasserstein( C1, C2, p, q, 'square_loss', verbose=True, log=True) # Proximal Point algorithm with Kullback-Leibler as proximal operator gw, log = ot.gromov.entropic_gromov_wasserstein( C1, C2, p, q, 'square_loss', epsilon=5e-4, solver='PPA', log=True, verbose=True) # Projected Gradient algorithm with entropic regularization gwe, loge = ot.gromov.entropic_gromov_wasserstein( C1, C2, p, q, 'square_loss', epsilon=5e-4, solver='PGD', log=True, verbose=True) print('Gromov-Wasserstein distance estimated with Conditional Gradient solver: ' + str(log0['gw_dist'])) print('Gromov-Wasserstein distance estimated with Proximal Point solver: ' + str(log['gw_dist'])) print('Entropic Gromov-Wasserstein distance estimated with Projected Gradient solver: ' + str(loge['gw_dist'])) # compute OT sparsity level gw0_sparsity = 100 * (gw0 == 0.).astype(np.float64).sum() / (n_samples ** 2) gw_sparsity = 100 * (gw == 0.).astype(np.float64).sum() / (n_samples ** 2) gwe_sparsity = 100 * (gwe == 0.).astype(np.float64).sum() / (n_samples ** 2) # Methods using Sinkhorn projections tend to produce feasibility errors on the # marginal constraints err0 = np.linalg.norm(gw0.sum(1) - p) + np.linalg.norm(gw0.sum(0) - q) err = np.linalg.norm(gw.sum(1) - p) + np.linalg.norm(gw.sum(0) - q) erre = np.linalg.norm(gwe.sum(1) - p) + np.linalg.norm(gwe.sum(0) - q) pl.figure(3, (10, 6)) cmap = 'Blues' fontsize = 12 pl.subplot(131) pl.imshow(gw0, cmap=cmap) pl.title('(CG algo) GW=%s \n \n OT sparsity=%s \n feasibility error=%s' % ( np.round(log0['gw_dist'], 4), str(np.round(gw0_sparsity, 2)) + ' %', np.round(err0, 4)), fontsize=fontsize) pl.subplot(132) pl.imshow(gw, cmap=cmap) pl.title('(PP algo) GW=%s \n \n OT sparsity=%s \nfeasibility error=%s' % ( np.round(log['gw_dist'], 4), str(np.round(gw_sparsity, 2)) + ' %', np.round(err, 4)), fontsize=fontsize) pl.subplot(133) pl.imshow(gwe, cmap=cmap) pl.title('Entropic GW=%s \n \n OT sparsity=%s \nfeasibility error=%s' % ( np.round(loge['gw_dist'], 4), str(np.round(gwe_sparsity, 2)) + ' %', np.round(erre, 4)), fontsize=fontsize) pl.tight_layout() pl.show()
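# A small addition for illustration: the conditional-gradient plan solves exact linear
# OT subproblems, so its marginals should match p and q up to numerical precision,
# unlike the entropic/proximal solvers whose feasibility errors are displayed above.
print('CG plan marginal residuals: rows %.2e, columns %.2e'
      % (np.abs(gw0.sum(1) - p).max(), np.abs(gw0.sum(0) - q).max()))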
############################################################################# # # Compute GW with scalable stochastic methods with any loss function # ---------------------------------------------------------------------- def loss(x, y): return np.abs(x - y) pgw, plog = ot.gromov.pointwise_gromov_wasserstein(C1, C2, p, q, loss, max_iter=100, log=True) sgw, slog = ot.gromov.sampled_gromov_wasserstein(C1, C2, p, q, loss, epsilon=0.1, max_iter=100, log=True) print('Pointwise Gromov-Wasserstein distance estimated: ' + str(plog['gw_dist_estimated'])) print('Variance estimated: ' + str(plog['gw_dist_std'])) print('Sampled Gromov-Wasserstein distance: ' + str(slog['gw_dist_estimated'])) print('Variance estimated: ' + str(slog['gw_dist_std'])) pl.figure(4, (10, 5)) pl.subplot(121) pl.imshow(pgw.toarray(), cmap=cmap) pl.title('Pointwise Gromov Wasserstein') pl.subplot(122) pl.imshow(sgw, cmap=cmap) pl.title('Sampled Gromov Wasserstein') pl.show() python-pot-0.9.3+dfsg/examples/gromov/plot_gromov_barycenter.py000077500000000000000000000164751455713015700251000ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ ===================================== Gromov-Wasserstein Barycenter example ===================================== This example is designed to show how to use the Gromov-Wasserstein distance computation in POT. """ # Author: Erwan Vautier # Nicolas Courty # # License: MIT License import os from pathlib import Path import numpy as np import scipy as sp from matplotlib import pyplot as plt from sklearn import manifold from sklearn.decomposition import PCA import ot ############################################################################## # Smacof MDS # ---------- # # This function finds an embedding of points given a dissimilarity matrix, # which here will be the output of the barycenter algorithm def smacof_mds(C, dim, max_iter=3000, eps=1e-9): """ Returns an interpolated point cloud following the dissimilarity matrix C using SMACOF multidimensional scaling (MDS) in a target space of given dimension Parameters ---------- C : ndarray, shape (ns, ns) dissimilarity matrix dim : int dimension of the targeted space max_iter : int Maximum number of iterations of the SMACOF algorithm for a single run eps : float relative tolerance w.r.t. stress to declare convergence Returns ------- npos : ndarray, shape (ns, dim) Embedded coordinates of the interpolated point cloud (defined with one isometry) """ rng = np.random.RandomState(seed=3) mds = manifold.MDS( dim, max_iter=max_iter, eps=eps, dissimilarity='precomputed', n_init=1) pos = mds.fit(C).embedding_ nmds = manifold.MDS( dim, max_iter=max_iter, eps=eps, dissimilarity="precomputed", random_state=rng, n_init=1) npos = nmds.fit_transform(C, init=pos) return npos ############################################################################## # Data preparation # ---------------- # # The four distributions are constructed from 4 simple images def im2mat(img): """Converts an image to a matrix (one pixel per line)""" return img.reshape((img.shape[0] * img.shape[1], img.shape[2])) this_file = os.path.realpath('__file__') data_path = os.path.join(Path(this_file).parent.parent.parent, 'data') square = plt.imread(os.path.join(data_path, 'square.png')).astype(np.float64)[:, :, 2] cross = plt.imread(os.path.join(data_path, 'cross.png')).astype(np.float64)[:, :, 2] triangle = plt.imread(os.path.join(data_path, 'triangle.png')).astype(np.float64)[:, :, 2] star = plt.imread(os.path.join(data_path, 'star.png')).astype(np.float64)[:, :, 2] shapes = [square, cross,
triangle, star] S = 4 xs = [[] for i in range(S)] for nb in range(4): for i in range(8): for j in range(8): if shapes[nb][i, j] < 0.95: xs[nb].append([j, 8 - i]) xs = [np.array(xs[s]) for s in range(S)] ############################################################################## # Barycenter computation # ---------------------- ns = [len(xs[s]) for s in range(S)] n_samples = 30 """Compute all distance matrices for the four shapes""" Cs = [sp.spatial.distance.cdist(xs[s], xs[s]) for s in range(S)] Cs = [cs / cs.max() for cs in Cs] ps = [ot.unif(ns[s]) for s in range(S)] p = ot.unif(n_samples) lambdast = [[float(i) / 3, float(3 - i) / 3] for i in [1, 2]] Ct01 = [0 for i in range(2)] for i in range(2): Ct01[i] = ot.gromov.gromov_barycenters(n_samples, [Cs[0], Cs[1]], [ps[0], ps[1]], p, lambdast[i], 'square_loss', max_iter=100, tol=1e-3) Ct02 = [0 for i in range(2)] for i in range(2): Ct02[i] = ot.gromov.gromov_barycenters(n_samples, [Cs[0], Cs[2]], [ps[0], ps[2]], p, lambdast[i], 'square_loss', max_iter=100, tol=1e-3) Ct13 = [0 for i in range(2)] for i in range(2): Ct13[i] = ot.gromov.gromov_barycenters(n_samples, [Cs[1], Cs[3]], [ps[1], ps[3]], p, lambdast[i], 'square_loss', max_iter=100, tol=1e-3) Ct23 = [0 for i in range(2)] for i in range(2): Ct23[i] = ot.gromov.gromov_barycenters(n_samples, [Cs[2], Cs[3]], [ps[2], ps[3]], p, lambdast[i], 'square_loss', max_iter=100, tol=1e-3) ############################################################################## # Visualization # ------------- # # The PCA helps in getting consistency between the rotations clf = PCA(n_components=2) npos = [0, 0, 0, 0] npos = [smacof_mds(Cs[s], 2) for s in range(S)] npost01 = [0, 0] npost01 = [smacof_mds(Ct01[s], 2) for s in range(2)] npost01 = [clf.fit_transform(npost01[s]) for s in range(2)] npost02 = [0, 0] npost02 = [smacof_mds(Ct02[s], 2) for s in range(2)] npost02 = [clf.fit_transform(npost02[s]) for s in range(2)] npost13 = [0, 0] npost13 = [smacof_mds(Ct13[s], 2) for s in range(2)] npost13 = [clf.fit_transform(npost13[s]) for s in range(2)] npost23 = [0, 0] npost23 = [smacof_mds(Ct23[s], 2) for s in range(2)] npost23 = [clf.fit_transform(npost23[s]) for s in range(2)] fig = plt.figure(figsize=(10, 10)) ax1 = plt.subplot2grid((4, 4), (0, 0)) plt.xlim((-1, 1)) plt.ylim((-1, 1)) ax1.scatter(npos[0][:, 0], npos[0][:, 1], color='r') ax2 = plt.subplot2grid((4, 4), (0, 1)) plt.xlim((-1, 1)) plt.ylim((-1, 1)) ax2.scatter(npost01[1][:, 0], npost01[1][:, 1], color='b') ax3 = plt.subplot2grid((4, 4), (0, 2)) plt.xlim((-1, 1)) plt.ylim((-1, 1)) ax3.scatter(npost01[0][:, 0], npost01[0][:, 1], color='b') ax4 = plt.subplot2grid((4, 4), (0, 3)) plt.xlim((-1, 1)) plt.ylim((-1, 1)) ax4.scatter(npos[1][:, 0], npos[1][:, 1], color='r') ax5 = plt.subplot2grid((4, 4), (1, 0)) plt.xlim((-1, 1)) plt.ylim((-1, 1)) ax5.scatter(npost02[1][:, 0], npost02[1][:, 1], color='b') ax6 = plt.subplot2grid((4, 4), (1, 3)) plt.xlim((-1, 1)) plt.ylim((-1, 1)) ax6.scatter(npost13[1][:, 0], npost13[1][:, 1], color='b') ax7 = plt.subplot2grid((4, 4), (2, 0)) plt.xlim((-1, 1)) plt.ylim((-1, 1)) ax7.scatter(npost02[0][:, 0], npost02[0][:, 1], color='b') ax8 = plt.subplot2grid((4, 4), (2, 3)) plt.xlim((-1, 1)) plt.ylim((-1, 1)) ax8.scatter(npost13[0][:, 0], npost13[0][:, 1], color='b') ax9 = plt.subplot2grid((4, 4), (3, 0)) plt.xlim((-1, 1)) plt.ylim((-1, 1)) ax9.scatter(npos[2][:, 0], npos[2][:, 1], color='r') ax10 = plt.subplot2grid((4, 4), (3, 1)) plt.xlim((-1, 1)) plt.ylim((-1, 1)) ax10.scatter(npost23[1][:,
0], npost23[1][:, 1], color='b') ax11 = plt.subplot2grid((4, 4), (3, 2)) plt.xlim((-1, 1)) plt.ylim((-1, 1)) ax11.scatter(npost23[0][:, 0], npost23[0][:, 1], color='b') ax12 = plt.subplot2grid((4, 4), (3, 3)) plt.xlim((-1, 1)) plt.ylim((-1, 1)) ax12.scatter(npos[3][:, 0], npos[3][:, 1], color='r') python-pot-0.9.3+dfsg/examples/gromov/plot_gromov_wasserstein_dictionary_learning.py000077700000000000000000000326251455713015700314110ustar00rootroot00000000000000# -*- coding: utf-8 -*- r""" ===================================================== (Fused) Gromov-Wasserstein Linear Dictionary Learning ===================================================== In this example, we illustrate how to learn a Gromov-Wasserstein dictionary on a dataset of structured data such as graphs, denoted :math:`\{ \mathbf{C_s} \}_{s \in [S]}` where every node has uniform weight. Given a dictionary :math:`\mathbf{C_{dict}}` composed of D structures of a fixed size nt, each graph :math:`(\mathbf{C_s}, \mathbf{p_s})` is modeled as a convex combination :math:`\mathbf{w_s} \in \Sigma_D` of these dictionary atoms as :math:`\sum_d w_{s,d} \mathbf{C_{dict}[d]}`. First, we consider a dataset composed of graphs generated by Stochastic Block models with variable sizes taken in :math:`\{30, ... , 50\}` and numbers of clusters varying in :math:`\{ 1, 2, 3\}`. We learn a dictionary of 3 atoms by minimizing the Gromov-Wasserstein distance from each sample to its model in the dictionary with respect to the dictionary atoms. Second, we illustrate the extension of this dictionary learning framework to structured data endowed with node features by using the Fused Gromov-Wasserstein distance. Starting from the aforementioned dataset of unattributed graphs, we add discrete labels uniformly depending on the number of clusters. Then we learn and visualize attributed graph atoms where each sample is modeled as a joint convex combination between atom structures and features. [38] C. Vincent-Cuaz, T. Vayer, R. Flamary, M. Corneli, N. Courty, Online Graph Dictionary Learning, International Conference on Machine Learning (ICML), 2021. """ # Author: Cédric Vincent-Cuaz # # License: MIT License # sphinx_gallery_thumbnail_number = 4 import numpy as np import matplotlib.pylab as pl from sklearn.manifold import MDS from ot.gromov import gromov_wasserstein_linear_unmixing, gromov_wasserstein_dictionary_learning, fused_gromov_wasserstein_linear_unmixing, fused_gromov_wasserstein_dictionary_learning import ot import networkx from networkx.generators.community import stochastic_block_model as sbm ############################################################################# # # Generate a dataset composed of graphs following Stochastic Block models of 1, 2 and 3 clusters. # ----------------------------------------------------------------------------------------------- np.random.seed(42) N = 60 # number of graphs in the dataset # For every number of clusters, we generate SBM with fixed inter/intra-clusters probability.
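# Note (added for clarity): for a graph with k clusters, the SBM edge-probability
# matrix built below has p_intra = 0.9 on its diagonal and p_inter = 0.1 elsewhere,
# e.g. for k = 2: P = [[0.9, 0.1], [0.1, 0.9]].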
clusters = [1, 2, 3] Nc = N // len(clusters) # number of graphs by cluster nlabels = len(clusters) dataset = [] labels = [] p_inter = 0.1 p_intra = 0.9 for n_cluster in clusters: for i in range(Nc): n_nodes = int(np.random.uniform(low=30, high=50)) if n_cluster > 1: P = p_inter * np.ones((n_cluster, n_cluster)) np.fill_diagonal(P, p_intra) else: P = p_intra * np.eye(1) sizes = np.round(n_nodes * np.ones(n_cluster) / n_cluster).astype(np.int32) G = sbm(sizes, P, seed=i, directed=False) C = networkx.to_numpy_array(G) dataset.append(C) labels.append(n_cluster) # Visualize samples def plot_graph(x, C, binary=True, color='C0', s=None): for j in range(C.shape[0]): for i in range(j): if binary: if C[i, j] > 0: pl.plot([x[i, 0], x[j, 0]], [x[i, 1], x[j, 1]], alpha=0.2, color='k') else: # connection intensity proportional to C[i,j] pl.plot([x[i, 0], x[j, 0]], [x[i, 1], x[j, 1]], alpha=C[i, j], color='k') pl.scatter(x[:, 0], x[:, 1], c=color, s=s, zorder=10, edgecolors='k', cmap='tab10', vmax=9) pl.figure(1, (12, 8)) pl.clf() for idx_c, c in enumerate(clusters): C = dataset[(c - 1) * Nc] # sample with c clusters # get 2d position for nodes x = MDS(dissimilarity='precomputed', random_state=0).fit_transform(1 - C) pl.subplot(2, nlabels, c) pl.title('(graph) sample from label ' + str(c), fontsize=14) plot_graph(x, C, binary=True, color='C0', s=50.) pl.axis("off") pl.subplot(2, nlabels, nlabels + c) pl.title('(matrix) sample from label %s \n' % c, fontsize=14) pl.imshow(C, interpolation='nearest') pl.axis("off") pl.tight_layout() pl.show() ############################################################################# # # Estimate the Gromov-Wasserstein dictionary from the dataset # ----------------------------------------------------------- np.random.seed(0) ps = [ot.unif(C.shape[0]) for C in dataset] D = 3 # 3 atoms in the dictionary nt = 6 # of 6 nodes each q = ot.unif(nt) reg = 0. # regularization coefficient to promote sparsity of unmixings {w_s} Cdict_GW, log = gromov_wasserstein_dictionary_learning( Cs=dataset, D=D, nt=nt, ps=ps, q=q, epochs=10, batch_size=16, learning_rate=0.1, reg=reg, projection='nonnegative_symmetric', tol_outer=10**(-5), tol_inner=10**(-5), max_iter_outer=30, max_iter_inner=300, use_log=True, use_adam_optimizer=True, verbose=True ) # visualize loss evolution over epochs pl.figure(2, (4, 3)) pl.clf() pl.title('loss evolution by epoch', fontsize=14) pl.plot(log['loss_epochs']) pl.xlabel('epochs', fontsize=12) pl.ylabel('loss', fontsize=12) pl.tight_layout() pl.show() ############################################################################# # # Visualization of the estimated dictionary atoms # ----------------------------------------------- # Continuous connections between nodes of the atoms are colored in shades of grey (1: dark / 2: white) pl.figure(3, (12, 8)) pl.clf() for idx_atom, atom in enumerate(Cdict_GW): scaled_atom = (atom - atom.min()) / (atom.max() - atom.min()) x = MDS(dissimilarity='precomputed', random_state=0).fit_transform(1 - scaled_atom) pl.subplot(2, D, idx_atom + 1) pl.title('(graph) atom ' + str(idx_atom + 1), fontsize=14) plot_graph(x, atom / atom.max(), binary=False, color='C0', s=100.) 
pl.axis("off") pl.subplot(2, D, D + idx_atom + 1) pl.title('(matrix) atom %s \n' % (idx_atom + 1), fontsize=14) pl.imshow(scaled_atom, interpolation='nearest') pl.colorbar() pl.axis("off") pl.tight_layout() pl.show() ############################################################################# # # Visualization of the embedding space # ------------------------------------ unmixings = [] reconstruction_errors = [] for C in dataset: p = ot.unif(C.shape[0]) unmixing, Cembedded, OT, reconstruction_error = gromov_wasserstein_linear_unmixing( C, Cdict_GW, p=p, q=q, reg=reg, tol_outer=10**(-5), tol_inner=10**(-5), max_iter_outer=30, max_iter_inner=300 ) unmixings.append(unmixing) reconstruction_errors.append(reconstruction_error) unmixings = np.array(unmixings) print('cumulated reconstruction error:', np.array(reconstruction_errors).sum()) # Compute the 2D representation of the unmixing living in the 2-simplex of probability unmixings2D = np.zeros(shape=(N, 2)) for i, w in enumerate(unmixings): unmixings2D[i, 0] = (2. * w[1] + w[2]) / 2. unmixings2D[i, 1] = (np.sqrt(3.) * w[2]) / 2. x = [0., 0.] y = [1., 0.] z = [0.5, np.sqrt(3) / 2.] extremities = np.stack([x, y, z]) pl.figure(4, (4, 4)) pl.clf() pl.title('Embedding space', fontsize=14) for cluster in range(nlabels): start, end = Nc * cluster, Nc * (cluster + 1) if cluster == 0: pl.scatter(unmixings2D[start:end, 0], unmixings2D[start:end, 1], c='C' + str(cluster), marker='o', s=40., label='1 cluster') else: pl.scatter(unmixings2D[start:end, 0], unmixings2D[start:end, 1], c='C' + str(cluster), marker='o', s=40., label='%s clusters' % (cluster + 1)) pl.scatter(extremities[:, 0], extremities[:, 1], c='black', marker='x', s=80., label='atoms') pl.plot([x[0], y[0]], [x[1], y[1]], color='black', linewidth=2.) pl.plot([x[0], z[0]], [x[1], z[1]], color='black', linewidth=2.) pl.plot([y[0], z[0]], [y[1], z[1]], color='black', linewidth=2.) pl.axis('off') pl.legend(fontsize=11) pl.tight_layout() pl.show() ############################################################################# # # Endow the dataset with node features # ------------------------------------ # We follow this feature assignment on all nodes of a graph depending on its label/number of clusters # 1 cluster --> 0 as nodes feature # 2 clusters --> 1 as nodes feature # 3 clusters --> 2 as nodes feature # features are one-hot encoded following these assignments dataset_features = [] for i in range(len(dataset)): n = dataset[i].shape[0] F = np.zeros((n, 3)) if i < Nc: # graph with 1 cluster F[:, 0] = 1. elif i < 2 * Nc: # graph with 2 clusters F[:, 1] = 1. else: # graph with 3 clusters F[:, 2] = 1. 
dataset_features.append(F) pl.figure(5, (12, 8)) pl.clf() for idx_c, c in enumerate(clusters): C = dataset[(c - 1) * Nc] # sample with c clusters F = dataset_features[(c - 1) * Nc] colors = ['C' + str(np.argmax(F[i])) for i in range(F.shape[0])] # get 2d position for nodes x = MDS(dissimilarity='precomputed', random_state=0).fit_transform(1 - C) pl.subplot(2, nlabels, c) pl.title('(graph) sample from label ' + str(c), fontsize=14) plot_graph(x, C, binary=True, color=colors, s=50) pl.axis("off") pl.subplot(2, nlabels, nlabels + c) pl.title('(matrix) sample from label %s \n' % c, fontsize=14) pl.imshow(C, interpolation='nearest') pl.axis("off") pl.tight_layout() pl.show() ############################################################################# # # Estimate a Fused Gromov-Wasserstein dictionary from the dataset of attributed graphs # ------------------------------------------------------------------------------------ np.random.seed(0) ps = [ot.unif(C.shape[0]) for C in dataset] D = 3 # again 3 atoms in the dictionary nt = 6 q = ot.unif(nt) reg = 0.001 alpha = 0.5 # trade-off parameter between structure and feature information of Fused Gromov-Wasserstein Cdict_FGW, Ydict_FGW, log = fused_gromov_wasserstein_dictionary_learning( Cs=dataset, Ys=dataset_features, D=D, nt=nt, ps=ps, q=q, alpha=alpha, epochs=10, batch_size=16, learning_rate_C=0.1, learning_rate_Y=0.1, reg=reg, tol_outer=10**(-5), tol_inner=10**(-5), max_iter_outer=30, max_iter_inner=300, projection='nonnegative_symmetric', use_log=True, use_adam_optimizer=True, verbose=True ) # visualize loss evolution pl.figure(6, (4, 3)) pl.clf() pl.title('loss evolution by epoch', fontsize=14) pl.plot(log['loss_epochs']) pl.xlabel('epochs', fontsize=12) pl.ylabel('loss', fontsize=12) pl.tight_layout() pl.show() ############################################################################# # # Visualization of the estimated dictionary atoms # ----------------------------------------------- pl.figure(7, (12, 8)) pl.clf() for idx_atom, (Catom, Fatom) in enumerate(zip(Cdict_FGW, Ydict_FGW)): scaled_atom = (Catom - Catom.min()) / (Catom.max() - Catom.min()) colors = ['C%s' % np.argmax(Fatom[i]) for i in range(Fatom.shape[0])] x = MDS(dissimilarity='precomputed', random_state=0).fit_transform(1 - scaled_atom) pl.subplot(2, D, idx_atom + 1) pl.title('(attributed graph) atom ' + str(idx_atom + 1), fontsize=14) plot_graph(x, Catom / Catom.max(), binary=False, color=colors, s=100) pl.axis("off") pl.subplot(2, D, D + idx_atom + 1) pl.title('(matrix) atom %s \n' % (idx_atom + 1), fontsize=14) pl.imshow(scaled_atom, interpolation='nearest') pl.colorbar() pl.axis("off") pl.tight_layout() pl.show() ############################################################################# # # Visualization of the embedding space # ------------------------------------ unmixings = [] reconstruction_errors = [] for i in range(len(dataset)): C = dataset[i] Y = dataset_features[i] p = ot.unif(C.shape[0]) unmixing, Cembedded, Yembedded, OT, reconstruction_error = fused_gromov_wasserstein_linear_unmixing( C, Y, Cdict_FGW, Ydict_FGW, p=p, q=q, alpha=alpha, reg=reg, tol_outer=10**(-6), tol_inner=10**(-6), max_iter_outer=30, max_iter_inner=300 ) unmixings.append(unmixing) reconstruction_errors.append(reconstruction_error) unmixings = np.array(unmixings) print('cumulated reconstruction error:', np.array(reconstruction_errors).sum()) # Visualize unmixings in the
2-simplex of probability unmixings2D = np.zeros(shape=(N, 2)) for i, w in enumerate(unmixings): unmixings2D[i, 0] = (2. * w[1] + w[2]) / 2. unmixings2D[i, 1] = (np.sqrt(3.) * w[2]) / 2. x = [0., 0.] y = [1., 0.] z = [0.5, np.sqrt(3) / 2.] extremities = np.stack([x, y, z]) pl.figure(8, (4, 4)) pl.clf() pl.title('Embedding space', fontsize=14) for cluster in range(nlabels): start, end = Nc * cluster, Nc * (cluster + 1) if cluster == 0: pl.scatter(unmixings2D[start:end, 0], unmixings2D[start:end, 1], c='C' + str(cluster), marker='o', s=40., label='1 cluster') else: pl.scatter(unmixings2D[start:end, 0], unmixings2D[start:end, 1], c='C' + str(cluster), marker='o', s=40., label='%s clusters' % (cluster + 1)) pl.scatter(extremities[:, 0], extremities[:, 1], c='black', marker='x', s=80., label='atoms') pl.plot([x[0], y[0]], [x[1], y[1]], color='black', linewidth=2.) pl.plot([x[0], z[0]], [x[1], z[1]], color='black', linewidth=2.) pl.plot([y[0], z[0]], [y[1], z[1]], color='black', linewidth=2.) pl.axis('off') pl.legend(fontsize=11) pl.tight_layout() pl.show() python-pot-0.9.3+dfsg/examples/gromov/plot_semirelaxed_fgw.py000066400000000000000000000251561455713015700245100ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ =============================================== Semi-relaxed (Fused) Gromov-Wasserstein example =============================================== This example is designed to show how to use the semi-relaxed Gromov-Wasserstein and the semi-relaxed Fused Gromov-Wasserstein divergences. sr(F)GW between two graphs G1 and G2 searches for a reweighing of the nodes of G2 at a minimal (F)GW distance from G1. First, we generate two graphs following Stochastic Block Models, then show how to compute their srGW matchings and illustrate them. These graphs are then endowed with node features and we follow the same process with srFGW. [48] Cédric Vincent-Cuaz, Rémi Flamary, Marco Corneli, Titouan Vayer, Nicolas Courty. "Semi-relaxed Gromov-Wasserstein divergence and applications on graphs" International Conference on Learning Representations (ICLR), 2021. """ # Author: Cédric Vincent-Cuaz # # License: MIT License # sphinx_gallery_thumbnail_number = 1 import numpy as np import matplotlib.pylab as pl from ot.gromov import semirelaxed_gromov_wasserstein, semirelaxed_fused_gromov_wasserstein, gromov_wasserstein, fused_gromov_wasserstein import networkx from networkx.generators.community import stochastic_block_model as sbm ############################################################################# # # Generate two graphs following Stochastic Block models of 2 and 3 clusters. # -------------------------------------------------------------------------- N2 = 20 # 2 communities N3 = 30 # 3 communities p2 = [[1., 0.1], [0.1, 0.9]] p3 = [[1., 0.1, 0.], [0.1, 0.95, 0.1], [0., 0.1, 0.9]] G2 = sbm(seed=0, sizes=[N2 // 2, N2 // 2], p=p2) G3 = sbm(seed=0, sizes=[N3 // 3, N3 // 3, N3 // 3], p=p3) C2 = networkx.to_numpy_array(G2) C3 = networkx.to_numpy_array(G3) h2 = np.ones(C2.shape[0]) / C2.shape[0] h3 = np.ones(C3.shape[0]) / C3.shape[0] # Add weights on the edges for visualization later on weight_intra_G2 = 5 weight_inter_G2 = 0.5 weight_intra_G3 = 1. 
weight_inter_G3 = 1.5 weightedG2 = networkx.Graph() part_G2 = [G2.nodes[i]['block'] for i in range(N2)] for node in G2.nodes(): weightedG2.add_node(node) for i, j in G2.edges(): if part_G2[i] == part_G2[j]: weightedG2.add_edge(i, j, weight=weight_intra_G2) else: weightedG2.add_edge(i, j, weight=weight_inter_G2) weightedG3 = networkx.Graph() part_G3 = [G3.nodes[i]['block'] for i in range(N3)] for node in G3.nodes(): weightedG3.add_node(node) for i, j in G3.edges(): if part_G3[i] == part_G3[j]: weightedG3.add_edge(i, j, weight=weight_intra_G3) else: weightedG3.add_edge(i, j, weight=weight_inter_G3) ############################################################################# # # Compute their semi-relaxed Gromov-Wasserstein divergences # --------------------------------------------------------- # 0) GW(C2, h2, C3, h3) for reference OT, log = gromov_wasserstein(C2, C3, h2, h3, symmetric=True, log=True) gw = log['gw_dist'] # 1) srGW(C2, h2, C3) OT_23, log_23 = semirelaxed_gromov_wasserstein(C2, C3, h2, symmetric=True, log=True, G0=None) srgw_23 = log_23['srgw_dist'] # 2) srGW(C3, h3, C2) OT_32, log_32 = semirelaxed_gromov_wasserstein(C3, C2, h3, symmetric=None, log=True, G0=OT.T) srgw_32 = log_32['srgw_dist'] print('GW(C2, C3) = ', gw) print('srGW(C2, h2, C3) = ', srgw_23) print('srGW(C3, h3, C2) = ', srgw_32) ############################################################################# # # Visualization of the semi-relaxed Gromov-Wasserstein matchings # -------------------------------------------------------------- # # We color nodes of the graph on the right - then project its node colors # based on the optimal transport plan from the srGW matching def draw_graph(G, C, nodes_color_part, Gweights=None, pos=None, edge_color='black', node_size=None, shiftx=0, seed=0): if (pos is None): pos = networkx.spring_layout(G, scale=1., seed=seed) if shiftx != 0: for k, v in pos.items(): v[0] = v[0] + shiftx alpha_edge = 0.7 width_edge = 1.8 if Gweights is None: networkx.draw_networkx_edges(G, pos, width=width_edge, alpha=alpha_edge, edge_color=edge_color) else: # We make more visible connections between activated nodes n = len(Gweights) edgelist_activated = [] edgelist_deactivated = [] for i in range(n): for j in range(n): if Gweights[i] * Gweights[j] * C[i, j] > 0: edgelist_activated.append((i, j)) elif C[i, j] > 0: edgelist_deactivated.append((i, j)) networkx.draw_networkx_edges(G, pos, edgelist=edgelist_activated, width=width_edge, alpha=alpha_edge, edge_color=edge_color) networkx.draw_networkx_edges(G, pos, edgelist=edgelist_deactivated, width=width_edge, alpha=0.1, edge_color=edge_color) if Gweights is None: for node, node_color in enumerate(nodes_color_part): networkx.draw_networkx_nodes(G, pos, nodelist=[node], node_size=node_size, alpha=1, node_color=node_color) else: scaled_Gweights = Gweights / (0.5 * Gweights.max()) nodes_size = node_size * scaled_Gweights for node, node_color in enumerate(nodes_color_part): networkx.draw_networkx_nodes(G, pos, nodelist=[node], node_size=nodes_size[node], alpha=1, node_color=node_color) return pos def draw_transp_colored_srGW(G1, C1, G2, C2, part_G1, p1, p2, T, pos1=None, pos2=None, shiftx=4, switchx=False, node_size=70, seed_G1=0, seed_G2=0): starting_color = 0 # get graphs partition and their coloring part1 = part_G1.copy() unique_colors = ['C%s' % (starting_color + i) for i in np.unique(part1)] nodes_color_part1 = [] for cluster in part1: nodes_color_part1.append(unique_colors[cluster]) nodes_color_part2 = [] # T: getting colors assignment from argmax of
columns for i in range(len(G2.nodes())): j = np.argmax(T[:, i]) nodes_color_part2.append(nodes_color_part1[j]) pos1 = draw_graph(G1, C1, nodes_color_part1, Gweights=p1, pos=pos1, node_size=node_size, shiftx=0, seed=seed_G1) pos2 = draw_graph(G2, C2, nodes_color_part2, Gweights=p2, pos=pos2, node_size=node_size, shiftx=shiftx, seed=seed_G2) for k1, v1 in pos1.items(): for k2, v2 in pos2.items(): if (T[k1, k2] > 0): pl.plot([pos1[k1][0], pos2[k2][0]], [pos1[k1][1], pos2[k2][1]], '-', lw=0.8, alpha=0.5, color=nodes_color_part1[k1]) return pos1, pos2 node_size = 40 fontsize = 10 seed_G2 = 0 seed_G3 = 4 pl.figure(1, figsize=(8, 2.5)) pl.clf() pl.subplot(121) pl.axis('off') pl.axis pl.title(r'srGW$(\mathbf{C_2},\mathbf{h_2},\mathbf{C_3}) =%s$' % (np.round(srgw_23, 3)), fontsize=fontsize) hbar2 = OT_23.sum(axis=0) pos1, pos2 = draw_transp_colored_srGW( weightedG2, C2, weightedG3, C3, part_G2, p1=None, p2=hbar2, T=OT_23, shiftx=1.5, node_size=node_size, seed_G1=seed_G2, seed_G2=seed_G3) pl.subplot(122) pl.axis('off') hbar3 = OT_32.sum(axis=0) pl.title(r'srGW$(\mathbf{C_3}, \mathbf{h_3},\mathbf{C_2}) =%s$' % (np.round(srgw_32, 3)), fontsize=fontsize) pos1, pos2 = draw_transp_colored_srGW( weightedG3, C3, weightedG2, C2, part_G3, p1=None, p2=hbar3, T=OT_32, pos1=pos2, pos2=pos1, shiftx=3., node_size=node_size, seed_G1=0, seed_G2=0) pl.tight_layout() pl.show() ############################################################################# # # Add node features # ----------------- # We add node features with given mean - by clusters # and inversely proportional to clusters' intra-connectivity F2 = np.zeros((N2, 1)) for i, c in enumerate(part_G2): F2[i, 0] = np.random.normal(loc=c, scale=0.01) F3 = np.zeros((N3, 1)) for i, c in enumerate(part_G3): F3[i, 0] = np.random.normal(loc=2. 
- c, scale=0.01)

#############################################################################
#
# Compute their semi-relaxed Fused Gromov-Wasserstein divergences
# ---------------------------------------------------------------

alpha = 0.5
# Compute pairwise euclidean distance between node features
M = (F2 ** 2).dot(np.ones((1, N3))) + np.ones((N2, 1)).dot((F3 ** 2).T) - 2 * F2.dot(F3.T)

# 0) FGW_alpha(C2, F2, h2, C3, F3, h3) for reference

OT, log = fused_gromov_wasserstein(
    M, C2, C3, h2, h3, symmetric=True, alpha=alpha, log=True)
fgw = log['fgw_dist']

# 1) srFGW(C2, F2, h2, C3, F3)
OT_23, log_23 = semirelaxed_fused_gromov_wasserstein(
    M, C2, C3, h2, symmetric=True, alpha=0.5, log=True, G0=None)
srfgw_23 = log_23['srfgw_dist']

# 2) srFGW(C3, F3, h3, C2, F2)
OT_32, log_32 = semirelaxed_fused_gromov_wasserstein(
    M.T, C3, C2, h3, symmetric=None, alpha=alpha, log=True, G0=None)
srfgw_32 = log_32['srfgw_dist']

print('FGW(C2, F2, C3, F3) = ', fgw)
print('srFGW(C2, F2, h2, C3, F3) = ', srfgw_23)
print('srFGW(C3, F3, h3, C2, F2) = ', srfgw_32)

#############################################################################
#
# Visualization of the semi-relaxed Fused Gromov-Wasserstein matchings
# --------------------------------------------------------------------
#
# We color nodes of the graph on the right - then project its node colors
# based on the optimal transport plan from the srFGW matching
# NB: colors refer to clusters - not to node features

pl.figure(2, figsize=(8, 2.5))
pl.clf()
pl.subplot(121)
pl.axis('off')
pl.title(r'srFGW$(\mathbf{C_2},\mathbf{F_2},\mathbf{h_2},\mathbf{C_3},\mathbf{F_3}) =%s$' % (np.round(srfgw_23, 3)), fontsize=fontsize)

hbar2 = OT_23.sum(axis=0)
pos1, pos2 = draw_transp_colored_srGW(
    weightedG2, C2, weightedG3, C3, part_G2, p1=None, p2=hbar2, T=OT_23,
    shiftx=1.5, node_size=node_size, seed_G1=seed_G2, seed_G2=seed_G3)
pl.subplot(122)
pl.axis('off')
hbar3 = OT_32.sum(axis=0)
pl.title(r'srFGW$(\mathbf{C_3}, \mathbf{F_3}, \mathbf{h_3}, \mathbf{C_2}, \mathbf{F_2}) =%s$' % (np.round(srfgw_32, 3)), fontsize=fontsize)
pos1, pos2 = draw_transp_colored_srGW(
    weightedG3, C3, weightedG2, C2, part_G3, p1=None, p2=hbar3, T=OT_32,
    pos1=pos2, pos2=pos1, shiftx=3., node_size=node_size, seed_G1=0, seed_G2=0)
pl.tight_layout()
pl.show()
python-pot-0.9.3+dfsg/examples/others/000077500000000000000000000000001455713015700177155ustar00rootroot00000000000000python-pot-0.9.3+dfsg/examples/others/README.txt000066400000000000000000000000461455713015700214130ustar00rootroot00000000000000
Other OT problems
-----------------python-pot-0.9.3+dfsg/examples/others/plot_COOT.py000066400000000000000000000053571455713015700221010ustar00rootroot00000000000000# -*- coding: utf-8 -*-
r"""
===================================================
Row and column alignments with CO-Optimal Transport
===================================================

This example is designed to show how to use the CO-Optimal Transport [47]_ in POT.
CO-Optimal Transport computes the distance between two **arbitrary-size**
matrices and aligns both their rows and their columns.

In this example, we consider two random matrices :math:`X_1` and :math:`X_2`
defined by
:math:`(X_1)_{i,j} = \cos(\frac{i}{n_1} \pi) + \cos(\frac{j}{d_1} \pi) + \sigma \mathcal N(0,1)`
and
:math:`(X_2)_{i,j} = \cos(\frac{i}{n_2} \pi) + \cos(\frac{j}{d_2} \pi) + \sigma \mathcal N(0,1)`.

.. [47] Redko, I., Vayer, T., Flamary, R., and Courty, N. (2020).
   `CO-Optimal Transport `_.
   Advances in Neural Information Processing Systems, 33.
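
A minimal usage sketch (hedged: it assumes, consistently with the code below,
that without ``log=True`` the solver returns only the two couplings):

.. code-block:: python

    import numpy as np
    from ot.coot import co_optimal_transport as coot

    X1 = np.random.randn(20, 16)
    X2 = np.random.randn(10, 8)
    pi_sample, pi_feature = coot(X1, X2)  # row (sample) and column (feature) couplings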
""" # Author: Remi Flamary # Quang Huy Tran # License: MIT License from matplotlib.patches import ConnectionPatch import matplotlib.pylab as pl import numpy as np from ot.coot import co_optimal_transport as coot from ot.coot import co_optimal_transport2 as coot2 # %% # Generating two random matrices n1 = 20 n2 = 10 d1 = 16 d2 = 8 sigma = 0.2 X1 = ( np.cos(np.arange(n1) * np.pi / n1)[:, None] + np.cos(np.arange(d1) * np.pi / d1)[None, :] + sigma * np.random.randn(n1, d1) ) X2 = ( np.cos(np.arange(n2) * np.pi / n2)[:, None] + np.cos(np.arange(d2) * np.pi / d2)[None, :] + sigma * np.random.randn(n2, d2) ) # %% # Visualizing the matrices pl.figure(1, (8, 5)) pl.subplot(1, 2, 1) pl.imshow(X1) pl.title('$X_1$') pl.subplot(1, 2, 2) pl.imshow(X2) pl.title("$X_2$") pl.tight_layout() # %% # Visualizing the alignments of rows and columns, and calculating the CO-Optimal Transport distance pi_sample, pi_feature, log = coot(X1, X2, log=True, verbose=True) coot_distance = coot2(X1, X2) print('CO-Optimal Transport distance = {:.5f}'.format(coot_distance)) fig = pl.figure(4, (9, 7)) pl.clf() ax1 = pl.subplot(2, 2, 3) pl.imshow(X1) pl.xlabel('$X_1$') ax2 = pl.subplot(2, 2, 2) ax2.yaxis.tick_right() pl.imshow(np.transpose(X2)) pl.title("Transpose($X_2$)") ax2.xaxis.tick_top() for i in range(n1): j = np.argmax(pi_sample[i, :]) xyA = (d1 - .5, i) xyB = (j, d2 - .5) con = ConnectionPatch(xyA=xyA, xyB=xyB, coordsA=ax1.transData, coordsB=ax2.transData, color="black") fig.add_artist(con) for i in range(d1): j = np.argmax(pi_feature[i, :]) xyA = (i, -.5) xyB = (-.5, j) con = ConnectionPatch( xyA=xyA, xyB=xyB, coordsA=ax1.transData, coordsB=ax2.transData, color="blue") fig.add_artist(con) python-pot-0.9.3+dfsg/examples/others/plot_EWCA.py000066400000000000000000000110111455713015700220360ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ ======================================= Entropic Wasserstein Component Analysis ======================================= This example illustrates the use of EWCA as proposed in [52]. [52] Collas, A., Vayer, T., Flamary, F., & Breloy, A. (2023). Entropic Wasserstein Component Analysis. 
""" # Author: Antoine Collas # # License: MIT License # sphinx_gallery_thumbnail_number = 2 import numpy as np import matplotlib.pylab as pl from ot.dr import ewca from sklearn.datasets import make_blobs from matplotlib import ticker as mticker import matplotlib.patches as patches import matplotlib ############################################################################## # Generate data # ------------- n_samples = 20 esp = 0.8 centers = np.array([[esp, esp], [-esp, -esp]]) cluster_std = 0.4 rng = np.random.RandomState(42) X, y = make_blobs( n_samples=n_samples, n_features=2, centers=centers, cluster_std=cluster_std, shuffle=False, random_state=rng, ) X = X - X.mean(0) ############################################################################## # Plot data # ------------- fig = pl.figure(figsize=(4, 4)) cmap = matplotlib.colormaps.get_cmap("tab10") pl.scatter( X[: n_samples // 2, 0], X[: n_samples // 2, 1], color=[cmap(y[i] + 1) for i in range(n_samples // 2)], alpha=0.4, label="Class 1", zorder=30, s=50, ) pl.scatter( X[n_samples // 2:, 0], X[n_samples // 2:, 1], color=[cmap(y[i] + 1) for i in range(n_samples // 2, n_samples)], alpha=0.4, label="Class 2", zorder=30, s=50, ) x_y_lim = 2.5 fs = 15 pl.xlim(-x_y_lim, x_y_lim) pl.xticks([]) pl.ylim(-x_y_lim, x_y_lim) pl.yticks([]) pl.legend(fontsize=fs) pl.title("Data", fontsize=fs) pl.tight_layout() ############################################################################## # Compute EWCA # ------------- pi, U = ewca(X, k=2, reg=0.5) ############################################################################## # Plot data, first component, and projected data # ------------- fig = pl.figure(figsize=(4, 4)) scale = 3 u = U[:, 0] pl.plot( [scale * u[0], -scale * u[0]], [scale * u[1], -scale * u[1]], color="grey", linestyle="--", lw=3, alpha=0.3, label=r"$\mathbf{U}$", ) X1 = X @ u[:, None] @ u[:, None].T for i in range(n_samples): for j in range(n_samples): v = pi[i, j] / pi.max() if v >= 0.15 or (i, j) == (n_samples - 1, n_samples - 1): pl.plot( [X[i, 0], X1[j, 0]], [X[i, 1], X1[j, 1]], alpha=v, linestyle="-", c="C0", label=r"$\pi_{ij}$" if (i, j) == (n_samples - 1, n_samples - 1) else None, ) pl.scatter( X[:, 0], X[:, 1], color=[cmap(y[i] + 1) for i in range(n_samples)], alpha=0.4, label=r"$\mathbf{x}_i$", zorder=30, s=50, ) pl.scatter( X1[:, 0], X1[:, 1], color=[cmap(y[i] + 1) for i in range(n_samples)], alpha=0.9, s=50, marker="+", label=r"$\mathbf{U}\mathbf{U}^{\top}\mathbf{x}_i$", zorder=30, ) pl.title("Data and projections", fontsize=fs) pl.xlim(-x_y_lim, x_y_lim) pl.xticks([]) pl.ylim(-x_y_lim, x_y_lim) pl.yticks([]) pl.legend(fontsize=fs, loc="upper left") pl.tight_layout() ############################################################################## # Plot transport plan # ------------- fig = pl.figure(figsize=(5, 5)) norm = matplotlib.colors.PowerNorm(0.5, vmin=0, vmax=100) im = pl.imshow(n_samples * pi * 100, cmap=pl.cm.Blues, norm=norm, aspect="auto") cb = fig.colorbar(im, orientation="vertical", shrink=0.8) ticks_loc = cb.ax.get_yticks().tolist() cb.ax.yaxis.set_major_locator(mticker.FixedLocator(ticks_loc)) cb.ax.set_yticklabels([f"{int(i)}%" for i in cb.get_ticks()]) cb.ax.tick_params(labelsize=fs) for i, class_ in enumerate(np.sort(np.unique(y))): indices = y == class_ idx_min = np.min(np.arange(len(y))[indices]) idx_max = np.max(np.arange(len(y))[indices]) width = idx_max - idx_min + 1 rect = patches.Rectangle( (idx_min - 0.5, idx_min - 0.5), width, width, linewidth=1, edgecolor="r", facecolor="none", ) 
    pl.gca().add_patch(rect)

pl.title("OT plan", fontsize=fs)
pl.ylabel(r"($\mathbf{x}_1, \cdots, \mathbf{x}_n$)")
x_label = r"($\mathbf{U}\mathbf{U}^{\top}\mathbf{x}_1, \cdots,"
x_label += r"\mathbf{U}\mathbf{U}^{\top}\mathbf{x}_n$)"
pl.xlabel(x_label)
pl.tight_layout()
pl.axis("scaled")
pl.show()
python-pot-0.9.3+dfsg/examples/others/plot_SSNB.py000066400000000000000000000117501455713015700220760ustar00rootroot00000000000000# -*- coding: utf-8 -*-
r"""
=====================================================
Smooth and Strongly Convex Nearest Brenier Potentials
=====================================================

This example is designed to show how to use SSNB [58] in POT.
SSNB computes an l-strongly convex potential :math:`\varphi` with an L-Lipschitz gradient such that
:math:`\nabla \varphi \# \mu \approx \nu`. This regularity can be enforced only on the components of a partition
of the ambient space, which is a relaxation compared to imposing global regularity.

In this example, we consider a source measure :math:`\mu_s` which is the uniform measure on the unit square in
:math:`\mathbb{R}^2`, and the target measure :math:`\mu_t` which is the image of :math:`\mu_s` by
:math:`T(x_1, x_2) = (x_1 + 2\,\mathrm{sign}(x_1), 2 x_2)`. The map :math:`T` is non-smooth, and we wish to approximate
it using a "Brenier-style" map :math:`\nabla \varphi` which is regular on the partition
:math:`\lbrace x_1 \leq 0, x_1 > 0 \rbrace`, which is well adapted to this particular dataset.

We represent the gradients of the "bounding potentials" :math:`\varphi_l, \varphi_u` (from [59], Theorem 3.14),
which bound any SSNB potential which is optimal in the sense of [58], Definition 1:

.. math::
    \varphi \in \mathrm{argmin}_{\varphi \in \mathcal{F}}\ \mathrm{W}_2(\nabla \varphi \#\mu_s, \mu_t),

where :math:`\mathcal{F}` is the space of functions that are, on every set :math:`E_k`, l-strongly convex
with an L-Lipschitz gradient, given :math:`(E_k)_{k \in [K]}` a partition of the ambient source space.

We perform the optimisation on a small number of fitting samples and with few iterations,
since solving the SSNB problem is quite computationally expensive.

THIS EXAMPLE REQUIRES CVXPY

.. [58] François-Pierre Paty, Alexandre d’Aspremont, and Marco Cuturi. Regularity as regularization:
        Smooth and strongly convex brenier potentials in optimal transport. In International Conference
        on Artificial Intelligence and Statistics, pages 1222–1232. PMLR, 2020.

.. [59] Adrien B Taylor. Convex interpolation and performance estimation of first-order methods for
        convex optimization. PhD thesis, Catholic University of Louvain, Louvain-la-Neuve, Belgium, 2017.
"""

# Author: Eloi Tanguy
# License: MIT License

# sphinx_gallery_thumbnail_number = 3

import matplotlib.pyplot as plt
import numpy as np
import ot

# %%
# Generating the fitting data
n_fitting_samples = 30
rng = np.random.RandomState(seed=0)
Xs = rng.uniform(-1, 1, size=(n_fitting_samples, 2))
Xs_classes = (Xs[:, 0] < 0).astype(int)
Xt = np.stack([Xs[:, 0] + 2 * np.sign(Xs[:, 0]), 2 * Xs[:, 1]], axis=-1)

plt.scatter(Xs[Xs_classes == 0, 0], Xs[Xs_classes == 0, 1], c='blue', label='source class 0')
plt.scatter(Xs[Xs_classes == 1, 0], Xs[Xs_classes == 1, 1], c='dodgerblue', label='source class 1')
plt.scatter(Xt[:, 0], Xt[:, 1], c='red', label='target')
plt.axis('equal')
plt.title('Splitting sphere dataset')
plt.legend(loc='upper right')
plt.show()

# %%
# Fitting the Nearest Brenier Potential
L = 3  # need L > 2 to allow the 2*y term, default is 1.4
phi, G = ot.mapping.nearest_brenier_potential_fit(Xs, Xt, Xs_classes, its=10, init_method='barycentric',
                                                  gradient_lipschitz_constant=L)

# %%
# Plotting the images of the source data
plt.clf()
plt.scatter(Xs[:, 0], Xs[:, 1], c='dodgerblue', label='source')
plt.scatter(Xt[:, 0], Xt[:, 1], c='red', label='target')
for i in range(n_fitting_samples):
    plt.plot([Xs[i, 0], G[i, 0]], [Xs[i, 1], G[i, 1]], color='black', alpha=.5)
plt.title('Images of in-data source samples by the fitted SSNB')
plt.legend(loc='upper right')
plt.axis('equal')
plt.show()

# %%
# Computing the predictions (images by nabla phi) for random samples of the source distribution
n_predict_samples = 50
Ys = rng.uniform(-1, 1, size=(n_predict_samples, 2))
Ys_classes = (Ys[:, 0] < 0).astype(int)
phi_lu, G_lu = ot.mapping.nearest_brenier_potential_predict_bounds(Xs, phi, G, Ys, Xs_classes, Ys_classes,
                                                                   gradient_lipschitz_constant=L)

# %%
# Plot predictions for the gradient of the lower-bounding potential
plt.clf()
plt.scatter(Xs[:, 0], Xs[:, 1], c='dodgerblue', label='source')
plt.scatter(Xt[:, 0], Xt[:, 1], c='red', label='target')
for i in range(n_predict_samples):
    plt.plot([Ys[i, 0], G_lu[0, i, 0]], [Ys[i, 1], G_lu[0, i, 1]], color='black', alpha=.5)
plt.title('Images of new source samples by $\\nabla \\varphi_l$')
plt.legend(loc='upper right')
plt.axis('equal')
plt.show()

# %%
# Plot predictions for the gradient of the upper-bounding potential
plt.clf()
plt.scatter(Xs[:, 0], Xs[:, 1], c='dodgerblue', label='source')
plt.scatter(Xt[:, 0], Xt[:, 1], c='red', label='target')
for i in range(n_predict_samples):
    plt.plot([Ys[i, 0], G_lu[1, i, 0]], [Ys[i, 1], G_lu[1, i, 1]], color='black', alpha=.5)
plt.title('Images of new source samples by $\\nabla \\varphi_u$')
plt.legend(loc='upper right')
plt.axis('equal')
plt.show()
python-pot-0.9.3+dfsg/examples/others/plot_WDA.py000066400000000000000000000062431455713015700217450ustar00rootroot00000000000000# -*- coding: utf-8 -*-
"""
=================================
Wasserstein Discriminant Analysis
=================================

This example illustrates the use of WDA as proposed in [11].

[11] Flamary, R., Cuturi, M., Courty, N., & Rakotomamonjy, A. (2016).
Wasserstein Discriminant Analysis.
"""

# Author: Remi Flamary
#
# License: MIT License

# sphinx_gallery_thumbnail_number = 2

import numpy as np
import matplotlib.pylab as pl

from ot.dr import wda, fda


##############################################################################
# Generate data
# -------------

#%% parameters

n = 1000  # nb samples in source and target datasets
nz = 0.2

np.random.seed(1)

# generate circle dataset
t = np.random.rand(n) * 2 * np.pi
ys = np.floor((np.arange(n) * 1.0 / n * 3)) + 1
xs = np.concatenate(
    (np.cos(t).reshape((-1, 1)), np.sin(t).reshape((-1, 1))), 1)
xs = xs * ys.reshape(-1, 1) + nz * np.random.randn(n, 2)

t = np.random.rand(n) * 2 * np.pi
yt = np.floor((np.arange(n) * 1.0 / n * 3)) + 1
xt = np.concatenate(
    (np.cos(t).reshape((-1, 1)), np.sin(t).reshape((-1, 1))), 1)
xt = xt * yt.reshape(-1, 1) + nz * np.random.randn(n, 2)

nbnoise = 8

xs = np.hstack((xs, np.random.randn(n, nbnoise)))
xt = np.hstack((xt, np.random.randn(n, nbnoise)))

##############################################################################
# Plot data
# ---------

#%% plot samples

pl.figure(1, figsize=(6.4, 3.5))

pl.subplot(1, 2, 1)
pl.scatter(xt[:, 0], xt[:, 1], c=yt, marker='+', label='Target samples')
pl.legend(loc=0)
pl.title('Discriminant dimensions')

pl.subplot(1, 2, 2)
pl.scatter(xt[:, 2], xt[:, 3], c=yt, marker='+', label='Target samples')
pl.legend(loc=0)
pl.title('Other dimensions')
pl.tight_layout()

##############################################################################
# Compute Fisher Discriminant Analysis
# ------------------------------------

#%% Compute FDA
p = 2

Pfda, projfda = fda(xs, ys, p)

##############################################################################
# Compute Wasserstein Discriminant Analysis
# -----------------------------------------

#%% Compute WDA
p = 2
reg = 1e0
k = 10
maxiter = 100

P0 = np.random.randn(xs.shape[1], p)

P0 /= np.sqrt(np.sum(P0**2, 0, keepdims=True))

Pwda, projwda = wda(xs, ys, p, reg, k, maxiter=maxiter, P0=P0)

##############################################################################
# Plot 2D projections
# -------------------

#%% plot samples

xsp = projfda(xs)
xtp = projfda(xt)

xspw = projwda(xs)
xtpw = projwda(xt)

pl.figure(2)

pl.subplot(2, 2, 1)
pl.scatter(xsp[:, 0], xsp[:, 1], c=ys, marker='+', label='Projected samples')
pl.legend(loc=0)
pl.title('Projected training samples FDA')

pl.subplot(2, 2, 2)
pl.scatter(xtp[:, 0], xtp[:, 1], c=yt, marker='+', label='Projected samples')
pl.legend(loc=0)
pl.title('Projected test samples FDA')

pl.subplot(2, 2, 3)
pl.scatter(xspw[:, 0], xspw[:, 1], c=ys, marker='+', label='Projected samples')
pl.legend(loc=0)
pl.title('Projected training samples WDA')

pl.subplot(2, 2, 4)
pl.scatter(xtpw[:, 0], xtpw[:, 1], c=yt, marker='+', label='Projected samples')
pl.legend(loc=0)
pl.title('Projected test samples WDA')
pl.tight_layout()

pl.show()
python-pot-0.9.3+dfsg/examples/others/plot_WeakOT_VS_OT.py000066400000000000000000000046111455713015700234730ustar00rootroot00000000000000# -*- coding: utf-8 -*-
"""
====================================================
Weak Optimal Transport VS exact Optimal Transport
====================================================

Illustration of 2D optimal transport between distributions that are weighted
sum of Diracs. The OT matrix is plotted with the samples.
"""

# Author: Remi Flamary
#
# License: MIT License

# sphinx_gallery_thumbnail_number = 4

import numpy as np
import matplotlib.pylab as pl

import ot
import ot.plot

##############################################################################
# Generate data and plot it
# -------------------------

#%% parameters and data generation

n = 50  # nb samples

mu_s = np.array([0, 0])
cov_s = np.array([[1, 0], [0, 1]])

mu_t = np.array([4, 4])
cov_t = np.array([[1, -.8], [-.8, 1]])

xs = ot.datasets.make_2D_samples_gauss(n, mu_s, cov_s)
xt = ot.datasets.make_2D_samples_gauss(n, mu_t, cov_t)

a, b = ot.unif(n), ot.unif(n)  # uniform distribution on samples

# loss matrix
M = ot.dist(xs, xt)
M /= M.max()

#%% plot samples

pl.figure(1)
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.legend(loc=0)
pl.title('Source and target distributions')

pl.figure(2)
pl.imshow(M, interpolation='nearest')
pl.title('Cost matrix M')

##############################################################################
# Compute Weak OT and exact OT solutions
# --------------------------------------

#%% EMD

G0 = ot.emd(a, b, M)

#%% Weak OT

Gweak = ot.weak_optimal_transport(xs, xt, a, b)


##############################################################################
# Plot weak OT and exact OT solutions
# --------------------------------------

pl.figure(3, (8, 5))

pl.subplot(1, 2, 1)
pl.imshow(G0, interpolation='nearest')
pl.title('OT matrix')
pl.subplot(1, 2, 2)
pl.imshow(Gweak, interpolation='nearest')
pl.title('Weak OT matrix')

pl.figure(4, (8, 5))

pl.subplot(1, 2, 1)
ot.plot.plot2D_samples_mat(xs, xt, G0, c=[.5, .5, 1])
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.title('OT matrix with samples')

pl.subplot(1, 2, 2)
ot.plot.plot2D_samples_mat(xs, xt, Gweak, c=[.5, .5, 1])
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.title('Weak OT matrix with samples')
python-pot-0.9.3+dfsg/examples/others/plot_dmmot.py000066400000000000000000000105731455713015700224530ustar00rootroot00000000000000# -*- coding: utf-8 -*-
r"""
===============================================================================
Computing d-dimensional Barycenters via d-MMOT
===============================================================================

When the cost is discretized (Monge), the d-MMOT solver can more quickly
compute and minimize the distance between many distributions without the need
for intermediate barycenter computations. This example compares the time to
identify, and the quality of, solutions for the d-MMOT problem using a
primal/dual algorithm and classical LP barycenter approaches.
"""

# Author: Ronak Mehta
#         Xizheng Yu
#
# License: MIT License

# %%
# Generating 2 distributions
# -----
import numpy as np
import matplotlib.pyplot as pl
import ot

np.random.seed(0)
n = 100
d = 2

# Gaussian distributions
a1 = ot.datasets.make_1D_gauss(n, m=20, s=5)  # m=mean, s=std
a2 = ot.datasets.make_1D_gauss(n, m=60, s=8)
A = np.vstack((a1, a2)).T

x = np.arange(n, dtype=np.float64)
M = ot.utils.dist(x.reshape((n, 1)), metric='minkowski')

pl.figure(1, figsize=(6.4, 3))
pl.plot(x, a1, 'b', label='Source distribution')
pl.plot(x, a2, 'r', label='Target distribution')
pl.legend()

# %%
# Minimize the distances among distributions, identify the Barycenter
# -----
# The objective being minimized is different for both methods, so the objective
# values cannot be compared.

# L2 barycenter (simple weighted average of the histograms)
weights = np.ones(d) / d
l2_bary = A.dot(weights)

print('LP Iterations:')
weights = np.ones(d) / d
ot.tic()
lp_bary, lp_log = ot.lp.barycenter(
    A, M, weights, solver='interior-point', verbose=False, log=True)
print('Time\t: ', ot.toc(''))
print('Obj\t: ', lp_log['fun'])

print('')
print('Discrete MMOT Algorithm:')
ot.tic()
barys, log = ot.lp.dmmot_monge_1dgrid_optimize(
    A, niters=4000, lr_init=1e-5, lr_decay=0.997, log=True)
dmmot_obj = log['primal objective']
print('Time\t: ', ot.toc(''))
print('Obj\t: ', dmmot_obj)

# %%
# Compare Barycenters in both methods
# -----
pl.figure(1, figsize=(6.4, 3))
for i in range(len(barys)):
    if i == 0:
        pl.plot(x, barys[i], 'g-*', label='Discrete MMOT')
    else:
        continue
        # pl.plot(x, barys[i], 'g-*')
pl.plot(x, lp_bary, label='LP Barycenter')
pl.plot(x, l2_bary, label='L2 Barycenter')
pl.plot(x, a1, 'b', label='Source distribution')
pl.plot(x, a2, 'r', label='Target distribution')
pl.title('Monge Cost: Barycenters from LP Solver and dmmot solver')
pl.legend()

# %%
# More than 2 distributions
# --------------------------------------------------
# Generate 7 pseudorandom gaussian distributions with 50 bins.
n = 50  # nb bins
d = 7
vecsize = n * d

data = []
for i in range(d):
    m = n * (0.5 * np.random.rand(1)) * float(np.random.randint(2) + 1)
    a = ot.datasets.make_1D_gauss(n, m=m, s=5)
    data.append(a)

x = np.arange(n, dtype=np.float64)
M = ot.utils.dist(x.reshape((n, 1)), metric='minkowski')
A = np.vstack(data).T

pl.figure(1, figsize=(6.4, 3))
for i in range(len(data)):
    pl.plot(x, data[i])
pl.title('Distributions')
pl.legend()

# %%
# Minimizing Distances Among Many Distributions
# ---------------
# The objective being minimized is different for both methods, so the objective
# values cannot be compared.

# Perform gradient descent optimization using the d-MMOT method.
barys = ot.lp.dmmot_monge_1dgrid_optimize(
    A, niters=3000, lr_init=1e-4, lr_decay=0.997)

# after minimization, any distribution can be used as an estimate of the barycenter.
bary = barys[0]

# Compute 1D Wasserstein barycenter using the L2/LP method
weights = ot.unif(d)
l2_bary = A.dot(weights)
lp_bary, bary_log = ot.lp.barycenter(A, M, weights, solver='interior-point',
                                     verbose=False, log=True)

# %%
# Compare Barycenters in both methods
# ---------
pl.figure(1, figsize=(6.4, 3))
pl.plot(x, bary, 'g-*', label='Discrete MMOT')
pl.plot(x, l2_bary, 'k', label='L2 Barycenter')
pl.plot(x, lp_bary, 'k-', label='LP Wasserstein')
pl.title('Barycenters')
pl.legend()

# %%
# Compare with original distributions
# ---------
pl.figure(1, figsize=(6.4, 3))
for i in range(len(data)):
    pl.plot(x, data[i])
for i in range(len(barys)):
    if i == 0:
        pl.plot(x, barys[i], 'g-*', label='Discrete MMOT')
    else:
        continue
        # pl.plot(x, barys[i], 'g')
pl.plot(x, l2_bary, 'k^', label='L2')
pl.plot(x, lp_bary, 'o', color='grey', label='LP')
pl.title('Barycenters')
pl.legend()
pl.show()

# %%
python-pot-0.9.3+dfsg/examples/others/plot_factored_coupling.py000066400000000000000000000041351455713015700250170ustar00rootroot00000000000000# -*- coding: utf-8 -*-
"""
==========================================
Optimal transport with factored couplings
==========================================

Illustration of the factored coupling OT between 2D empirical
distributions

"""

# Author: Remi Flamary
#
# License: MIT License

# sphinx_gallery_thumbnail_number = 2

import numpy as np
import matplotlib.pylab as pl
import ot
import ot.plot

# %%
# Generate data and plot it
# -------------------------

# parameters and data generation

np.random.seed(42)

n = 100  # nb samples

xs = np.random.rand(n, 2) - .5

xs = xs + np.sign(xs)

xt = np.random.rand(n, 2) - .5

a, b = ot.unif(n), ot.unif(n)  # uniform distribution on samples

#%% plot samples

pl.figure(1)
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.legend(loc=0)
pl.title('Source and target distributions')


# %%
# Compute Factored OT and exact OT solutions
# ------------------------------------------

#%% EMD
M = ot.dist(xs, xt)
G0 = ot.emd(a, b, M)

#%% Factored OT
Ga, Gb, xb = ot.factored_optimal_transport(xs, xt, a, b, r=4)


# %%
# Plot factored OT and exact OT solutions
# ---------------------------------------

pl.figure(2, (14, 4))

pl.subplot(1, 3, 1)
ot.plot.plot2D_samples_mat(xs, xt, G0, c=[.2, .2, .2], alpha=0.1)
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.title('Exact OT with samples')

pl.subplot(1, 3, 2)
ot.plot.plot2D_samples_mat(xs, xb, Ga, c=[.6, .6, .9], alpha=0.5)
ot.plot.plot2D_samples_mat(xb, xt, Gb, c=[.9, .6, .6], alpha=0.5)
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.plot(xb[:, 0], xb[:, 1], 'og', label='Template samples')
pl.title('Factored OT with template samples')

pl.subplot(1, 3, 3)
ot.plot.plot2D_samples_mat(xs, xt, Ga.dot(Gb), c=[.2, .2, .2], alpha=0.1)
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.title('Factored OT low rank OT plan')
python-pot-0.9.3+dfsg/examples/others/plot_learning_weights_with_COOT.py000066400000000000000000000112041455713015700265330ustar00rootroot00000000000000# -*- coding: utf-8 -*-
r"""
===============================================================
Learning sample marginal distribution with CO-Optimal Transport
===============================================================

In this example, we illustrate how to estimate the sample
marginal distribution which minimizes the CO-Optimal Transport distance [47]_
between two matrices. More precisely, given source data
:math:`(X, \mu_x^{(s)}, \mu_x^{(f)})` and a target matrix :math:`Y` associated
with a fixed histogram on features :math:`\mu_y^{(f)}`, we want to solve the
following problem

.. math::
    \min_{\mu_y^{(s)} \in \Delta} \text{COOT}\left( (X, \mu_x^{(s)}, \mu_x^{(f)}), (Y, \mu_y^{(s)}, \mu_y^{(f)}) \right)

where :math:`\Delta` is the probability simplex. This minimization is done with a
simple projected gradient descent in PyTorch. We use the automatic backend of POT
that allows us to compute the CO-Optimal Transport distance with
:func:`ot.coot.co_optimal_transport2` with differentiable losses.

.. [47] Redko, I., Vayer, T., Flamary, R., and Courty, N. (2020).
   `CO-Optimal Transport `_.
   Advances in Neural Information Processing Systems, 33.
"""

# Author: Remi Flamary
#         Quang Huy Tran
# License: MIT License

from matplotlib.patches import ConnectionPatch
import torch
import numpy as np

import matplotlib.pyplot as pl
import ot

from ot.coot import co_optimal_transport as coot
from ot.coot import co_optimal_transport2 as coot2


# %%
# Generate data
# -------------
# The source and clean target matrices are generated by
# :math:`X_{i,j} = \cos(\frac{i}{n_1} \pi) + \cos(\frac{j}{d_1} \pi)` and
# :math:`Y_{i,j} = \cos(\frac{i}{n_2} \pi) + \cos(\frac{j}{d_2} \pi)`.
# The target matrix is then contaminated by adding 5 row outliers.
# Intuitively, we expect that the estimated sample distribution should ignore these outliers,
# i.e. their weights should be zero.

np.random.seed(182)

n1, d1 = 20, 16
n2, d2 = 10, 8
n = 15

X = (
    torch.cos(torch.arange(n1) * torch.pi / n1)[:, None]
    + torch.cos(torch.arange(d1) * torch.pi / d1)[None, :]
)

# Generate clean target data mixed with outliers
Y_noisy = torch.randn((n, d2)) * 10.0
Y_noisy[:n2, :] = (
    torch.cos(torch.arange(n2) * torch.pi / n2)[:, None]
    + torch.cos(torch.arange(d2) * torch.pi / d2)[None, :]
)
Y = Y_noisy[:n2, :]

X, Y_noisy, Y = X.double(), Y_noisy.double(), Y.double()

fig, axes = pl.subplots(nrows=1, ncols=3, figsize=(12, 5))
axes[0].imshow(X, vmin=-2, vmax=2)
axes[0].set_title('$X$')

axes[1].imshow(Y, vmin=-2, vmax=2)
axes[1].set_title('Clean $Y$')

axes[2].imshow(Y_noisy, vmin=-2, vmax=2)
axes[2].set_title('Noisy $Y$')

pl.tight_layout()

# %%
# Optimize the COOT distance with respect to the sample marginal distribution
# ---------------------------------------------------------------------------

losses = []
lr = 1e-3
niter = 1000

b = torch.tensor(ot.unif(n), requires_grad=True)

for i in range(niter):

    loss = coot2(X, Y_noisy, wy_samp=b, log=False, verbose=False)
    losses.append(float(loss))

    loss.backward()

    with torch.no_grad():
        b -= lr * b.grad  # gradient step
        b[:] = ot.utils.proj_simplex(b)  # projection on the simplex

    b.grad.zero_()

# Estimated sample marginal distribution and training loss curve
pl.plot(losses[10:])
pl.title('CO-Optimal Transport distance')

print(f"Marginal distribution = {b.detach().numpy()}")

# %%
# Visualizing the row and column alignments with the estimated sample marginal distribution
# -----------------------------------------------------------------------------------------
#
# Clearly, the learned marginal distribution completely and successfully ignores the 5 outliers.
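
# A quick hedged check with the variables above: the total mass that the
# learned histogram puts on the 5 outlier rows should be numerically negligible.
print(f"Mass on outlier rows = {b.detach().numpy()[n2:].sum():.2e}")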
X, Y_noisy = X.numpy(), Y_noisy.numpy()
b = b.detach().numpy()

pi_sample, pi_feature = coot(X, Y_noisy, wy_samp=b, log=False, verbose=True)

fig = pl.figure(4, (9, 7))
pl.clf()

ax1 = pl.subplot(2, 2, 3)
pl.imshow(X, vmin=-2, vmax=2)
pl.xlabel('$X$')

ax2 = pl.subplot(2, 2, 2)
ax2.yaxis.tick_right()
pl.imshow(np.transpose(Y_noisy), vmin=-2, vmax=2)
pl.title("Transpose(Noisy $Y$)")
ax2.xaxis.tick_top()

for i in range(n1):
    j = np.argmax(pi_sample[i, :])
    xyA = (d1 - .5, i)
    xyB = (j, d2 - .5)
    con = ConnectionPatch(xyA=xyA, xyB=xyB, coordsA=ax1.transData,
                          coordsB=ax2.transData, color="black")
    fig.add_artist(con)

for i in range(d1):
    j = np.argmax(pi_feature[i, :])
    xyA = (i, -.5)
    xyB = (-.5, j)
    con = ConnectionPatch(
        xyA=xyA, xyB=xyB, coordsA=ax1.transData, coordsB=ax2.transData, color="blue")
    fig.add_artist(con)
python-pot-0.9.3+dfsg/examples/others/plot_logo.py000066400000000000000000000055221455713015700222710ustar00rootroot00000000000000
# -*- coding: utf-8 -*-
r"""
=======================
Logo of the POT toolbox
=======================

In this example we plot the logo of the POT toolbox.

The fun part is that the logo is generated 100% in Python, using matplotlib
to plot the solution of the EMD solver from POT.

"""

# Author: Remi Flamary
#
# License: MIT License

# sphinx_gallery_thumbnail_number = 1

# %% Load modules
import numpy as np
import matplotlib.pyplot as pl
import ot

# %%
# Data for logo
# -------------

# Letter P
p1 = np.array([[0, 6.], [0, 5], [0, 4], [0, 3], [0, 2], [0, 1], ])
p2 = np.array([[1.5, 6], [2, 4], [2, 5], [1.5, 3], [0.5, 2], [.5, 1], ])

# Letter O
o1 = np.array([[0, 6.], [-1, 5], [-1.5, 4], [-1.5, 3], [-1, 2], [0, 1], ])
o2 = np.array([[1, 6.], [2, 5], [2.5, 4], [2.5, 3], [2, 2], [1, 1], ])

# Scaling and translation for letter O
o1[:, 0] += 6.4
o2[:, 0] += 6.4
o1[:, 0] *= 0.6
o2[:, 0] *= 0.6

# Letter T
t1 = np.array([[-1, 6.], [-1, 5], [0, 4], [0, 3], [0, 2], [0, 1], ])
t2 = np.array([[1.5, 6.], [1.5, 5], [0.5, 4], [0.5, 3], [0.5, 2], [0.5, 1], ])

# Translating the T
t1[:, 0] += 7.1
t2[:, 0] += 7.1

# Concatenate all letters
x1 = np.concatenate((p1, o1, t1), axis=0)
x2 = np.concatenate((p2, o2, t2), axis=0)

# Horizontal and vertical scaling
sx = 1.0
sy = .5
x1[:, 0] *= sx
x1[:, 1] *= sy
x2[:, 0] *= sx
x2[:, 1] *= sy

# %%
# Plot the logo (clear background)
# --------------------------------

# Solve OT problem between the points
M = ot.dist(x1, x2, metric='euclidean')
T = ot.emd([], [], M)

pl.figure(1, (3.5, 1.1))
pl.clf()

# plot the OT plan
for i in range(M.shape[0]):
    for j in range(M.shape[1]):
        if T[i, j] > 1e-8:
            pl.plot([x1[i, 0], x2[j, 0]], [x1[i, 1], x2[j, 1]], color='k', alpha=0.6, linewidth=3, zorder=1)

# plot the samples
pl.plot(x1[:, 0], x1[:, 1], 'o', markerfacecolor='C3', markeredgecolor='k')
pl.plot(x2[:, 0], x2[:, 1], 'o', markerfacecolor='b', markeredgecolor='k')

pl.axis('equal')
pl.axis('off')

# Save logo file
# pl.savefig('logo.svg', dpi=150, transparent=True, bbox_inches='tight')
# pl.savefig('logo.png', dpi=150, transparent=True, bbox_inches='tight')

# %%
# Plot the logo (dark background)
# --------------------------------

pl.figure(2, (3.5, 1.1), facecolor='darkgray')
pl.clf()

# plot the OT plan
for i in range(M.shape[0]):
    for j in range(M.shape[1]):
        if T[i, j] > 1e-8:
            pl.plot([x1[i, 0], x2[j, 0]], [x1[i, 1], x2[j, 1]], color='w', alpha=0.8, linewidth=3, zorder=1)

# plot the samples
pl.plot(x1[:, 0], x1[:, 1], 'o', markerfacecolor='w', markeredgecolor='w')
pl.plot(x2[:, 0], x2[:, 1], 'o', markerfacecolor='w', markeredgecolor='w')

pl.axis('equal')
pl.axis('off')

# Save logo file
# pl.savefig('logo_dark.svg', dpi=150, transparent=True, bbox_inches='tight')
# pl.savefig('logo_dark.png', dpi=150, transparent=True, bbox_inches='tight')
python-pot-0.9.3+dfsg/examples/others/plot_lowrank_sinkhorn.py000066400000000000000000000057761455713015700247300ustar00rootroot00000000000000# -*- coding: utf-8 -*-
"""
========================================
Low rank Sinkhorn
========================================

This example illustrates the computation of Low Rank Sinkhorn [65].

[65] Scetbon, M., Cuturi, M., & Peyré, G. (2021).
"Low-rank Sinkhorn factorization". In International Conference on Machine Learning.
"""

# Author: Laurène David
#
# License: MIT License
#
# sphinx_gallery_thumbnail_number = 2

import numpy as np
import matplotlib.pylab as pl
import ot.plot
from ot.datasets import make_1D_gauss as gauss

##############################################################################
# Generate data
# -------------

#%% parameters

n = 100
m = 120

# Gaussian distribution
a = gauss(n, m=int(n / 3), s=25 / np.sqrt(2)) + 1.5 * gauss(n, m=int(5 * n / 6), s=15 / np.sqrt(2))
a = a / np.sum(a)

b = 2 * gauss(m, m=int(m / 5), s=30 / np.sqrt(2)) + gauss(m, m=int(m / 2), s=35 / np.sqrt(2))
b = b / np.sum(b)

# Source and target distribution
X = np.arange(n).reshape(-1, 1)
Y = np.arange(m).reshape(-1, 1)


##############################################################################
# Solve Low rank sinkhorn
# -----------------------

#%%
# Solve low rank sinkhorn
Q, R, g, log = ot.lowrank_sinkhorn(X, Y, a, b, rank=10, init="random", gamma_init="rescale",
                                   rescale_cost=True, warn=False, log=True)
P = log["lazy_plan"][:]

ot.plot.plot1D_mat(a, b, P, 'OT matrix Low rank')


##############################################################################
# Sinkhorn vs Low Rank Sinkhorn
# -----------------------------
# Compare Sinkhorn and Low rank sinkhorn with different
# regularizations and ranks.
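
#%%
# A hedged aside on the comparison below: the low-rank plan above was recovered
# through ``log["lazy_plan"]`` and densified with ``[:]`` only for plotting;
# the factorization itself never stores a dense coupling.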
#%% Sinkhorn # Compute cost matrix for sinkhorn OT M = ot.dist(X, Y) M = M / np.max(M) # Solve sinkhorn with different regularizations using ot.solve list_reg = [0.05, 0.005, 0.001] list_P_Sin = [] for reg in list_reg: P = ot.solve(M, a, b, reg=reg, max_iter=2000, tol=1e-8).plan list_P_Sin.append(P) #%% Low rank sinkhorn # Solve low rank sinkhorn with different ranks using ot.solve_sample list_rank = [3, 10, 50] list_P_LR = [] for rank in list_rank: P = ot.solve_sample(X, Y, a, b, method='lowrank', rank=rank).plan P = P[:] list_P_LR.append(P) #%% # Plot sinkhorn vs low rank sinkhorn pl.figure(1, figsize=(10, 4)) pl.subplot(1, 3, 1) pl.imshow(list_P_Sin[0], interpolation='nearest') pl.axis('off') pl.title('Sinkhorn (reg=0.05)') pl.subplot(1, 3, 2) pl.imshow(list_P_Sin[1], interpolation='nearest') pl.axis('off') pl.title('Sinkhorn (reg=0.005)') pl.subplot(1, 3, 3) pl.imshow(list_P_Sin[2], interpolation='nearest') pl.axis('off') pl.title('Sinkhorn (reg=0.001)') pl.show() #%% pl.figure(2, figsize=(10, 4)) pl.subplot(1, 3, 1) pl.imshow(list_P_LR[0], interpolation='nearest') pl.axis('off') pl.title('Low rank (rank=3)') pl.subplot(1, 3, 2) pl.imshow(list_P_LR[1], interpolation='nearest') pl.axis('off') pl.title('Low rank (rank=10)') pl.subplot(1, 3, 3) pl.imshow(list_P_LR[2], interpolation='nearest') pl.axis('off') pl.title('Low rank (rank=50)') pl.tight_layout() python-pot-0.9.3+dfsg/examples/others/plot_screenkhorn_1D.py000066400000000000000000000037101455713015700241730ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ ======================================== Screened optimal transport (Screenkhorn) ======================================== This example illustrates the computation of Screenkhorn [26]. [26] Alaya M. Z., Bérar M., Gasso G., Rakotomamonjy A. (2019). Screening Sinkhorn Algorithm for Regularized Optimal Transport, Advances in Neural Information Processing Systems 33 (NeurIPS). """ # Author: Mokhtar Z. 
Alaya # # License: MIT License import numpy as np import matplotlib.pylab as pl import ot.plot from ot.datasets import make_1D_gauss as gauss from ot.bregman import screenkhorn ############################################################################## # Generate data # ------------- #%% parameters n = 100 # nb bins # bin positions x = np.arange(n, dtype=np.float64) # Gaussian distributions a = gauss(n, m=20, s=5) # m= mean, s= std b = gauss(n, m=60, s=10) # loss matrix M = ot.dist(x.reshape((n, 1)), x.reshape((n, 1))) M /= M.max() ############################################################################## # Plot distributions and loss matrix # ---------------------------------- #%% plot the distributions pl.figure(1, figsize=(6.4, 3)) pl.plot(x, a, 'b', label='Source distribution') pl.plot(x, b, 'r', label='Target distribution') pl.legend() # plot distributions and loss matrix pl.figure(2, figsize=(5, 5)) ot.plot.plot1D_mat(a, b, M, 'Cost matrix M') ############################################################################## # Solve Screenkhorn # ----------------------- # Screenkhorn lambd = 2e-03 # entropy parameter ns_budget = 30 # budget number of points to be kept in the source distribution nt_budget = 30 # budget number of points to be kept in the target distribution G_screen = screenkhorn(a, b, M, lambd, ns_budget, nt_budget, uniform=False, restricted=True, verbose=True) pl.figure(4, figsize=(5, 5)) ot.plot.plot1D_mat(a, b, G_screen, 'OT matrix Screenkhorn') pl.show() python-pot-0.9.3+dfsg/examples/others/plot_stochastic.py000066400000000000000000000122101455713015700234650ustar00rootroot00000000000000""" =================== Stochastic examples =================== This example is designed to show how to use the stochastic optimization algorithms for discrete and semi-continuous measures from the POT library. [18] Genevay, A., Cuturi, M., Peyré, G. & Bach, F. Stochastic Optimization for Large-scale Optimal Transport. Advances in Neural Information Processing Systems (2016). [19] Seguy, V., Bhushan Damodaran, B., Flamary, R., Courty, N., Rolet, A. & Blondel, M. Large-scale Optimal Transport and Mapping Estimation. International Conference on Learning Representation (2018) """ # Author: Kilian Fatras # # License: MIT License import matplotlib.pylab as pl import numpy as np import ot import ot.plot ############################################################################# # Compute the Transportation Matrix for the Semi-Dual Problem # ----------------------------------------------------------- # # Discrete case # ````````````` # # Sample two discrete measures for the discrete case and compute their cost # matrix c. n_source = 7 n_target = 4 reg = 1 numItermax = 1000 a = ot.utils.unif(n_source) b = ot.utils.unif(n_target) rng = np.random.RandomState(0) X_source = rng.randn(n_source, 2) Y_target = rng.randn(n_target, 2) M = ot.dist(X_source, Y_target) ############################################################################# # Call the "SAG" method to find the transportation matrix in the discrete case method = "SAG" sag_pi = ot.stochastic.solve_semi_dual_entropic(a, b, M, reg, method, numItermax) print(sag_pi) ############################################################################# # Semi-Continuous Case # ```````````````````` # # Sample one general measure a, one discrete measures b for the semicontinuous # case, the points where source and target measures are defined and compute the # cost matrix. 
n_source = 7 n_target = 4 reg = 1 numItermax = 1000 log = True a = ot.utils.unif(n_source) b = ot.utils.unif(n_target) rng = np.random.RandomState(0) X_source = rng.randn(n_source, 2) Y_target = rng.randn(n_target, 2) M = ot.dist(X_source, Y_target) ############################################################################# # Call the "ASGD" method to find the transportation matrix in the semicontinuous # case. method = "ASGD" asgd_pi, log_asgd = ot.stochastic.solve_semi_dual_entropic(a, b, M, reg, method, numItermax, log=log) print(log_asgd['alpha'], log_asgd['beta']) print(asgd_pi) ############################################################################# # Compare the results with the Sinkhorn algorithm sinkhorn_pi = ot.sinkhorn(a, b, M, reg) print(sinkhorn_pi) ############################################################################## # Plot Transportation Matrices # ```````````````````````````` # # For SAG pl.figure(4, figsize=(5, 5)) ot.plot.plot1D_mat(a, b, sag_pi, 'semi-dual : OT matrix SAG') pl.show() ############################################################################## # For ASGD pl.figure(4, figsize=(5, 5)) ot.plot.plot1D_mat(a, b, asgd_pi, 'semi-dual : OT matrix ASGD') pl.show() ############################################################################## # For Sinkhorn pl.figure(4, figsize=(5, 5)) ot.plot.plot1D_mat(a, b, sinkhorn_pi, 'OT matrix Sinkhorn') pl.show() ############################################################################# # Compute the Transportation Matrix for the Dual Problem # ------------------------------------------------------ # # Semi-continuous case # ```````````````````` # # Sample one general measure a, one discrete measures b for the semi-continuous # case and compute the cost matrix c. n_source = 7 n_target = 4 reg = 1 numItermax = 100000 lr = 0.1 batch_size = 3 log = True a = ot.utils.unif(n_source) b = ot.utils.unif(n_target) rng = np.random.RandomState(0) X_source = rng.randn(n_source, 2) Y_target = rng.randn(n_target, 2) M = ot.dist(X_source, Y_target) ############################################################################# # # Call the "SGD" dual method to find the transportation matrix in the # semi-continuous case sgd_dual_pi, log_sgd = ot.stochastic.solve_dual_entropic(a, b, M, reg, batch_size, numItermax, lr, log=log) print(log_sgd['alpha'], log_sgd['beta']) print(sgd_dual_pi) ############################################################################# # # Compare the results with the Sinkhorn algorithm # ``````````````````````````````````````````````` # # Call the Sinkhorn algorithm from POT sinkhorn_pi = ot.sinkhorn(a, b, M, reg) print(sinkhorn_pi) ############################################################################## # Plot Transportation Matrices # ```````````````````````````` # # For SGD pl.figure(4, figsize=(5, 5)) ot.plot.plot1D_mat(a, b, sgd_dual_pi, 'dual : OT matrix SGD') pl.show() ############################################################################## # For Sinkhorn pl.figure(4, figsize=(5, 5)) ot.plot.plot1D_mat(a, b, sinkhorn_pi, 'OT matrix Sinkhorn') pl.show() python-pot-0.9.3+dfsg/examples/plot_Intro_OT.py000066400000000000000000000307731455713015700215300ustar00rootroot00000000000000# coding: utf-8 """ ============================================= Introduction to Optimal Transport with Python ============================================= This example gives an introduction on how to use Optimal Transport in Python. 
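
A minimal sketch of the two solvers at the heart of this tour (hedged: ``a, b``
denote histograms and ``C`` a cost matrix, as defined in the code below):

.. code-block:: python

    G = ot.emd(a, b, C)                 # exact linear-program solver
    Gs = ot.sinkhorn(a, b, C, reg=0.1)  # entropic regularization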
""" # Author: Remi Flamary, Nicolas Courty, Aurelie Boisbunon # # License: MIT License # sphinx_gallery_thumbnail_number = 1 ############################################################################## # POT Python Optimal Transport Toolbox # ------------------------------------ # # POT installation # ``````````````````` # # * Install with pip:: # # pip install pot # * Install with conda:: # # conda install -c conda-forge pot # # Import the toolbox # ``````````````````` # import numpy as np # always need it import pylab as pl # do the plots import ot # ot import time ############################################################################## # Getting help # ````````````` # # Online documentation : ``_ # # Or inline help: # help(ot.dist) ############################################################################## # First OT Problem # ---------------- # # We will solve the Bakery/Cafés problem of transporting croissants from a # number of Bakeries to Cafés in a City (in this case Manhattan). We did a # quick google map search in Manhattan for bakeries and Cafés: # # .. image:: ../_static/images/bak.png # :align: center # :alt: bakery-cafe-manhattan # :width: 600px # :height: 280px # # We extracted from this search their positions and generated fictional # production and sale number (that both sum to the same value). # # We have access to the position of Bakeries ``bakery_pos`` and their # respective production ``bakery_prod`` which describe the source # distribution. The Cafés where the croissants are sold are defined also by # their position ``cafe_pos`` and ``cafe_prod``, and describe the target # distribution. For fun we also provide a # map ``Imap`` that will illustrate the position of these shops in the city. # # # Now we load the data # # data = np.load('../data/manhattan.npz') bakery_pos = data['bakery_pos'] bakery_prod = data['bakery_prod'] cafe_pos = data['cafe_pos'] cafe_prod = data['cafe_prod'] Imap = data['Imap'] print('Bakery production: {}'.format(bakery_prod)) print('Cafe sale: {}'.format(cafe_prod)) print('Total croissants : {}'.format(cafe_prod.sum())) ############################################################################## # Plotting bakeries in the city # ----------------------------- # # Next we plot the position of the bakeries and cafés on the map. The size of # the circle is proportional to their production. # pl.figure(1, (7, 6)) pl.clf() pl.imshow(Imap, interpolation='bilinear') # plot the map pl.scatter(bakery_pos[:, 0], bakery_pos[:, 1], s=bakery_prod, c='r', ec='k', label='Bakeries') pl.scatter(cafe_pos[:, 0], cafe_pos[:, 1], s=cafe_prod, c='b', ec='k', label='Cafés') pl.legend() pl.title('Manhattan Bakeries and Cafés') ############################################################################## # Cost matrix # ----------- # # # We can now compute the cost matrix between the bakeries and the cafés, which # will be the transport cost matrix. This can be done using the # `ot.dist `_ function that # defaults to squared Euclidean distance but can return other things such as # cityblock (or Manhattan distance). 
# C = ot.dist(bakery_pos, cafe_pos) labels = [str(i) for i in range(len(bakery_prod))] f = pl.figure(2, (14, 7)) pl.clf() pl.subplot(121) pl.imshow(Imap, interpolation='bilinear') # plot the map for i in range(len(cafe_pos)): pl.text(cafe_pos[i, 0], cafe_pos[i, 1], labels[i], color='b', fontsize=14, fontweight='bold', ha='center', va='center') for i in range(len(bakery_pos)): pl.text(bakery_pos[i, 0], bakery_pos[i, 1], labels[i], color='r', fontsize=14, fontweight='bold', ha='center', va='center') pl.title('Manhattan Bakeries and Cafés') ax = pl.subplot(122) im = pl.imshow(C, cmap="coolwarm") pl.title('Cost matrix') cbar = pl.colorbar(im, ax=ax, shrink=0.5, use_gridspec=True) cbar.ax.set_ylabel("cost", rotation=-90, va="bottom") pl.xlabel('Cafés') pl.ylabel('Bakeries') pl.tight_layout() ############################################################################## # The red cells in the matrix image show the bakeries and cafés that are # further away, and thus more costly to transport from one to the other, while # the blue ones show those that are very close to each other, with respect to # the squared Euclidean distance. ############################################################################## # Solving the OT problem with `ot.emd `_ # ----------------------------------------------------------------------------------- start = time.time() ot_emd = ot.emd(bakery_prod, cafe_prod, C) time_emd = time.time() - start ############################################################################## # The function returns the transport matrix, which we can then visualize (next section). ############################################################################## # Transportation plan visualization # ````````````````````````````````` # # A good visualization of the OT matrix in the 2D plane is to denote the # transportation of mass between a Bakery and a Café by a line. This can easily # be done with a double ``for`` loop. # # In order to make it more interpretable one can also use the ``alpha`` # parameter of plot and set it to ``alpha=G[i,j]/G.max()``. # Plot the matrix and the map f = pl.figure(3, (14, 7)) pl.clf() pl.subplot(121) pl.imshow(Imap, interpolation='bilinear') # plot the map for i in range(len(bakery_pos)): for j in range(len(cafe_pos)): pl.plot([bakery_pos[i, 0], cafe_pos[j, 0]], [bakery_pos[i, 1], cafe_pos[j, 1]], '-k', lw=3. * ot_emd[i, j] / ot_emd.max()) for i in range(len(cafe_pos)): pl.text(cafe_pos[i, 0], cafe_pos[i, 1], labels[i], color='b', fontsize=14, fontweight='bold', ha='center', va='center') for i in range(len(bakery_pos)): pl.text(bakery_pos[i, 0], bakery_pos[i, 1], labels[i], color='r', fontsize=14, fontweight='bold', ha='center', va='center') pl.title('Manhattan Bakeries and Cafés') ax = pl.subplot(122) im = pl.imshow(ot_emd) for i in range(len(bakery_prod)): for j in range(len(cafe_prod)): text = ax.text(j, i, '{0:g}'.format(ot_emd[i, j]), ha="center", va="center", color="w") pl.title('Transport matrix') pl.xlabel('Cafés') pl.ylabel('Bakeries') pl.tight_layout() ############################################################################## # The transport matrix gives the number of croissants that can be transported # from each bakery to each café. We can see that the bakeries only need to # transport croissants to one or two cafés, the transport matrix being very # sparse. 
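
##############################################################################
# As a quick sanity check (a hedged addition using the variables defined
# above), the marginals of the optimal plan must match the production and
# sale vectors exactly:

print('Row sums match production: {}'.format(np.allclose(ot_emd.sum(axis=1), bakery_prod)))
print('Column sums match sales: {}'.format(np.allclose(ot_emd.sum(axis=0), cafe_prod)))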
##############################################################################
# OT loss and dual variables
# --------------------------
#
# The resulting Wasserstein loss is of the form:
#
# .. math::
#     W=\sum_{i,j}\gamma_{i,j}C_{i,j}
#
# where :math:`\gamma` is the optimal transport matrix.
#

W = np.sum(ot_emd * C)
print('Wasserstein loss (EMD) = {0:.2f}'.format(W))

##############################################################################
# Regularized OT with Sinkhorn
# ----------------------------
#
# The Sinkhorn algorithm is very simple to code. You can implement it directly
# using the following pseudo-code
#
# .. image:: ../_static/images/sinkhorn.png
#     :align: center
#     :alt: Sinkhorn algorithm
#     :width: 440px
#     :height: 240px
#
# In this algorithm, :math:`\oslash` corresponds to the element-wise division.
#
# An alternative is to use the POT toolbox with
# `ot.sinkhorn `_
#
# Be careful of numerical problems. A good pre-processing for Sinkhorn is to
# divide the cost matrix ``C`` by its maximum value.

##############################################################################
# Algorithm
# `````````

# Compute Sinkhorn transport matrix from algorithm
reg = 0.1
K = np.exp(-C / C.max() / reg)
nit = 100
u = np.ones((len(bakery_prod), ))
for i in range(1, nit):
    v = cafe_prod / np.dot(K.T, u)
    u = bakery_prod / (np.dot(K, v))
ot_sink_algo = np.atleast_2d(u).T * (K * v.T)  # Equivalent to np.dot(np.diag(u), np.dot(K, np.diag(v)))

# Compute Sinkhorn transport matrix with POT
ot_sinkhorn = ot.sinkhorn(bakery_prod, cafe_prod, reg=reg, M=C / C.max())

# Difference between the 2
print('Difference between algo and ot.sinkhorn = {0:.2g}'.format(np.sum(np.power(ot_sink_algo - ot_sinkhorn, 2))))

##############################################################################
# Plot the matrix and the map
# ```````````````````````````

print('Min. of Sinkhorn\'s transport matrix = {0:.2g}'.format(np.min(ot_sinkhorn)))

f = pl.figure(4, (13, 6))
pl.clf()
pl.subplot(121)
pl.imshow(Imap, interpolation='bilinear')  # plot the map
for i in range(len(bakery_pos)):
    for j in range(len(cafe_pos)):
        pl.plot([bakery_pos[i, 0], cafe_pos[j, 0]], [bakery_pos[i, 1], cafe_pos[j, 1]],
                '-k', lw=3. * ot_sinkhorn[i, j] / ot_sinkhorn.max())
for i in range(len(cafe_pos)):
    pl.text(cafe_pos[i, 0], cafe_pos[i, 1], labels[i], color='b',
            fontsize=14, fontweight='bold', ha='center', va='center')
for i in range(len(bakery_pos)):
    pl.text(bakery_pos[i, 0], bakery_pos[i, 1], labels[i], color='r',
            fontsize=14, fontweight='bold', ha='center', va='center')
pl.title('Manhattan Bakeries and Cafés')

ax = pl.subplot(122)
im = pl.imshow(ot_sinkhorn)
for i in range(len(bakery_prod)):
    for j in range(len(cafe_prod)):
        text = ax.text(j, i, np.round(ot_sinkhorn[i, j], 1),
                       ha="center", va="center", color="w")
pl.title('Transport matrix')

pl.xlabel('Cafés')
pl.ylabel('Bakeries')
pl.tight_layout()

##############################################################################
# We notice right away that the matrix is not sparse at all with Sinkhorn,
# each bakery delivering croissants to all 5 cafés with that solution. Also,
# this solution gives a transport with fractions, which does not make sense
# in the case of croissants. This was not the case with EMD.
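
##############################################################################
# To make the sparsity difference explicit, one can count the near-zero
# entries of both plans (a hedged addition using the variables defined above):

print('Near-zero entries in EMD plan: {0}/{1}'.format(int(np.sum(ot_emd <= 1e-8)), ot_emd.size))
print('Near-zero entries in Sinkhorn plan: {0}/{1}'.format(int(np.sum(ot_sinkhorn <= 1e-8)), ot_sinkhorn.size))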
##############################################################################
# Varying the regularization parameter in Sinkhorn
# ````````````````````````````````````````````````
#

reg_parameter = np.logspace(-3, 0, 20)
W_sinkhorn_reg = np.zeros((len(reg_parameter), ))
time_sinkhorn_reg = np.zeros((len(reg_parameter), ))

f = pl.figure(5, (14, 5))
pl.clf()
max_ot = 100  # plot matrices with the same colorbar
for k in range(len(reg_parameter)):
    start = time.time()
    ot_sinkhorn = ot.sinkhorn(bakery_prod, cafe_prod, reg=reg_parameter[k], M=C / C.max())
    time_sinkhorn_reg[k] = time.time() - start

    if k % 4 == 0 and k > 0:  # we only plot a few
        ax = pl.subplot(1, 5, k // 4)
        im = pl.imshow(ot_sinkhorn, vmin=0, vmax=max_ot)
        pl.title('reg={0:.2g}'.format(reg_parameter[k]))
        pl.xlabel('Cafés')
        pl.ylabel('Bakeries')

    # Compute the Wasserstein loss for Sinkhorn, and compare with EMD
    W_sinkhorn_reg[k] = np.sum(ot_sinkhorn * C)
pl.tight_layout()

##############################################################################
# This series of graphs shows that the solution of Sinkhorn starts with something
# very similar to EMD (although not sparse) for very small values of the
# regularization parameter, and tends to a more uniform solution as the
# regularization parameter increases.
#

##############################################################################
# Wasserstein loss and computational time
# ```````````````````````````````````````
#

# Plot the matrix and the map
f = pl.figure(6, (4, 4))
pl.clf()
pl.title("Comparison between Sinkhorn and EMD")

pl.plot(reg_parameter, W_sinkhorn_reg, 'o', label="Sinkhorn")
XLim = pl.xlim()
pl.plot(XLim, [W, W], '--k', label="EMD")
pl.legend()
pl.xlabel("reg")
pl.ylabel("Wasserstein loss")

##############################################################################
# In this last graph, we show the impact of the regularization parameter on
# the Wasserstein loss. We can see that higher
# values of ``reg`` lead to a much higher Wasserstein loss.
#
# The Wasserstein loss of EMD is displayed for
# comparison. The Wasserstein loss of Sinkhorn can be a little lower than that
# of EMD for low values of ``reg``, but it quickly gets much higher.
#
python-pot-0.9.3+dfsg/examples/plot_OT_1D.py000066400000000000000000000035011455713015700206700ustar00rootroot00000000000000# -*- coding: utf-8 -*-
"""
======================================
Optimal Transport for 1D distributions
======================================

This example illustrates the computation of EMD and Sinkhorn transport plans
and their visualization.
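
A minimal sketch of the two solvers used below (hedged: ``x`` denotes the bin
positions, ``a, b`` the histograms and ``M`` the cost matrix defined in the
code):

.. code-block:: python

    G0 = ot.emd_1d(x, x, a, b)       # exact 1D solver, equivalent to ot.emd(a, b, M)
    Gs = ot.sinkhorn(a, b, M, 1e-3)  # entropic regularization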
""" # Author: Remi Flamary # # License: MIT License # sphinx_gallery_thumbnail_number = 3 import numpy as np import matplotlib.pylab as pl import ot import ot.plot from ot.datasets import make_1D_gauss as gauss ############################################################################## # Generate data # ------------- #%% parameters n = 100 # nb bins # bin positions x = np.arange(n, dtype=np.float64) # Gaussian distributions a = gauss(n, m=20, s=5) # m= mean, s= std b = gauss(n, m=60, s=10) # loss matrix M = ot.dist(x.reshape((n, 1)), x.reshape((n, 1))) M /= M.max() ############################################################################## # Plot distributions and loss matrix # ---------------------------------- #%% plot the distributions pl.figure(1, figsize=(6.4, 3)) pl.plot(x, a, 'b', label='Source distribution') pl.plot(x, b, 'r', label='Target distribution') pl.legend() #%% plot distributions and loss matrix pl.figure(2, figsize=(5, 5)) ot.plot.plot1D_mat(a, b, M, 'Cost matrix M') ############################################################################## # Solve EMD # --------- #%% EMD # use fast 1D solver G0 = ot.emd_1d(x, x, a, b) # Equivalent to # G0 = ot.emd(a, b, M) pl.figure(3, figsize=(5, 5)) ot.plot.plot1D_mat(a, b, G0, 'OT matrix G0') ############################################################################## # Solve Sinkhorn # -------------- #%% Sinkhorn lambd = 1e-3 Gs = ot.sinkhorn(a, b, M, lambd, verbose=True) pl.figure(4, figsize=(5, 5)) ot.plot.plot1D_mat(a, b, Gs, 'OT matrix Sinkhorn') pl.show() python-pot-0.9.3+dfsg/examples/plot_OT_1D_smooth.py000066400000000000000000000042401455713015700222600ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ ================================ Smooth and sparse OT example ================================ This example illustrates the computation of Smooth and Sparse (KL an L2 reg.) OT and sparsity-constrained OT, together with their visualizations. 
""" # Author: Remi Flamary # # License: MIT License # sphinx_gallery_thumbnail_number = 5 import numpy as np import matplotlib.pylab as pl import ot import ot.plot from ot.datasets import make_1D_gauss as gauss ############################################################################## # Generate data # ------------- #%% parameters n = 100 # nb bins # bin positions x = np.arange(n, dtype=np.float64) # Gaussian distributions a = gauss(n, m=20, s=5) # m= mean, s= std b = gauss(n, m=60, s=10) # loss matrix M = ot.dist(x.reshape((n, 1)), x.reshape((n, 1))) M /= M.max() ############################################################################## # Plot distributions and loss matrix # ---------------------------------- #%% plot the distributions pl.figure(1, figsize=(6.4, 3)) pl.plot(x, a, 'b', label='Source distribution') pl.plot(x, b, 'r', label='Target distribution') pl.legend() #%% plot distributions and loss matrix pl.figure(2, figsize=(5, 5)) ot.plot.plot1D_mat(a, b, M, 'Cost matrix M') ############################################################################## # Solve Smooth OT # --------------- #%% Smooth OT with KL regularization lambd = 2e-3 Gsm = ot.smooth.smooth_ot_dual(a, b, M, lambd, reg_type='kl') pl.figure(3, figsize=(5, 5)) ot.plot.plot1D_mat(a, b, Gsm, 'OT matrix Smooth OT KL reg.') pl.show() #%% Smooth OT with squared l2 regularization lambd = 1e-1 Gsm = ot.smooth.smooth_ot_dual(a, b, M, lambd, reg_type='l2') pl.figure(4, figsize=(5, 5)) ot.plot.plot1D_mat(a, b, Gsm, 'OT matrix Smooth OT l2 reg.') pl.show() #%% Sparsity-constrained OT lambd = 1e-1 max_nz = 2 # two non-zero entries are permitted per column of the OT plan Gsc = ot.smooth.smooth_ot_dual( a, b, M, lambd, reg_type='sparsity_constrained', max_nz=max_nz) pl.figure(5, figsize=(5, 5)) ot.plot.plot1D_mat(a, b, Gsc, 'Sparsity constrained OT matrix; k=2.') pl.show() python-pot-0.9.3+dfsg/examples/plot_OT_2D_samples.py000066400000000000000000000057331455713015700224240ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ ==================================================== Optimal Transport between 2D empirical distributions ==================================================== Illustration of 2D optimal transport between distributions that are weighted sum of Diracs. The OT matrix is plotted with the samples. 
""" # Author: Remi Flamary # Kilian Fatras # # License: MIT License # sphinx_gallery_thumbnail_number = 4 import numpy as np import matplotlib.pylab as pl import ot import ot.plot ############################################################################## # Generate data # ------------- #%% parameters and data generation n = 50 # nb samples mu_s = np.array([0, 0]) cov_s = np.array([[1, 0], [0, 1]]) mu_t = np.array([4, 4]) cov_t = np.array([[1, -.8], [-.8, 1]]) xs = ot.datasets.make_2D_samples_gauss(n, mu_s, cov_s) xt = ot.datasets.make_2D_samples_gauss(n, mu_t, cov_t) a, b = np.ones((n,)) / n, np.ones((n,)) / n # uniform distribution on samples # loss matrix M = ot.dist(xs, xt) ############################################################################## # Plot data # --------- #%% plot samples pl.figure(1) pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples') pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples') pl.legend(loc=0) pl.title('Source and target distributions') pl.figure(2) pl.imshow(M, interpolation='nearest') pl.title('Cost matrix M') ############################################################################## # Compute EMD # ----------- #%% EMD G0 = ot.emd(a, b, M) pl.figure(3) pl.imshow(G0, interpolation='nearest') pl.title('OT matrix G0') pl.figure(4) ot.plot.plot2D_samples_mat(xs, xt, G0, c=[.5, .5, 1]) pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples') pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples') pl.legend(loc=0) pl.title('OT matrix with samples') ############################################################################## # Compute Sinkhorn # ---------------- #%% sinkhorn # reg term lambd = 1e-1 Gs = ot.sinkhorn(a, b, M, lambd) pl.figure(5) pl.imshow(Gs, interpolation='nearest') pl.title('OT matrix sinkhorn') pl.figure(6) ot.plot.plot2D_samples_mat(xs, xt, Gs, color=[.5, .5, 1]) pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples') pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples') pl.legend(loc=0) pl.title('OT matrix Sinkhorn with samples') pl.show() ############################################################################## # Empirical Sinkhorn # ------------------- #%% sinkhorn # reg term lambd = 1e-1 Ges = ot.bregman.empirical_sinkhorn(xs, xt, lambd) pl.figure(7) pl.imshow(Ges, interpolation='nearest') pl.title('OT matrix empirical sinkhorn') pl.figure(8) ot.plot.plot2D_samples_mat(xs, xt, Ges, color=[.5, .5, 1]) pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples') pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples') pl.legend(loc=0) pl.title('OT matrix Sinkhorn from samples') pl.show() python-pot-0.9.3+dfsg/examples/plot_OT_L1_vs_L2.py000066400000000000000000000114331455713015700217460ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ ================================================ Optimal Transport with different ground metrics ================================================ 2D OT on empirical distribution with different ground metric. Stole the figure idea from Fig. 1 and 2 in https://arxiv.org/pdf/1706.07650.pdf """ # Author: Remi Flamary # # License: MIT License # sphinx_gallery_thumbnail_number = 3 import numpy as np import matplotlib.pylab as pl import ot import ot.plot # %% # Dataset 1 : uniform sampling # ---------------------------- n = 20 # nb samples xs = np.zeros((n, 2)) xs[:, 0] = np.arange(n) + 1 xs[:, 1] = (np.arange(n) + 1) * -0.001 # to make it strictly convex... 
xt = np.zeros((n, 2))
xt[:, 1] = np.arange(n) + 1

a, b = ot.unif(n), ot.unif(n)  # uniform distribution on samples

# loss matrix
M1 = ot.dist(xs, xt, metric='euclidean')
M1 /= M1.max()

# loss matrix
M2 = ot.dist(xs, xt, metric='sqeuclidean')
M2 /= M2.max()

# loss matrix
Mp = ot.dist(xs, xt, metric='cityblock')
Mp /= Mp.max()

# Data
pl.figure(1, figsize=(7, 3))
pl.clf()
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.axis('equal')
pl.title('Source and target distributions')

# Cost matrices
pl.figure(2, figsize=(7, 3))

pl.subplot(1, 3, 1)
pl.imshow(M1, interpolation='nearest')
pl.title('Euclidean cost')

pl.subplot(1, 3, 2)
pl.imshow(M2, interpolation='nearest')
pl.title('Squared Euclidean cost')

pl.subplot(1, 3, 3)
pl.imshow(Mp, interpolation='nearest')
pl.title('L1 (cityblock) cost')
pl.tight_layout()

##############################################################################
# Dataset 1 : Plot OT Matrices
# ----------------------------

#%% EMD
G1 = ot.emd(a, b, M1)
G2 = ot.emd(a, b, M2)
Gp = ot.emd(a, b, Mp)

# OT matrices
pl.figure(3, figsize=(7, 3))

pl.subplot(1, 3, 1)
ot.plot.plot2D_samples_mat(xs, xt, G1, c=[.5, .5, 1])
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.axis('equal')
# pl.legend(loc=0)
pl.title('OT Euclidean')

pl.subplot(1, 3, 2)
ot.plot.plot2D_samples_mat(xs, xt, G2, c=[.5, .5, 1])
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.axis('equal')
# pl.legend(loc=0)
pl.title('OT squared Euclidean')

pl.subplot(1, 3, 3)
ot.plot.plot2D_samples_mat(xs, xt, Gp, c=[.5, .5, 1])
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.axis('equal')
# pl.legend(loc=0)
pl.title('OT L1 (cityblock)')
pl.tight_layout()
pl.show()

# %%
# Dataset 2 : Partial circle
# --------------------------

n = 20  # nb samples

xtot = np.zeros((n + 1, 2))
xtot[:, 0] = np.cos(
    (np.arange(n + 1) + 1.0) * 0.8 / (n + 2) * 2 * np.pi)
xtot[:, 1] = np.sin(
    (np.arange(n + 1) + 1.0) * 0.8 / (n + 2) * 2 * np.pi)

xs = xtot[:n, :]
xt = xtot[1:, :]

a, b = ot.unif(n), ot.unif(n)  # uniform distribution on samples

# loss matrix
M1 = ot.dist(xs, xt, metric='euclidean')
M1 /= M1.max()

# loss matrix
M2 = ot.dist(xs, xt, metric='sqeuclidean')
M2 /= M2.max()

# loss matrix
Mp = ot.dist(xs, xt, metric='cityblock')
Mp /= Mp.max()

# Data
pl.figure(4, figsize=(7, 3))
pl.clf()
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.axis('equal')
pl.title('Source and target distributions')

# Cost matrices
pl.figure(5, figsize=(7, 3))

pl.subplot(1, 3, 1)
pl.imshow(M1, interpolation='nearest')
pl.title('Euclidean cost')

pl.subplot(1, 3, 2)
pl.imshow(M2, interpolation='nearest')
pl.title('Squared Euclidean cost')

pl.subplot(1, 3, 3)
pl.imshow(Mp, interpolation='nearest')
pl.title('L1 (cityblock) cost')
pl.tight_layout()

##############################################################################
# Dataset 2 : Plot OT Matrices
# -----------------------------
#

#%% EMD
G1 = ot.emd(a, b, M1)
G2 = ot.emd(a, b, M2)
Gp = ot.emd(a, b, Mp)

# OT matrices
pl.figure(6, figsize=(7, 3))

pl.subplot(1, 3, 1)
ot.plot.plot2D_samples_mat(xs, xt, G1, c=[.5, .5, 1])
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.axis('equal')
# pl.legend(loc=0)
pl.title('OT Euclidean') pl.subplot(1, 3, 2) ot.plot.plot2D_samples_mat(xs, xt, G2, c=[.5, .5, 1]) pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples') pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples') pl.axis('equal') # pl.legend(loc=0) pl.title('OT squared Euclidean') pl.subplot(1, 3, 3) ot.plot.plot2D_samples_mat(xs, xt, Gp, c=[.5, .5, 1]) pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples') pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples') pl.axis('equal') # pl.legend(loc=0) pl.title('OT L1 (cityblock)') pl.tight_layout() pl.show() python-pot-0.9.3+dfsg/examples/plot_compute_emd.py000066400000000000000000000064621455713015700223320ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ ================== OT distances in 1D ================== Shows how to compute multiple Wasserstein and Sinkhorn with two different ground metrics and plot their values for different distributions. """ # Author: Remi Flamary # # License: MIT License # sphinx_gallery_thumbnail_number = 2 import numpy as np import matplotlib.pylab as pl import ot from ot.datasets import make_1D_gauss as gauss ############################################################################## # Generate data # ------------- #%% parameters n = 100 # nb bins n_target = 20 # nb target distributions # bin positions x = np.arange(n, dtype=np.float64) lst_m = np.linspace(20, 90, n_target) # Gaussian distributions a = gauss(n, m=20, s=5) # m= mean, s= std B = np.zeros((n, n_target)) for i, m in enumerate(lst_m): B[:, i] = gauss(n, m=m, s=5) # loss matrix and normalization M = ot.dist(x.reshape((n, 1)), x.reshape((n, 1)), 'euclidean') M /= M.max() * 0.1 M2 = ot.dist(x.reshape((n, 1)), x.reshape((n, 1)), 'sqeuclidean') M2 /= M2.max() * 0.1 ############################################################################## # Plot data # --------- #%% plot the distributions pl.figure(1) pl.subplot(2, 1, 1) pl.plot(x, a, 'r', label='Source distribution') pl.title('Source distribution') pl.subplot(2, 1, 2) for i in range(n_target): pl.plot(x, B[:, i], 'b', alpha=i / n_target) pl.plot(x, B[:, -1], 'b', label='Target distributions') pl.title('Target distributions') pl.tight_layout() ############################################################################## # Compute EMD for the different losses # ------------------------------------ #%% Compute and plot distributions and loss matrix d_emd = ot.emd2(a, B, M) # direct computation of OT loss d_emd2 = ot.emd2(a, B, M2) # direct computation of OT loss with metric M2 d_tv = [np.sum(abs(a - B[:, i])) for i in range(n_target)] pl.figure(2) pl.subplot(2, 1, 1) pl.plot(x, a, 'r', label='Source distribution') pl.title('Distributions') for i in range(n_target): pl.plot(x, B[:, i], 'b', alpha=i / n_target) pl.plot(x, B[:, -1], 'b', label='Target distributions') pl.ylim((-.01, 0.13)) pl.xticks(()) pl.legend() pl.subplot(2, 1, 2) pl.plot(d_emd, label='Euclidean OT') pl.plot(d_emd2, label='Squared Euclidean OT') pl.plot(d_tv, label='Total Variation (TV)') #pl.xlim((-7,23)) pl.xlabel('Displacement') pl.title('Divergences') pl.legend() ############################################################################## # Compute Sinkhorn for the different losses # ----------------------------------------- #%% reg = 1e-1 d_sinkhorn = ot.sinkhorn2(a, B, M, reg) d_sinkhorn2 = ot.sinkhorn2(a, B, M2, reg) pl.figure(3) pl.clf() pl.subplot(2, 1, 1) pl.plot(x, a, 'r', label='Source distribution') pl.title('Distributions') for i in range(n_target): pl.plot(x, B[:, i], 'b', alpha=i / n_target) 
pl.plot(x, B[:, -1], 'b', label='Target distributions') pl.ylim((-.01, 0.13)) pl.xticks(()) pl.legend() pl.subplot(2, 1, 2) pl.plot(d_emd, label='Euclidean OT') pl.plot(d_emd2, label='Squared Euclidean OT') pl.plot(d_sinkhorn, '+', label='Euclidean Sinkhorn') pl.plot(d_sinkhorn2, '+', label='Squared Euclidean Sinkhorn') pl.plot(d_tv, label='Total Variation (TV)') #pl.xlim((-7,23)) pl.xlabel('Displacement') pl.title('Divergences') pl.legend() pl.show() python-pot-0.9.3+dfsg/examples/plot_compute_wasserstein_circle.py000066400000000000000000000113711455713015700254500ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ ========================= OT distance on the Circle ========================= Shows how to compute the Wasserstein distance on the circle """ # Author: Clément Bonet # # License: MIT License # sphinx_gallery_thumbnail_number = 2 import numpy as np import matplotlib.pylab as pl import ot from scipy.special import iv ############################################################################## # Plot data # --------- #%% plot the distributions def pdf_von_Mises(theta, mu, kappa): pdf = np.exp(kappa * np.cos(theta - mu)) / (2.0 * np.pi * iv(0, kappa)) return pdf t = np.linspace(0, 2 * np.pi, 1000, endpoint=False) mu1 = 1 kappa1 = 20 mu_targets = np.linspace(mu1, mu1 + 2 * np.pi, 10) pdf1 = pdf_von_Mises(t, mu1, kappa1) pl.figure(1) for k, mu in enumerate(mu_targets): pdf_t = pdf_von_Mises(t, mu, kappa1) if k == 0: label = "Source distributions" else: label = None pl.plot(t / (2 * np.pi), pdf_t, c='b', label=label) pl.plot(t / (2 * np.pi), pdf1, c="r", label="Target distribution") pl.legend() mu2 = 0 kappa2 = kappa1 x1 = np.random.vonmises(mu1, kappa1, size=(10,)) + np.pi x2 = np.random.vonmises(mu2, kappa2, size=(10,)) + np.pi angles = np.linspace(0, 2 * np.pi, 150) pl.figure(2) pl.plot(np.cos(angles), np.sin(angles), c="k") pl.xlim(-1.25, 1.25) pl.ylim(-1.25, 1.25) pl.scatter(np.cos(x1), np.sin(x1), c="b") pl.scatter(np.cos(x2), np.sin(x2), c="r") ######################################################################################### # Compare the Euclidean Wasserstein distance with the Wasserstein distance on the circle # --------------------------------------------------------------------------------------- # This examples illustrates the periodicity of the Wasserstein distance on the circle. # We choose as target distribution a von Mises distribution with mean :math:`\mu_{\mathrm{target}}` # and :math:`\kappa=20`. Then, we compare the distances with samples obtained from a von Mises distribution # with parameters :math:`\mu_{\mathrm{source}}` and :math:`\kappa=20`. # The Wasserstein distance on the circle takes into account the periodicity # and attains its maximum in :math:`\mu_{\mathrm{target}}+1` (the antipodal point) contrary to the # Euclidean version. 
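##############################################################################
# Before the full experiment, here is a minimal illustration of that
# periodicity (an added sketch, assuming samples are given on
# :math:`S^1=[0,1[` as in the rest of this example): two Diracs at 0.1 and
# 0.9 are at circle distance 0.2, not 0.8.

u = np.array([[0.1]])  # one sample, one distribution
v = np.array([[0.9]])
print('W1 on the circle:', ot.wasserstein_circle(u, v, p=1))
print('W1 on the line :', ot.wasserstein_1d(u, v, p=1))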
#%% Compute and plot distributions mu_targets = np.linspace(0, 2 * np.pi, 200) xs = np.random.vonmises(mu1 - np.pi, kappa1, size=(500,)) + np.pi n_try = 5 xts = np.zeros((n_try, 200, 500)) for i in range(n_try): for k, mu in enumerate(mu_targets): # np.random.vonmises deals with data on [-pi, pi[ xt = np.random.vonmises(mu - np.pi, kappa2, size=(500,)) + np.pi xts[i, k] = xt # Put data on S^1=[0,1[ xts2 = xts / (2 * np.pi) xs2 = np.concatenate([xs[None] for k in range(200)], axis=0) / (2 * np.pi) L_w2_circle = np.zeros((n_try, 200)) L_w2 = np.zeros((n_try, 200)) for i in range(n_try): w2_circle = ot.wasserstein_circle(xs2.T, xts2[i].T, p=2) w2 = ot.wasserstein_1d(xs2.T, xts2[i].T, p=2) L_w2_circle[i] = w2_circle L_w2[i] = w2 m_w2_circle = np.mean(L_w2_circle, axis=0) std_w2_circle = np.std(L_w2_circle, axis=0) m_w2 = np.mean(L_w2, axis=0) std_w2 = np.std(L_w2, axis=0) pl.figure(1) pl.plot(mu_targets / (2 * np.pi), m_w2_circle, label="Wasserstein circle") pl.fill_between(mu_targets / (2 * np.pi), m_w2_circle - 2 * std_w2_circle, m_w2_circle + 2 * std_w2_circle, alpha=0.5) pl.plot(mu_targets / (2 * np.pi), m_w2, label="Euclidean Wasserstein") pl.fill_between(mu_targets / (2 * np.pi), m_w2 - 2 * std_w2, m_w2 + 2 * std_w2, alpha=0.5) pl.vlines(x=[mu1 / (2 * np.pi)], ymin=0, ymax=np.max(w2), linestyle="--", color="k", label=r"$\mu_{\mathrm{target}}$") pl.legend() pl.xlabel(r"$\mu_{\mathrm{source}}$") pl.show() ######################################################################## # Wasserstein distance between von Mises and uniform for different kappa # ---------------------------------------------------------------------- # When :math:`\kappa=0`, the von Mises distribution is the uniform distribution on :math:`S^1`. #%% Compute Wasserstein between Von Mises and uniform kappas = np.logspace(-5, 2, 100) n_try = 20 xts = np.zeros((n_try, 100, 500)) for i in range(n_try): for k, kappa in enumerate(kappas): # np.random.vonmises deals with data on [-pi, pi[ xt = np.random.vonmises(0, kappa, size=(500,)) + np.pi xts[i, k] = xt / (2 * np.pi) L_w2 = np.zeros((n_try, 100)) for i in range(n_try): L_w2[i] = ot.semidiscrete_wasserstein2_unif_circle(xts[i].T) m_w2 = np.mean(L_w2, axis=0) std_w2 = np.std(L_w2, axis=0) pl.figure(1) pl.plot(kappas, m_w2) pl.fill_between(kappas, m_w2 - std_w2, m_w2 + std_w2, alpha=0.5) pl.title(r"Evolution of $W_2^2(vM(0,\kappa), Unif(S^1))$") pl.xlabel(r"$\kappa$") pl.show() # %% python-pot-0.9.3+dfsg/examples/plot_optim_OTreg.py000066400000000000000000000065341455713015700222610ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ ================================== Regularized OT with generic solver ================================== Illustrates the use of the generic solver for regularized OT with user-designed regularization term. It uses Conditional gradient as in [6] and generalized Conditional Gradient as proposed in [5,7]. [5] N. Courty; R. Flamary; D. Tuia; A. Rakotomamonjy, Optimal Transport for Domain Adaptation, in IEEE Transactions on Pattern Analysis and Machine Intelligence , vol.PP, no.99, pp.1-1. [6] Ferradans, S., Papadakis, N., Peyré, G., & Aujol, J. F. (2014). Regularized discrete optimal transport. SIAM Journal on Imaging Sciences, 7(3), 1853-1882. [7] Rakotomamonjy, A., Flamary, R., & Courty, N. (2015). Generalized conditional gradient: analysis of convergence and applications. arXiv preprint arXiv:1510.06567. 
""" # sphinx_gallery_thumbnail_number = 5 import numpy as np import matplotlib.pylab as pl import ot import ot.plot ############################################################################## # Generate data # ------------- #%% parameters n = 100 # nb bins # bin positions x = np.arange(n, dtype=np.float64) # Gaussian distributions a = ot.datasets.make_1D_gauss(n, m=20, s=5) # m= mean, s= std b = ot.datasets.make_1D_gauss(n, m=60, s=10) # loss matrix M = ot.dist(x.reshape((n, 1)), x.reshape((n, 1))) M /= M.max() ############################################################################## # Solve EMD # --------- #%% EMD G0 = ot.emd(a, b, M) pl.figure(1, figsize=(5, 5)) ot.plot.plot1D_mat(a, b, G0, 'OT matrix G0') ############################################################################## # Solve EMD with Frobenius norm regularization # -------------------------------------------- #%% Example with Frobenius norm regularization def f(G): return 0.5 * np.sum(G**2) def df(G): return G reg = 1e-1 Gl2 = ot.optim.cg(a, b, M, reg, f, df, verbose=True) pl.figure(2) ot.plot.plot1D_mat(a, b, Gl2, 'OT matrix Frob. reg') ############################################################################## # Solve EMD with entropic regularization # -------------------------------------- #%% Example with entropic regularization def f(G): return np.sum(G * np.log(G)) def df(G): return np.log(G) + 1. reg = 1e-3 Ge = ot.optim.cg(a, b, M, reg, f, df, verbose=True) pl.figure(3, figsize=(5, 5)) ot.plot.plot1D_mat(a, b, Ge, 'OT matrix Entrop. reg') ############################################################################## # Solve EMD with Frobenius norm + entropic regularization # ------------------------------------------------------- #%% Example with Frobenius norm + entropic regularization with gcg def f(G): return 0.5 * np.sum(G**2) def df(G): return G reg1 = 1e-3 reg2 = 1e-1 Gel2 = ot.optim.gcg(a, b, M, reg1, reg2, f, df, verbose=True) pl.figure(4, figsize=(5, 5)) ot.plot.plot1D_mat(a, b, Gel2, 'OT entropic + matrix Frob. reg') pl.show() # %% # Comparison of the OT matrices nvisu = 40 pl.figure(5, figsize=(10, 4)) pl.subplot(2, 2, 1) pl.imshow(G0[:nvisu, :]) pl.axis('off') pl.title('Exact OT') pl.subplot(2, 2, 2) pl.imshow(Gl2[:nvisu, :]) pl.axis('off') pl.title('Frobenius reg.') pl.subplot(2, 2, 3) pl.imshow(Ge[:nvisu, :]) pl.axis('off') pl.title('Entropic reg.') pl.subplot(2, 2, 4) pl.imshow(Gel2[:nvisu, :]) pl.axis('off') pl.title('Entropic + Frobenius reg.') python-pot-0.9.3+dfsg/examples/sliced-wasserstein/000077500000000000000000000000001455713015700222215ustar00rootroot00000000000000python-pot-0.9.3+dfsg/examples/sliced-wasserstein/README.txt000066400000000000000000000000721455713015700237160ustar00rootroot00000000000000 Sliced Wasserstein Distance --------------------------- python-pot-0.9.3+dfsg/examples/sliced-wasserstein/plot_variance.py000066400000000000000000000046561455713015700254340ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ =============================================== Sliced Wasserstein Distance on 2D distributions =============================================== This example illustrates the computation of the sliced Wasserstein Distance as proposed in [31]. [31] Bonneel, Nicolas, et al. "Sliced and radon wasserstein barycenters of measures." 
Journal of Mathematical Imaging and Vision 51.1 (2015): 22-45 """ # Author: Adrien Corenflos # # License: MIT License # sphinx_gallery_thumbnail_number = 2 import matplotlib.pylab as pl import numpy as np import ot ############################################################################## # Generate data # ------------- # %% parameters and data generation n = 200 # nb samples mu_s = np.array([0, 0]) cov_s = np.array([[1, 0], [0, 1]]) mu_t = np.array([4, 4]) cov_t = np.array([[1, -.8], [-.8, 1]]) xs = ot.datasets.make_2D_samples_gauss(n, mu_s, cov_s) xt = ot.datasets.make_2D_samples_gauss(n, mu_t, cov_t) a, b = np.ones((n,)) / n, np.ones((n,)) / n # uniform distribution on samples ############################################################################## # Plot data # --------- # %% plot samples pl.figure(1) pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples') pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples') pl.legend(loc=0) pl.title('Source and target distributions') ############################################################################### # Sliced Wasserstein distance for different seeds and number of projections # ------------------------------------------------------------------------- n_seed = 20 n_projections_arr = np.logspace(0, 3, 10, dtype=int) res = np.empty((n_seed, 10)) # %% Compute statistics for seed in range(n_seed): for i, n_projections in enumerate(n_projections_arr): res[seed, i] = ot.sliced_wasserstein_distance(xs, xt, a, b, n_projections, seed=seed) res_mean = np.mean(res, axis=0) res_std = np.std(res, axis=0) ############################################################################### # Plot Sliced Wasserstein Distance # -------------------------------- pl.figure(2) pl.plot(n_projections_arr, res_mean, label="SWD") pl.fill_between(n_projections_arr, res_mean - 2 * res_std, res_mean + 2 * res_std, alpha=0.5) pl.legend() pl.xscale('log') pl.xlabel("Number of projections") pl.ylabel("Distance") pl.title('Sliced Wasserstein Distance with 95% confidence interval') pl.show() python-pot-0.9.3+dfsg/examples/sliced-wasserstein/plot_variance_ssw.py000066400000000000000000000057671455713015700263340ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ ==================================================== Spherical Sliced Wasserstein on distributions in S^2 ==================================================== This example illustrates the computation of the spherical sliced Wasserstein discrepancy as proposed in [46]. [46] Bonet, C., Berg, P., Courty, N., Septier, F., Drumetz, L., & Pham, M. T. (2023). 'Spherical Sliced-Wasserstein". International Conference on Learning Representations. 
""" # Author: Clément Bonet # # License: MIT License # sphinx_gallery_thumbnail_number = 2 import matplotlib.pylab as pl import numpy as np import ot ############################################################################## # Generate data # ------------- # %% parameters and data generation n = 200 # nb samples xs = np.random.randn(n, 3) xt = np.random.randn(n, 3) xs = xs / np.sqrt(np.sum(xs**2, -1, keepdims=True)) xt = xt / np.sqrt(np.sum(xt**2, -1, keepdims=True)) a, b = np.ones((n,)) / n, np.ones((n,)) / n # uniform distribution on samples ############################################################################## # Plot data # --------- # %% plot samples fig = pl.figure(figsize=(10, 10)) ax = pl.axes(projection='3d') ax.grid(False) u, v = np.mgrid[0:2 * np.pi:30j, 0:np.pi:30j] x = np.cos(u) * np.sin(v) y = np.sin(u) * np.sin(v) z = np.cos(v) ax.plot_surface(x, y, z, color="gray", alpha=0.03) ax.plot_wireframe(x, y, z, linewidth=1, alpha=0.25, color="gray") ax.scatter(xs[:, 0], xs[:, 1], xs[:, 2], label="Source") ax.scatter(xt[:, 0], xt[:, 1], xt[:, 2], label="Target") fs = 10 # Labels ax.set_xlabel('x', fontsize=fs) ax.set_ylabel('y', fontsize=fs) ax.set_zlabel('z', fontsize=fs) ax.view_init(20, 120) ax.set_xlim(-1.5, 1.5) ax.set_ylim(-1.5, 1.5) ax.set_zlim(-1.5, 1.5) # Ticks ax.set_xticks([-1, 0, 1]) ax.set_yticks([-1, 0, 1]) ax.set_zticks([-1, 0, 1]) pl.legend(loc=0) pl.title("Source and Target distribution") ############################################################################### # Spherical Sliced Wasserstein for different seeds and number of projections # -------------------------------------------------------------------------- n_seed = 20 n_projections_arr = np.logspace(0, 3, 10, dtype=int) res = np.empty((n_seed, 10)) # %% Compute statistics for seed in range(n_seed): for i, n_projections in enumerate(n_projections_arr): res[seed, i] = ot.sliced_wasserstein_sphere(xs, xt, a, b, n_projections, seed=seed, p=1) res_mean = np.mean(res, axis=0) res_std = np.std(res, axis=0) ############################################################################### # Plot Spherical Sliced Wasserstein # --------------------------------- pl.figure(2) pl.plot(n_projections_arr, res_mean, label=r"$SSW_1$") pl.fill_between(n_projections_arr, res_mean - 2 * res_std, res_mean + 2 * res_std, alpha=0.5) pl.legend() pl.xscale('log') pl.xlabel("Number of projections") pl.ylabel("Distance") pl.title('Spherical Sliced Wasserstein Distance with 95% confidence interval') pl.show() python-pot-0.9.3+dfsg/examples/unbalanced-partial/000077500000000000000000000000001455713015700221375ustar00rootroot00000000000000python-pot-0.9.3+dfsg/examples/unbalanced-partial/README.txt000066400000000000000000000000641455713015700236350ustar00rootroot00000000000000 Unbalanced and Partial OT -------------------------python-pot-0.9.3+dfsg/examples/unbalanced-partial/plot_UOT_1D.py000066400000000000000000000041311455713015700245410ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ =============================== 1D Unbalanced optimal transport =============================== This example illustrates the computation of Unbalanced Optimal transport using a Kullback-Leibler relaxation. 
""" # Author: Hicham Janati # # License: MIT License # sphinx_gallery_thumbnail_number = 4 import numpy as np import matplotlib.pylab as pl import ot import ot.plot from ot.datasets import make_1D_gauss as gauss ############################################################################## # Generate data # ------------- #%% parameters n = 100 # nb bins # bin positions x = np.arange(n, dtype=np.float64) # Gaussian distributions a = gauss(n, m=20, s=5) # m= mean, s= std b = gauss(n, m=60, s=10) # make distributions unbalanced b *= 5. # loss matrix M = ot.dist(x.reshape((n, 1)), x.reshape((n, 1))) M /= M.max() ############################################################################## # Plot distributions and loss matrix # ---------------------------------- #%% plot the distributions pl.figure(1, figsize=(6.4, 3)) pl.plot(x, a, 'b', label='Source distribution') pl.plot(x, b, 'r', label='Target distribution') pl.legend() # plot distributions and loss matrix pl.figure(2, figsize=(5, 5)) ot.plot.plot1D_mat(a, b, M, 'Cost matrix M') ############################################################################## # Solve Unbalanced Sinkhorn # ------------------------- # Sinkhorn epsilon = 0.1 # entropy parameter alpha = 1. # Unbalanced KL relaxation parameter Gs = ot.unbalanced.sinkhorn_unbalanced(a, b, M, epsilon, alpha, verbose=True) pl.figure(3, figsize=(5, 5)) ot.plot.plot1D_mat(a, b, Gs, 'UOT matrix Sinkhorn') pl.show() # %% # plot the transported mass # ------------------------- pl.figure(4, figsize=(6.4, 3)) pl.plot(x, a, 'b', label='Source distribution') pl.plot(x, b, 'r', label='Target distribution') pl.fill(x, Gs.sum(1), 'b', alpha=0.5, label='Transported source') pl.fill(x, Gs.sum(0), 'r', alpha=0.5, label='Transported target') pl.legend(loc='upper right') pl.title('Distributions and transported mass for UOT') python-pot-0.9.3+dfsg/examples/unbalanced-partial/plot_UOT_barycenter_1D.py000066400000000000000000000075171455713015700267720ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ =========================================================== 1D Wasserstein barycenter demo for Unbalanced distributions =========================================================== This example illustrates the computation of regularized Wasserstein Barycenter as proposed in [10] for Unbalanced inputs. [10] Chizat, L., Peyré, G., Schmitzer, B., & Vialard, F. X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816. """ # Author: Hicham Janati # # License: MIT License # sphinx_gallery_thumbnail_number = 2 import numpy as np import matplotlib.pylab as pl import ot # necessary for 3d plot even if not used from mpl_toolkits.mplot3d import Axes3D # noqa from matplotlib.collections import PolyCollection ############################################################################## # Generate data # ------------- # parameters n = 100 # nb bins # bin positions x = np.arange(n, dtype=np.float64) # Gaussian distributions a1 = ot.datasets.make_1D_gauss(n, m=20, s=5) # m= mean, s= std a2 = ot.datasets.make_1D_gauss(n, m=60, s=8) # make unbalanced dists a2 *= 3. 
# creating matrix A containing all distributions A = np.vstack((a1, a2)).T n_distributions = A.shape[1] # loss matrix + normalization M = ot.utils.dist0(n) M /= M.max() ############################################################################## # Plot data # --------- # plot the distributions pl.figure(1, figsize=(6.4, 3)) for i in range(n_distributions): pl.plot(x, A[:, i]) pl.title('Distributions') pl.tight_layout() ############################################################################## # Barycenter computation # ---------------------- # non weighted barycenter computation weight = 0.5 # 0<=weight<=1 weights = np.array([1 - weight, weight]) # l2bary bary_l2 = A.dot(weights) # wasserstein reg = 1e-3 alpha = 1. bary_wass = ot.unbalanced.barycenter_unbalanced(A, M, reg, alpha, weights=weights) pl.figure(2) pl.clf() pl.subplot(2, 1, 1) for i in range(n_distributions): pl.plot(x, A[:, i]) pl.title('Distributions') pl.subplot(2, 1, 2) pl.plot(x, bary_l2, 'r', label='l2') pl.plot(x, bary_wass, 'g', label='Wasserstein') pl.legend() pl.title('Barycenters') pl.tight_layout() ############################################################################## # Barycentric interpolation # ------------------------- # barycenter interpolation n_weight = 11 weight_list = np.linspace(0, 1, n_weight) B_l2 = np.zeros((n, n_weight)) B_wass = np.copy(B_l2) for i in range(0, n_weight): weight = weight_list[i] weights = np.array([1 - weight, weight]) B_l2[:, i] = A.dot(weights) B_wass[:, i] = ot.unbalanced.barycenter_unbalanced(A, M, reg, alpha, weights=weights) # plot interpolation pl.figure(3) cmap = pl.cm.get_cmap('viridis') verts = [] zs = weight_list for i, z in enumerate(zs): ys = B_l2[:, i] verts.append(list(zip(x, ys))) ax = pl.gcf().add_subplot(projection='3d') poly = PolyCollection(verts, facecolors=[cmap(a) for a in weight_list]) poly.set_alpha(0.7) ax.add_collection3d(poly, zs=zs, zdir='y') ax.set_xlabel('x') ax.set_xlim3d(0, n) ax.set_ylabel(r'$\alpha$') ax.set_ylim3d(0, 1) ax.set_zlabel('') ax.set_zlim3d(0, B_l2.max() * 1.01) pl.title('Barycenter interpolation with l2') pl.tight_layout() pl.figure(4) cmap = pl.cm.get_cmap('viridis') verts = [] zs = weight_list for i, z in enumerate(zs): ys = B_wass[:, i] verts.append(list(zip(x, ys))) ax = pl.gcf().add_subplot(projection='3d') poly = PolyCollection(verts, facecolors=[cmap(a) for a in weight_list]) poly.set_alpha(0.7) ax.add_collection3d(poly, zs=zs, zdir='y') ax.set_xlabel('x') ax.set_xlim3d(0, n) ax.set_ylabel(r'$\alpha$') ax.set_ylim3d(0, 1) ax.set_zlabel('') ax.set_zlim3d(0, B_l2.max() * 1.01) pl.title('Barycenter interpolation with Wasserstein') pl.tight_layout() pl.show() python-pot-0.9.3+dfsg/examples/unbalanced-partial/plot_partial_wass_and_gromov.py000077500000000000000000000127701455713015700304650ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ ================================================== Partial Wasserstein and Gromov-Wasserstein example ================================================== This example is designed to show how to use the Partial (Gromov-)Wasserstein distance computation in POT. 
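
As a reference (an added note giving the usual formulation), partial OT
transports only a fraction :math:`m` of the total mass:

.. math::
    \gamma^* = \arg\min_{\gamma \geq 0} \langle \gamma, \mathbf{M} \rangle_F
    \quad s.t. \quad \gamma \mathbf{1} \leq \mathbf{a}, \quad
    \gamma^T \mathbf{1} \leq \mathbf{b}, \quad
    \mathbf{1}^T \gamma \mathbf{1} = m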
""" # Author: Laetitia Chapel # License: MIT License # sphinx_gallery_thumbnail_number = 2 # necessary for 3d plot even if not used from mpl_toolkits.mplot3d import Axes3D # noqa import scipy as sp import numpy as np import matplotlib.pylab as pl import ot ############################################################################# # # Sample two 2D Gaussian distributions and plot them # -------------------------------------------------- # # For demonstration purpose, we sample two Gaussian distributions in 2-d # spaces and add some random noise. n_samples = 20 # nb samples (gaussian) n_noise = 20 # nb of samples (noise) mu = np.array([0, 0]) cov = np.array([[1, 0], [0, 2]]) xs = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov) xs = np.append(xs, (np.random.rand(n_noise, 2) + 1) * 4).reshape((-1, 2)) xt = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov) xt = np.append(xt, (np.random.rand(n_noise, 2) + 1) * -3).reshape((-1, 2)) M = sp.spatial.distance.cdist(xs, xt) fig = pl.figure() ax1 = fig.add_subplot(131) ax1.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples') ax2 = fig.add_subplot(132) ax2.scatter(xt[:, 0], xt[:, 1], color='r') ax3 = fig.add_subplot(133) ax3.imshow(M) pl.show() ############################################################################# # # Compute partial Wasserstein plans and distance # ---------------------------------------------- p = ot.unif(n_samples + n_noise) q = ot.unif(n_samples + n_noise) w0, log0 = ot.partial.partial_wasserstein(p, q, M, m=0.5, log=True) w, log = ot.partial.entropic_partial_wasserstein(p, q, M, reg=0.1, m=0.5, log=True) print('Partial Wasserstein distance (m = 0.5): ' + str(log0['partial_w_dist'])) print('Entropic partial Wasserstein distance (m = 0.5): ' + str(log['partial_w_dist'])) pl.figure(1, (10, 5)) pl.subplot(1, 2, 1) pl.imshow(w0, cmap='jet') pl.title('Partial Wasserstein') pl.subplot(1, 2, 2) pl.imshow(w, cmap='jet') pl.title('Entropic partial Wasserstein') pl.show() ############################################################################# # # Sample one 2D and 3D Gaussian distributions and plot them # --------------------------------------------------------- # # The Gromov-Wasserstein distance allows to compute distances with samples that # do not belong to the same metric space. For demonstration purpose, we sample # two Gaussian distributions in 2- and 3-dimensional spaces. 
n_samples = 20 # nb samples n_noise = 10 # nb of samples (noise) p = ot.unif(n_samples + n_noise) q = ot.unif(n_samples + n_noise) mu_s = np.array([0, 0]) cov_s = np.array([[1, 0], [0, 1]]) mu_t = np.array([0, 0, 0]) cov_t = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s) xs = np.concatenate((xs, ((np.random.rand(n_noise, 2) + 1) * 4)), axis=0) P = sp.linalg.sqrtm(cov_t) xt = np.random.randn(n_samples, 3).dot(P) + mu_t xt = np.concatenate((xt, ((np.random.rand(n_noise, 3) + 1) * 10)), axis=0) fig = pl.figure() ax1 = fig.add_subplot(121) ax1.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples') ax2 = fig.add_subplot(122, projection='3d') ax2.scatter(xt[:, 0], xt[:, 1], xt[:, 2], color='r') pl.show() ############################################################################# # # Compute partial Gromov-Wasserstein plans and distance # ----------------------------------------------------- C1 = sp.spatial.distance.cdist(xs, xs) C2 = sp.spatial.distance.cdist(xt, xt) # transport 100% of the mass print('------m = 1') m = 1 res0, log0 = ot.partial.partial_gromov_wasserstein(C1, C2, p, q, m=m, log=True) res, log = ot.partial.entropic_partial_gromov_wasserstein(C1, C2, p, q, 10, m=m, log=True, verbose=True) print('Wasserstein distance (m = 1): ' + str(log0['partial_gw_dist'])) print('Entropic Wasserstein distance (m = 1): ' + str(log['partial_gw_dist'])) pl.figure(1, (10, 5)) pl.title("mass to be transported m = 1") pl.subplot(1, 2, 1) pl.imshow(res0, cmap='jet') pl.title('Gromov-Wasserstein') pl.subplot(1, 2, 2) pl.imshow(res, cmap='jet') pl.title('Entropic Gromov-Wasserstein') pl.show() # transport 2/3 of the mass print('------m = 2/3') m = 2 / 3 res0, log0 = ot.partial.partial_gromov_wasserstein(C1, C2, p, q, m=m, log=True, verbose=True) res, log = ot.partial.entropic_partial_gromov_wasserstein(C1, C2, p, q, 10, m=m, log=True, verbose=True) print('Partial Wasserstein distance (m = 2/3): ' + str(log0['partial_gw_dist'])) print('Entropic partial Wasserstein distance (m = 2/3): ' + str(log['partial_gw_dist'])) pl.figure(1, (10, 5)) pl.title("mass to be transported m = 2/3") pl.subplot(1, 2, 1) pl.imshow(res0, cmap='jet') pl.title('Partial Gromov-Wasserstein') pl.subplot(1, 2, 2) pl.imshow(res, cmap='jet') pl.title('Entropic partial Gromov-Wasserstein') pl.show() python-pot-0.9.3+dfsg/examples/unbalanced-partial/plot_regpath.py000066400000000000000000000160131455713015700252020ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ ================================================================ Regularization path of l2-penalized unbalanced optimal transport ================================================================ This example illustrate the regularization path for 2D unbalanced optimal transport. We present here both the fully relaxed case and the semi-relaxed case. [Chapel et al., 2021] Chapel, L., Flamary, R., Wu, H., Févotte, C., and Gasso, G. (2021). Unbalanced optimal transport through non-negative penalized linear regression. 
""" # Author: Haoran Wu # License: MIT License # sphinx_gallery_thumbnail_number = 2 import numpy as np import matplotlib.pylab as pl import ot import matplotlib.animation as animation ############################################################################## # Generate data # ------------- #%% parameters and data generation n = 20 # nb samples mu_s = np.array([-1, -1]) cov_s = np.array([[1, 0], [0, 1]]) mu_t = np.array([4, 4]) cov_t = np.array([[1, -.8], [-.8, 1]]) np.random.seed(0) xs = ot.datasets.make_2D_samples_gauss(n, mu_s, cov_s) xt = ot.datasets.make_2D_samples_gauss(n, mu_t, cov_t) a, b = np.ones((n,)) / n, np.ones((n,)) / n # uniform distribution on samples # loss matrix M = ot.dist(xs, xt) M /= M.max() ############################################################################## # Plot data # --------- #%% plot 2 distribution samples pl.figure(1) pl.scatter(xs[:, 0], xs[:, 1], c='C0', label='Source') pl.scatter(xt[:, 0], xt[:, 1], c='C1', label='Target') pl.legend(loc=2) pl.title('Source and target distributions') pl.show() ############################################################################## # Compute semi-relaxed and fully relaxed regularization paths # ----------------------------------------------------------- #%% final_gamma = 1e-6 t, t_list, g_list = ot.regpath.regularization_path(a, b, M, reg=final_gamma, semi_relaxed=False) t2, t_list2, g_list2 = ot.regpath.regularization_path(a, b, M, reg=final_gamma, semi_relaxed=True) ############################################################################## # Plot the regularization path # ---------------------------- # # The OT plan is plotted as a function of $\gamma$ that is the inverse of the # weight on the marginal relaxations. #%% fully relaxed l2-penalized UOT pl.figure(2) selected_gamma = [2e-1, 1e-1, 5e-2, 1e-3] for p in range(4): tp = ot.regpath.compute_transport_plan(selected_gamma[p], g_list, t_list) P = tp.reshape((n, n)) pl.subplot(2, 2, p + 1) if P.sum() > 0: P = P / P.max() for i in range(n): for j in range(n): if P[i, j] > 0: pl.plot([xs[i, 0], xt[j, 0]], [xs[i, 1], xt[j, 1]], color='C2', alpha=P[i, j] * 0.3) pl.scatter(xs[:, 0], xs[:, 1], c='C0', alpha=0.2) pl.scatter(xt[:, 0], xt[:, 1], c='C1', alpha=0.2) pl.scatter(xs[:, 0], xs[:, 1], c='C0', s=P.sum(1).ravel() * (1 + p) * 2, label='Re-weighted source', alpha=1) pl.scatter(xt[:, 0], xt[:, 1], c='C1', s=P.sum(0).ravel() * (1 + p) * 2, label='Re-weighted target', alpha=1) pl.plot([], [], color='C2', alpha=0.8, label='OT plan') pl.title(r'$\ell_2$ UOT $\gamma$={}'.format(selected_gamma[p]), fontsize=11) if p < 2: pl.xticks(()) pl.show() # %% # Animation of the regpath for UOT l2 # ----------------------------------- nv = 50 g_list_v = np.logspace(-.5, -2.5, nv) pl.figure(3) def _update_plot(iv): pl.clf() tp = ot.regpath.compute_transport_plan(g_list_v[iv], g_list, t_list) P = tp.reshape((n, n)) if P.sum() > 0: P = P / P.max() for i in range(n): for j in range(n): if P[i, j] > 0: pl.plot([xs[i, 0], xt[j, 0]], [xs[i, 1], xt[j, 1]], color='C2', alpha=P[i, j] * 0.5) pl.scatter(xs[:, 0], xs[:, 1], c='C0', alpha=0.2) pl.scatter(xt[:, 0], xt[:, 1], c='C1', alpha=0.2) pl.scatter(xs[:, 0], xs[:, 1], c='C0', s=P.sum(1).ravel() * (1 + p) * 4, label='Re-weighted source', alpha=1) pl.scatter(xt[:, 0], xt[:, 1], c='C1', s=P.sum(0).ravel() * (1 + p) * 4, label='Re-weighted target', alpha=1) pl.plot([], [], color='C2', alpha=0.8, label='OT plan') pl.title(r'$\ell_2$ UOT $\gamma$={:1.3f}'.format(g_list_v[iv]), fontsize=11) return 1 i = 0 _update_plot(i) ani = 
animation.FuncAnimation(pl.gcf(), _update_plot, nv, interval=100, repeat_delay=2000) ############################################################################## # Plot the semi-relaxed regularization path # ----------------------------------------- #%% semi-relaxed l2-penalized UOT pl.figure(4) selected_gamma = [10, 1, 1e-1, 1e-2] for p in range(4): tp = ot.regpath.compute_transport_plan(selected_gamma[p], g_list2, t_list2) P = tp.reshape((n, n)) pl.subplot(2, 2, p + 1) if P.sum() > 0: P = P / P.max() for i in range(n): for j in range(n): if P[i, j] > 0: pl.plot([xs[i, 0], xt[j, 0]], [xs[i, 1], xt[j, 1]], color='C2', alpha=P[i, j] * 0.3) pl.scatter(xs[:, 0], xs[:, 1], c='C0', alpha=0.2) pl.scatter(xt[:, 0], xt[:, 1], c='C1', alpha=1, label='Target marginal') pl.scatter(xs[:, 0], xs[:, 1], c='C0', s=P.sum(1).ravel() * 2 * (1 + p), label='Source marginal', alpha=1) pl.plot([], [], color='C2', alpha=0.8, label='OT plan') pl.title(r'Semi-relaxed $l_2$ UOT $\gamma$={}'.format(selected_gamma[p]), fontsize=11) if p < 2: pl.xticks(()) pl.show() # %% # Animation of the regpath for semi-relaxed UOT l2 # ------------------------------------------------ nv = 50 g_list_v = np.logspace(2, -2, nv) pl.figure(5) def _update_plot(iv): pl.clf() tp = ot.regpath.compute_transport_plan(g_list_v[iv], g_list2, t_list2) P = tp.reshape((n, n)) if P.sum() > 0: P = P / P.max() for i in range(n): for j in range(n): if P[i, j] > 0: pl.plot([xs[i, 0], xt[j, 0]], [xs[i, 1], xt[j, 1]], color='C2', alpha=P[i, j] * 0.5) pl.scatter(xs[:, 0], xs[:, 1], c='C0', alpha=0.2) pl.scatter(xt[:, 0], xt[:, 1], c='C1', alpha=0.2) pl.scatter(xs[:, 0], xs[:, 1], c='C0', s=P.sum(1).ravel() * (1 + p) * 4, label='Re-weighted source', alpha=1) pl.scatter(xt[:, 0], xt[:, 1], c='C1', s=P.sum(0).ravel() * (1 + p) * 4, label='Re-weighted target', alpha=1) pl.plot([], [], color='C2', alpha=0.8, label='OT plan') pl.title(r'Semi-relaxed $\ell_2$ UOT $\gamma$={:1.3f}'.format(g_list_v[iv]), fontsize=11) return 1 i = 0 _update_plot(i) ani = animation.FuncAnimation(pl.gcf(), _update_plot, nv, interval=100, repeat_delay=2000) python-pot-0.9.3+dfsg/examples/unbalanced-partial/plot_unbalanced_OT.py000066400000000000000000000070241455713015700262500ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ ============================================================== 2D examples of exact and entropic unbalanced optimal transport ============================================================== This example is designed to show how to compute unbalanced and partial OT in POT. UOT aims at solving the following optimization problem: .. math:: W = \min_{\gamma} <\gamma, \mathbf{M}>_F + \mathrm{reg}\cdot\Omega(\gamma) + \mathrm{reg_m} \cdot \mathrm{div}(\gamma \mathbf{1}, \mathbf{a}) + \mathrm{reg_m} \cdot \mathrm{div}(\gamma^T \mathbf{1}, \mathbf{b}) s.t. \gamma \geq 0 where :math:`\mathrm{div}` is a divergence. When using the entropic UOT, :math:`\mathrm{reg}>0` and :math:`\mathrm{div}` should be the Kullback-Leibler divergence. When solving exact UOT, :math:`\mathrm{reg}=0` and :math:`\mathrm{div}` can be either the Kullback-Leibler or the quadratic divergence. Using :math:`\ell_1` norm gives the so-called partial OT. 
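
For completeness (an added note), the generalized Kullback-Leibler divergence
commonly used as the marginal relaxation term in this setting is

.. math::
    \mathrm{KL}(\mathbf{u}, \mathbf{v}) =
    \sum_i u_i \log\left(\frac{u_i}{v_i}\right) - u_i + v_i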
""" # Author: Laetitia Chapel # License: MIT License import numpy as np import matplotlib.pylab as pl import ot ############################################################################## # Generate data # ------------- # %% parameters and data generation n = 40 # nb samples mu_s = np.array([-1, -1]) cov_s = np.array([[1, 0], [0, 1]]) mu_t = np.array([4, 4]) cov_t = np.array([[1, -.8], [-.8, 1]]) np.random.seed(0) xs = ot.datasets.make_2D_samples_gauss(n, mu_s, cov_s) xt = ot.datasets.make_2D_samples_gauss(n, mu_t, cov_t) n_noise = 10 xs = np.concatenate((xs, ((np.random.rand(n_noise, 2) - 4))), axis=0) xt = np.concatenate((xt, ((np.random.rand(n_noise, 2) + 6))), axis=0) n = n + n_noise a, b = np.ones((n,)) / n, np.ones((n,)) / n # uniform distribution on samples # loss matrix M = ot.dist(xs, xt) M /= M.max() ############################################################################## # Compute entropic kl-regularized UOT, kl- and l2-regularized UOT # ----------- reg = 0.005 reg_m_kl = 0.05 reg_m_l2 = 5 mass = 0.7 entropic_kl_uot = ot.unbalanced.sinkhorn_unbalanced(a, b, M, reg, reg_m_kl) kl_uot = ot.unbalanced.mm_unbalanced(a, b, M, reg_m_kl, div='kl') l2_uot = ot.unbalanced.mm_unbalanced(a, b, M, reg_m_l2, div='l2') partial_ot = ot.partial.partial_wasserstein(a, b, M, m=mass) ############################################################################## # Plot the results # ---------------- pl.figure(2) transp = [partial_ot, l2_uot, kl_uot, entropic_kl_uot] title = ["partial OT \n m=" + str(mass), "$\ell_2$-UOT \n $\mathrm{reg_m}$=" + str(reg_m_l2), "kl-UOT \n $\mathrm{reg_m}$=" + str(reg_m_kl), "entropic kl-UOT \n $\mathrm{reg_m}$=" + str(reg_m_kl)] for p in range(4): pl.subplot(2, 4, p + 1) P = transp[p] if P.sum() > 0: P = P / P.max() for i in range(n): for j in range(n): if P[i, j] > 0: pl.plot([xs[i, 0], xt[j, 0]], [xs[i, 1], xt[j, 1]], color='C2', alpha=P[i, j] * 0.3) pl.scatter(xs[:, 0], xs[:, 1], c='C0', alpha=0.2) pl.scatter(xt[:, 0], xt[:, 1], c='C1', alpha=0.2) pl.scatter(xs[:, 0], xs[:, 1], c='C0', s=P.sum(1).ravel() * (1 + p) * 2) pl.scatter(xt[:, 0], xt[:, 1], c='C1', s=P.sum(0).ravel() * (1 + p) * 2) pl.title(title[p]) pl.yticks(()) pl.xticks(()) if p < 1: pl.ylabel("mappings") pl.subplot(2, 4, p + 5) pl.imshow(P, cmap='jet') pl.yticks(()) pl.xticks(()) if p < 1: pl.ylabel("transport plans") pl.show() python-pot-0.9.3+dfsg/ot/000077500000000000000000000000001455713015700152155ustar00rootroot00000000000000python-pot-0.9.3+dfsg/ot/__init__.py000066400000000000000000000057601455713015700173360ustar00rootroot00000000000000""" .. warning:: The list of automatically imported sub-modules is as follows: :py:mod:`ot.lp`, :py:mod:`ot.bregman`, :py:mod:`ot.optim` :py:mod:`ot.utils`, :py:mod:`ot.datasets`, :py:mod:`ot.gromov`, :py:mod:`ot.smooth` :py:mod:`ot.stochastic`, :py:mod:`ot.partial`, :py:mod:`ot.regpath` , :py:mod:`ot.unbalanced`, :py:mod`ot.mapping`. The following sub-modules are not imported due to additional dependencies: - :any:`ot.dr` : depends on :code:`pymanopt` and :code:`autograd`. - :any:`ot.plot` : depends on :code:`matplotlib` """ # Author: Remi Flamary # Nicolas Courty # # License: MIT License # All submodules and packages from . import lp from . import bregman from . import optim from . import utils from . import datasets from . import da from . import gromov from . import smooth from . import stochastic from . import unbalanced from . import partial from . import backend from . import regpath from . import weak from . import factored from . 
import solvers from . import gaussian from . import lowrank # OT functions from .lp import (emd, emd2, emd_1d, emd2_1d, wasserstein_1d, binary_search_circle, wasserstein_circle, semidiscrete_wasserstein2_unif_circle) from .bregman import sinkhorn, sinkhorn2, barycenter from .unbalanced import (sinkhorn_unbalanced, barycenter_unbalanced, sinkhorn_unbalanced2) from .da import sinkhorn_lpl1_mm from .sliced import (sliced_wasserstein_distance, max_sliced_wasserstein_distance, sliced_wasserstein_sphere, sliced_wasserstein_sphere_unif) from .gromov import (gromov_wasserstein, gromov_wasserstein2, gromov_barycenters, fused_gromov_wasserstein, fused_gromov_wasserstein2) from .weak import weak_optimal_transport from .factored import factored_optimal_transport from .solvers import solve, solve_gromov, solve_sample from .lowrank import lowrank_sinkhorn # utils functions from .utils import dist, unif, tic, toc, toq __version__ = "0.9.3" __all__ = ['emd', 'emd2', 'emd_1d', 'sinkhorn', 'sinkhorn2', 'utils', 'datasets', 'bregman', 'lp', 'tic', 'toc', 'toq', 'gromov', 'emd2_1d', 'wasserstein_1d', 'backend', 'gaussian', 'dist', 'unif', 'barycenter', 'sinkhorn_lpl1_mm', 'da', 'optim', 'sinkhorn_unbalanced', 'barycenter_unbalanced', 'sinkhorn_unbalanced2', 'sliced_wasserstein_distance', 'sliced_wasserstein_sphere', 'gromov_wasserstein', 'gromov_wasserstein2', 'gromov_barycenters', 'fused_gromov_wasserstein', 'fused_gromov_wasserstein2', 'max_sliced_wasserstein_distance', 'weak_optimal_transport', 'factored_optimal_transport', 'solve', 'solve_gromov','solve_sample', 'smooth', 'stochastic', 'unbalanced', 'partial', 'regpath', 'solvers', 'binary_search_circle', 'wasserstein_circle', 'semidiscrete_wasserstein2_unif_circle', 'sliced_wasserstein_sphere_unif', 'lowrank_sinkhorn'] python-pot-0.9.3+dfsg/ot/backend.py000066400000000000000000002741241455713015700171700ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Multi-lib backend for POT The goal is to write backend-agnostic code. Whether you're using Numpy, PyTorch, Jax, Cupy, or Tensorflow, POT code should work nonetheless. To achieve that, POT provides backend classes which implements functions in their respective backend imitating Numpy API. As a convention, we use nx instead of np to refer to the backend. Examples -------- >>> from ot.utils import list_to_array >>> from ot.backend import get_backend >>> def f(a, b): # the function does not know which backend to use ... a, b = list_to_array(a, b) # if a list in given, make it an array ... nx = get_backend(a, b) # infer the backend from the arguments ... c = nx.dot(a, b) # now use the backend to do any calculation ... return c .. warning:: Tensorflow only works with the Numpy API. To activate it, please run the following: .. code-block:: from tensorflow.python.ops.numpy_ops import np_config np_config.enable_numpy_behavior() Performance ----------- - CPU: Intel(R) Xeon(R) Gold 6248 CPU @ 2.50GHz - GPU: Tesla V100-SXM2-32GB - Date of the benchmark: December 8th, 2021 - Commit of benchmark: PR #316, https://github.com/PythonOT/POT/pull/316 .. raw:: html
    <table>
    <tr><th align="center" colspan="8">Sinkhorn Knopp - Averaged on 100 runs</th></tr>
    <tr><td align="center">Bitsize</td><td align="center" colspan="7">32 bits</td></tr>
    <tr><td align="center">Device</td><td align="center" colspan="3">CPU</td><td align="center" colspan="4">GPU</td></tr>
    <tr><td align="center">Sample size</td><td>Numpy</td><td>Pytorch</td><td>Tensorflow</td><td>Cupy</td><td>Jax</td><td>Pytorch</td><td>Tensorflow</td></tr>
    <tr><td align="center">50</td><td>0.0008</td><td>0.0022</td><td>0.0151</td><td>0.0095</td><td>0.0193</td><td>0.0051</td><td>0.0293</td></tr>
    <tr><td align="center">100</td><td>0.0005</td><td>0.0013</td><td>0.0097</td><td>0.0057</td><td>0.0115</td><td>0.0029</td><td>0.0173</td></tr>
    <tr><td align="center">500</td><td>0.0009</td><td>0.0016</td><td>0.0110</td><td>0.0058</td><td>0.0115</td><td>0.0029</td><td>0.0166</td></tr>
    <tr><td align="center">1000</td><td>0.0021</td><td>0.0021</td><td>0.0145</td><td>0.0056</td><td>0.0118</td><td>0.0029</td><td>0.0168</td></tr>
    <tr><td align="center">2000</td><td>0.0069</td><td>0.0043</td><td>0.0278</td><td>0.0059</td><td>0.0118</td><td>0.0030</td><td>0.0165</td></tr>
    <tr><td align="center">5000</td><td>0.0707</td><td>0.0314</td><td>0.1395</td><td>0.0074</td><td>0.0125</td><td>0.0035</td><td>0.0198</td></tr>
    <tr><td colspan="8">&nbsp;</td></tr>
    <tr><td align="center">Bitsize</td><td align="center" colspan="7">64 bits</td></tr>
    <tr><td align="center">Device</td><td align="center" colspan="3">CPU</td><td align="center" colspan="4">GPU</td></tr>
    <tr><td align="center">Sample size</td><td>Numpy</td><td>Pytorch</td><td>Tensorflow</td><td>Cupy</td><td>Jax</td><td>Pytorch</td><td>Tensorflow</td></tr>
    <tr><td align="center">50</td><td>0.0008</td><td>0.0020</td><td>0.0154</td><td>0.0093</td><td>0.0191</td><td>0.0051</td><td>0.0328</td></tr>
    <tr><td align="center">100</td><td>0.0005</td><td>0.0013</td><td>0.0094</td><td>0.0056</td><td>0.0114</td><td>0.0029</td><td>0.0169</td></tr>
    <tr><td align="center">500</td><td>0.0013</td><td>0.0017</td><td>0.0120</td><td>0.0059</td><td>0.0116</td><td>0.0029</td><td>0.0168</td></tr>
    <tr><td align="center">1000</td><td>0.0034</td><td>0.0027</td><td>0.0177</td><td>0.0058</td><td>0.0118</td><td>0.0029</td><td>0.0167</td></tr>
    <tr><td align="center">2000</td><td>0.0146</td><td>0.0075</td><td>0.0436</td><td>0.0059</td><td>0.0120</td><td>0.0029</td><td>0.0165</td></tr>
    <tr><td align="center">5000</td><td>0.1467</td><td>0.0568</td><td>0.2468</td><td>0.0077</td><td>0.0146</td><td>0.0045</td><td>0.0204</td></tr>
    </table>
""" # Author: Remi Flamary # Nicolas Courty # # License: MIT License import os import time import warnings import numpy as np import scipy import scipy.linalg import scipy.special as special from scipy.sparse import coo_matrix, csr_matrix, issparse DISABLE_TORCH_KEY = 'POT_BACKEND_DISABLE_PYTORCH' DISABLE_JAX_KEY = 'POT_BACKEND_DISABLE_JAX' DISABLE_CUPY_KEY = 'POT_BACKEND_DISABLE_CUPY' DISABLE_TF_KEY = 'POT_BACKEND_DISABLE_TENSORFLOW' if not os.environ.get(DISABLE_TORCH_KEY, False): try: import torch torch_type = torch.Tensor except ImportError: torch = False torch_type = float else: torch = False torch_type = float if not os.environ.get(DISABLE_JAX_KEY, False): try: import jax import jax.numpy as jnp import jax.scipy.special as jspecial from jax.lib import xla_bridge jax_type = jax.numpy.ndarray except ImportError: jax = False jax_type = float else: jax = False jax_type = float if not os.environ.get(DISABLE_CUPY_KEY, False): try: import cupy as cp import cupyx cp_type = cp.ndarray except ImportError: cp = False cp_type = float else: cp = False cp_type = float if not os.environ.get(DISABLE_TF_KEY, False): try: import tensorflow as tf import tensorflow.experimental.numpy as tnp tf_type = tf.Tensor except ImportError: tf = False tf_type = float else: tf = False tf_type = float str_type_error = "All array should be from the same type/backend. Current types are : {}" # Mapping between argument types and the existing backend _BACKEND_IMPLEMENTATIONS = [] _BACKENDS = {} def _register_backend_implementation(backend_impl): _BACKEND_IMPLEMENTATIONS.append(backend_impl) def _get_backend_instance(backend_impl): if backend_impl.__name__ not in _BACKENDS: _BACKENDS[backend_impl.__name__] = backend_impl() return _BACKENDS[backend_impl.__name__] def _check_args_backend(backend_impl, args): is_instance = set(isinstance(arg, backend_impl.__type__) for arg in args) # check that all arguments matched or not the type if len(is_instance) == 1: return is_instance.pop() # Otherwise return an error raise ValueError(str_type_error.format([type(arg) for arg in args])) def get_backend_list(): """Returns instances of all available backends. Note that the function forces all detected implementations to be instantiated even if specific backend was not use before. Be careful as instantiation of the backend might lead to side effects, like GPU memory pre-allocation. See the documentation for more details. If you only need to know which implementations are available, use `:py:func:`ot.backend.get_available_backend_implementations`, which does not force instance of the backend object to be created. 
""" return [ _get_backend_instance(backend_impl) for backend_impl in get_available_backend_implementations() ] def get_available_backend_implementations(): """Returns the list of available backend implementations.""" return _BACKEND_IMPLEMENTATIONS def get_backend(*args): """Returns the proper backend for a list of input arrays Accepts None entries in the arguments, and ignores them Also raises TypeError if all arrays are not from the same backend """ args = [arg for arg in args if arg is not None] # exclude None entries # check that some arrays given if not len(args) > 0: raise ValueError(" The function takes at least one (non-None) parameter") for backend_impl in _BACKEND_IMPLEMENTATIONS: if _check_args_backend(backend_impl, args): return _get_backend_instance(backend_impl) raise ValueError("Unknown type of non implemented backend.") def to_numpy(*args): """Returns numpy arrays from any compatible backend""" if len(args) == 1: return get_backend(args[0]).to_numpy(args[0]) else: return [get_backend(a).to_numpy(a) for a in args] class Backend(): """ Backend abstract class. Implementations: :py:class:`JaxBackend`, :py:class:`NumpyBackend`, :py:class:`TorchBackend`, :py:class:`CupyBackend`, :py:class:`TensorflowBackend` - The `__name__` class attribute refers to the name of the backend. - The `__type__` class attribute refers to the data structure used by the backend. """ __name__ = None __type__ = None __type_list__ = None rng_ = None def __str__(self): return self.__name__ # convert batch of tensors to numpy def to_numpy(self, *arrays): """Returns the numpy version of tensors""" if len(arrays) == 1: return self._to_numpy(arrays[0]) else: return [self._to_numpy(array) for array in arrays] # convert a tensor to numpy def _to_numpy(self, a): """Returns the numpy version of a tensor""" raise NotImplementedError() # convert batch of arrays from numpy def from_numpy(self, *arrays, type_as=None): """Creates tensors cloning a numpy array, with the given precision (defaulting to input's precision) and the given device (in case of GPUs)""" if len(arrays) == 1: return self._from_numpy(arrays[0], type_as=type_as) else: return [self._from_numpy(array, type_as=type_as) for array in arrays] # convert an array from numpy def _from_numpy(self, a, type_as=None): """Creates a tensor cloning a numpy array, with the given precision (defaulting to input's precision) and the given device (in case of GPUs)""" raise NotImplementedError() def set_gradients(self, val, inputs, grads): """Define the gradients for the value val wrt the inputs """ raise NotImplementedError() def zeros(self, shape, type_as=None): r""" Creates a tensor full of zeros. This function follows the api from :any:`numpy.zeros` See: https://numpy.org/doc/stable/reference/generated/numpy.zeros.html """ raise NotImplementedError() def ones(self, shape, type_as=None): r""" Creates a tensor full of ones. This function follows the api from :any:`numpy.ones` See: https://numpy.org/doc/stable/reference/generated/numpy.ones.html """ raise NotImplementedError() def arange(self, stop, start=0, step=1, type_as=None): r""" Returns evenly spaced values within a given interval. This function follows the api from :any:`numpy.arange` See: https://numpy.org/doc/stable/reference/generated/numpy.arange.html """ raise NotImplementedError() def full(self, shape, fill_value, type_as=None): r""" Creates a tensor with given shape, filled with given value. 
        This function follows the api from :any:`numpy.full`

        See: https://numpy.org/doc/stable/reference/generated/numpy.full.html
        """
        raise NotImplementedError()

    def eye(self, N, M=None, type_as=None):
        r"""
        Creates the identity matrix of given size.

        This function follows the api from :any:`numpy.eye`

        See: https://numpy.org/doc/stable/reference/generated/numpy.eye.html
        """
        raise NotImplementedError()

    def sum(self, a, axis=None, keepdims=False):
        r"""
        Sums tensor elements over given dimensions.

        This function follows the api from :any:`numpy.sum`

        See: https://numpy.org/doc/stable/reference/generated/numpy.sum.html
        """
        raise NotImplementedError()

    def cumsum(self, a, axis=None):
        r"""
        Returns the cumulative sum of tensor elements over given dimensions.

        This function follows the api from :any:`numpy.cumsum`

        See: https://numpy.org/doc/stable/reference/generated/numpy.cumsum.html
        """
        raise NotImplementedError()

    def max(self, a, axis=None, keepdims=False):
        r"""
        Returns the maximum of an array or maximum along given dimensions.

        This function follows the api from :any:`numpy.amax`

        See: https://numpy.org/doc/stable/reference/generated/numpy.amax.html
        """
        raise NotImplementedError()

    def min(self, a, axis=None, keepdims=False):
        r"""
        Returns the minimum of an array or minimum along given dimensions.

        This function follows the api from :any:`numpy.amin`

        See: https://numpy.org/doc/stable/reference/generated/numpy.amin.html
        """
        raise NotImplementedError()

    def maximum(self, a, b):
        r"""
        Returns element-wise maximum of array elements.

        This function follows the api from :any:`numpy.maximum`

        See: https://numpy.org/doc/stable/reference/generated/numpy.maximum.html
        """
        raise NotImplementedError()

    def minimum(self, a, b):
        r"""
        Returns element-wise minimum of array elements.

        This function follows the api from :any:`numpy.minimum`

        See: https://numpy.org/doc/stable/reference/generated/numpy.minimum.html
        """
        raise NotImplementedError()

    def sign(self, a):
        r"""
        Returns an element-wise indication of the sign of a number.

        This function follows the api from :any:`numpy.sign`

        See: https://numpy.org/doc/stable/reference/generated/numpy.sign.html
        """
        raise NotImplementedError()

    def dot(self, a, b):
        r"""
        Returns the dot product of two tensors.

        This function follows the api from :any:`numpy.dot`

        See: https://numpy.org/doc/stable/reference/generated/numpy.dot.html
        """
        raise NotImplementedError()

    def abs(self, a):
        r"""
        Computes the absolute value element-wise.

        This function follows the api from :any:`numpy.absolute`

        See: https://numpy.org/doc/stable/reference/generated/numpy.absolute.html
        """
        raise NotImplementedError()

    def exp(self, a):
        r"""
        Computes the exponential value element-wise.

        This function follows the api from :any:`numpy.exp`

        See: https://numpy.org/doc/stable/reference/generated/numpy.exp.html
        """
        raise NotImplementedError()

    def log(self, a):
        r"""
        Computes the natural logarithm, element-wise.

        This function follows the api from :any:`numpy.log`

        See: https://numpy.org/doc/stable/reference/generated/numpy.log.html
        """
        raise NotImplementedError()

    def sqrt(self, a):
        r"""
        Returns the non-negative square root of a tensor, element-wise.

        This function follows the api from :any:`numpy.sqrt`

        See: https://numpy.org/doc/stable/reference/generated/numpy.sqrt.html
        """
        raise NotImplementedError()

    def power(self, a, exponents):
        r"""
        First tensor elements raised to powers from second tensor, element-wise.
This function follows the api from :any:`numpy.power` See: https://numpy.org/doc/stable/reference/generated/numpy.power.html """ raise NotImplementedError() def norm(self, a, axis=None, keepdims=False): r""" Computes the matrix frobenius norm. This function follows the api from :any:`numpy.linalg.norm` See: https://numpy.org/doc/stable/reference/generated/numpy.linalg.norm.html """ raise NotImplementedError() def any(self, a): r""" Tests whether any tensor element along given dimensions evaluates to True. This function follows the api from :any:`numpy.any` See: https://numpy.org/doc/stable/reference/generated/numpy.any.html """ raise NotImplementedError() def isnan(self, a): r""" Tests element-wise for NaN and returns result as a boolean tensor. This function follows the api from :any:`numpy.isnan` See: https://numpy.org/doc/stable/reference/generated/numpy.isnan.html """ raise NotImplementedError() def isinf(self, a): r""" Tests element-wise for positive or negative infinity and returns result as a boolean tensor. This function follows the api from :any:`numpy.isinf` See: https://numpy.org/doc/stable/reference/generated/numpy.isinf.html """ raise NotImplementedError() def einsum(self, subscripts, *operands): r""" Evaluates the Einstein summation convention on the operands. This function follows the api from :any:`numpy.einsum` See: https://numpy.org/doc/stable/reference/generated/numpy.einsum.html """ raise NotImplementedError() def sort(self, a, axis=-1): r""" Returns a sorted copy of a tensor. This function follows the api from :any:`numpy.sort` See: https://numpy.org/doc/stable/reference/generated/numpy.sort.html """ raise NotImplementedError() def argsort(self, a, axis=None): r""" Returns the indices that would sort a tensor. This function follows the api from :any:`numpy.argsort` See: https://numpy.org/doc/stable/reference/generated/numpy.argsort.html """ raise NotImplementedError() def searchsorted(self, a, v, side='left'): r""" Finds indices where elements should be inserted to maintain order in given tensor. This function follows the api from :any:`numpy.searchsorted` See: https://numpy.org/doc/stable/reference/generated/numpy.searchsorted.html """ raise NotImplementedError() def flip(self, a, axis=None): r""" Reverses the order of elements in a tensor along given dimensions. This function follows the api from :any:`numpy.flip` See: https://numpy.org/doc/stable/reference/generated/numpy.flip.html """ raise NotImplementedError() def clip(self, a, a_min, a_max): """ Limits the values in a tensor. This function follows the api from :any:`numpy.clip` See: https://numpy.org/doc/stable/reference/generated/numpy.clip.html """ raise NotImplementedError() def repeat(self, a, repeats, axis=None): r""" Repeats elements of a tensor. This function follows the api from :any:`numpy.repeat` See: https://numpy.org/doc/stable/reference/generated/numpy.repeat.html """ raise NotImplementedError() def take_along_axis(self, arr, indices, axis): r""" Gathers elements of a tensor along given dimensions. This function follows the api from :any:`numpy.take_along_axis` See: https://numpy.org/doc/stable/reference/generated/numpy.take_along_axis.html """ raise NotImplementedError() def concatenate(self, arrays, axis=0): r""" Joins a sequence of tensors along an existing dimension. 
This function follows the api from :any:`numpy.concatenate` See: https://numpy.org/doc/stable/reference/generated/numpy.concatenate.html """ raise NotImplementedError() def zero_pad(self, a, pad_width, value=0): r""" Pads a tensor with a given value (0 by default). This function follows the api from :any:`numpy.pad` See: https://numpy.org/doc/stable/reference/generated/numpy.pad.html """ raise NotImplementedError() def argmax(self, a, axis=None): r""" Returns the indices of the maximum values of a tensor along given dimensions. This function follows the api from :any:`numpy.argmax` See: https://numpy.org/doc/stable/reference/generated/numpy.argmax.html """ raise NotImplementedError() def argmin(self, a, axis=None): r""" Returns the indices of the minimum values of a tensor along given dimensions. This function follows the api from :any:`numpy.argmin` See: https://numpy.org/doc/stable/reference/generated/numpy.argmin.html """ raise NotImplementedError() def mean(self, a, axis=None): r""" Computes the arithmetic mean of a tensor along given dimensions. This function follows the api from :any:`numpy.mean` See: https://numpy.org/doc/stable/reference/generated/numpy.mean.html """ raise NotImplementedError() def median(self, a, axis=None): r""" Computes the median of a tensor along given dimensions. This function follows the api from :any:`numpy.median` See: https://numpy.org/doc/stable/reference/generated/numpy.median.html """ raise NotImplementedError() def std(self, a, axis=None): r""" Computes the standard deviation of a tensor along given dimensions. This function follows the api from :any:`numpy.std` See: https://numpy.org/doc/stable/reference/generated/numpy.std.html """ raise NotImplementedError() def linspace(self, start, stop, num, type_as=None): r""" Returns a specified number of evenly spaced values over a given interval. This function follows the api from :any:`numpy.linspace` See: https://numpy.org/doc/stable/reference/generated/numpy.linspace.html """ raise NotImplementedError() def meshgrid(self, a, b): r""" Returns coordinate matrices from coordinate vectors (Numpy convention). This function follows the api from :any:`numpy.meshgrid` See: https://numpy.org/doc/stable/reference/generated/numpy.meshgrid.html """ raise NotImplementedError() def diag(self, a, k=0): r""" Extracts or constructs a diagonal tensor. This function follows the api from :any:`numpy.diag` See: https://numpy.org/doc/stable/reference/generated/numpy.diag.html """ raise NotImplementedError() def unique(self, a, return_inverse=False): r""" Finds unique elements of given tensor. This function follows the api from :any:`numpy.unique` See: https://numpy.org/doc/stable/reference/generated/numpy.unique.html """ raise NotImplementedError() def logsumexp(self, a, axis=None): r""" Computes the log of the sum of exponentials of input elements. This function follows the api from :any:`scipy.special.logsumexp` See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.logsumexp.html """ raise NotImplementedError() def stack(self, arrays, axis=0): r""" Joins a sequence of tensors along a new dimension. This function follows the api from :any:`numpy.stack` See: https://numpy.org/doc/stable/reference/generated/numpy.stack.html """ raise NotImplementedError() def outer(self, a, b): r""" Computes the outer product between two vectors. 
        This function follows the api from :any:`numpy.outer`

        See: https://numpy.org/doc/stable/reference/generated/numpy.outer.html
        """
        raise NotImplementedError()

    def reshape(self, a, shape):
        r"""
        Gives a new shape to a tensor without changing its data.

        This function follows the api from :any:`numpy.reshape`

        See: https://numpy.org/doc/stable/reference/generated/numpy.reshape.html
        """
        raise NotImplementedError()

    def seed(self, seed=None):
        r"""
        Sets the seed for the random generator.

        This function follows the api from :any:`numpy.random.seed`

        See: https://numpy.org/doc/stable/reference/random/generated/numpy.random.seed.html
        """
        raise NotImplementedError()

    def rand(self, *size, type_as=None):
        r"""
        Generate uniform random numbers.

        This function follows the api from :any:`numpy.random.rand`

        See: https://numpy.org/doc/stable/reference/random/generated/numpy.random.rand.html
        """
        raise NotImplementedError()

    def randn(self, *size, type_as=None):
        r"""
        Generate standard normal (Gaussian) random numbers.

        This function follows the api from :any:`numpy.random.randn`

        See: https://numpy.org/doc/stable/reference/random/generated/numpy.random.randn.html
        """
        raise NotImplementedError()

    def coo_matrix(self, data, rows, cols, shape=None, type_as=None):
        r"""
        Creates a sparse tensor in COOrdinate format.

        This function follows the api from :any:`scipy.sparse.coo_matrix`

        See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.html
        """
        raise NotImplementedError()

    def issparse(self, a):
        r"""
        Checks whether or not the input tensor is a sparse tensor.

        This function follows the api from :any:`scipy.sparse.issparse`

        See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.issparse.html
        """
        raise NotImplementedError()

    def tocsr(self, a):
        r"""
        Converts this matrix to Compressed Sparse Row format.

        This function follows the api from :any:`scipy.sparse.coo_matrix.tocsr`

        See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.tocsr.html
        """
        raise NotImplementedError()

    def eliminate_zeros(self, a, threshold=0.):
        r"""
        Removes entries smaller than the given threshold from the sparse tensor.

        This function follows the api from :any:`scipy.sparse.csr_matrix.eliminate_zeros`

        See: https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.sparse.csr_matrix.eliminate_zeros.html
        """
        raise NotImplementedError()

    def todense(self, a):
        r"""
        Converts a sparse tensor to a dense tensor.

        This function follows the api from :any:`scipy.sparse.csr_matrix.toarray`

        See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.toarray.html
        """
        raise NotImplementedError()

    def where(self, condition, x, y):
        r"""
        Returns elements chosen from x or y depending on condition.

        This function follows the api from :any:`numpy.where`

        See: https://numpy.org/doc/stable/reference/generated/numpy.where.html
        """
        raise NotImplementedError()

    def copy(self, a):
        r"""
        Returns a copy of the given tensor.

        This function follows the api from :any:`numpy.copy`

        See: https://numpy.org/doc/stable/reference/generated/numpy.copy.html
        """
        raise NotImplementedError()

    def allclose(self, a, b, rtol=1e-05, atol=1e-08, equal_nan=False):
        r"""
        Returns True if two arrays are element-wise equal within a tolerance.

        This function follows the api from :any:`numpy.allclose`

        See: https://numpy.org/doc/stable/reference/generated/numpy.allclose.html
        """
        raise NotImplementedError()

    def dtype_device(self, a):
        r"""
        Returns the dtype and the device of the given tensor.
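
        A small sketch with the NumPy backend (other backends return their
        own device objects, e.g. a ``torch.device`` for the PyTorch backend):

        >>> import numpy as np
        >>> from ot.backend import NumpyBackend
        >>> NumpyBackend().dtype_device(np.zeros(2))
        (dtype('float64'), 'cpu')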
""" raise NotImplementedError() def assert_same_dtype_device(self, a, b): r""" Checks whether or not the two given inputs have the same dtype as well as the same device """ raise NotImplementedError() def squeeze(self, a, axis=None): r""" Remove axes of length one from a. This function follows the api from :any:`numpy.squeeze`. See: https://numpy.org/doc/stable/reference/generated/numpy.squeeze.html """ raise NotImplementedError() def bitsize(self, type_as): r""" Gives the number of bits used by the data type of the given tensor. """ raise NotImplementedError() def device_type(self, type_as): r""" Returns CPU or GPU depending on the device where the given tensor is located. """ raise NotImplementedError() def _bench(self, callable, *args, n_runs=1, warmup_runs=1): r""" Executes a benchmark of the given callable with the given arguments. """ raise NotImplementedError() def solve(self, a, b): r""" Solves a linear matrix equation, or system of linear scalar equations. This function follows the api from :any:`numpy.linalg.solve`. See: https://numpy.org/doc/stable/reference/generated/numpy.linalg.solve.html """ raise NotImplementedError() def trace(self, a): r""" Returns the sum along diagonals of the array. This function follows the api from :any:`numpy.trace`. See: https://numpy.org/doc/stable/reference/generated/numpy.trace.html """ raise NotImplementedError() def inv(self, a): r""" Computes the inverse of a matrix. This function follows the api from :any:`scipy.linalg.inv`. See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.inv.html """ raise NotImplementedError() def sqrtm(self, a): r""" Computes the matrix square root. Requires input to be definite positive. This function follows the api from :any:`scipy.linalg.sqrtm`. See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.sqrtm.html """ raise NotImplementedError() def eigh(self, a): r""" Computes the eigenvalues and eigenvectors of a symmetric tensor. This function follows the api from :any:`scipy.linalg.eigh`. See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.eigh.html """ raise NotImplementedError() def kl_div(self, p, q, eps=1e-16): r""" Computes the Kullback-Leibler divergence. This function follows the api from :any:`scipy.stats.entropy`. Parameter eps is used to avoid numerical errors and is added in the log. .. math:: KL(p,q) = \sum_i p(i) \log (\frac{p(i)}{q(i)}+\epsilon) See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.entropy.html """ raise NotImplementedError() def isfinite(self, a): r""" Tests element-wise for finiteness (not infinity and not Not a Number). This function follows the api from :any:`numpy.isfinite`. See: https://numpy.org/doc/stable/reference/generated/numpy.isfinite.html """ raise NotImplementedError() def array_equal(self, a, b): r""" True if two arrays have the same shape and elements, False otherwise. This function follows the api from :any:`numpy.array_equal`. 
See: https://numpy.org/doc/stable/reference/generated/numpy.array_equal.html """ raise NotImplementedError() def is_floating_point(self, a): r""" Returns whether or not the input consists of floats """ raise NotImplementedError() def tile(self, a, reps): r""" Construct an array by repeating a the number of times given by reps See: https://numpy.org/doc/stable/reference/generated/numpy.tile.html """ raise NotImplementedError() def floor(self, a): r""" Return the floor of the input element-wise See: https://numpy.org/doc/stable/reference/generated/numpy.floor.html """ raise NotImplementedError() def prod(self, a, axis=None): r""" Return the product of all elements. See: https://numpy.org/doc/stable/reference/generated/numpy.prod.html """ raise NotImplementedError() def sort2(self, a, axis=None): r""" Return the sorted array and the indices to sort the array See: https://pytorch.org/docs/stable/generated/torch.sort.html """ raise NotImplementedError() def qr(self, a): r""" Return the QR factorization See: https://numpy.org/doc/stable/reference/generated/numpy.linalg.qr.html """ raise NotImplementedError() def atan2(self, a, b): r""" Element wise arctangent See: https://numpy.org/doc/stable/reference/generated/numpy.arctan2.html """ raise NotImplementedError() def transpose(self, a, axes=None): r""" Returns a tensor that is a transposed version of a. The given dimensions dim0 and dim1 are swapped. See: https://numpy.org/doc/stable/reference/generated/numpy.transpose.html """ raise NotImplementedError() def detach(self, *args): r""" Detach tensors in arguments from the current graph. See: https://pytorch.org/docs/stable/generated/torch.Tensor.detach.html """ raise NotImplementedError() def matmul(self, a, b): r""" Matrix product of two arrays. See: https://numpy.org/doc/stable/reference/generated/numpy.matmul.html#numpy.matmul """ raise NotImplementedError() def nan_to_num(self, x, copy=True, nan=0.0, posinf=None, neginf=None): r""" Replace NaN with zero and infinity with large finite numbers or with the numbers defined by the user. 
See: https://numpy.org/doc/stable/reference/generated/numpy.nan_to_num.html#numpy.nan_to_num """ raise NotImplementedError() class NumpyBackend(Backend): """ NumPy implementation of the backend - `__name__` is "numpy" - `__type__` is np.ndarray """ __name__ = 'numpy' __type__ = np.ndarray __type_list__ = [np.array(1, dtype=np.float32), np.array(1, dtype=np.float64)] rng_ = np.random.RandomState() def _to_numpy(self, a): return a def _from_numpy(self, a, type_as=None): if type_as is None: return a elif isinstance(a, float): return a else: return a.astype(type_as.dtype) def set_gradients(self, val, inputs, grads): # No gradients for numpy return val def zeros(self, shape, type_as=None): if type_as is None: return np.zeros(shape) else: return np.zeros(shape, dtype=type_as.dtype) def ones(self, shape, type_as=None): if type_as is None: return np.ones(shape) else: return np.ones(shape, dtype=type_as.dtype) def arange(self, stop, start=0, step=1, type_as=None): return np.arange(start, stop, step) def full(self, shape, fill_value, type_as=None): if type_as is None: return np.full(shape, fill_value) else: return np.full(shape, fill_value, dtype=type_as.dtype) def eye(self, N, M=None, type_as=None): if type_as is None: return np.eye(N, M) else: return np.eye(N, M, dtype=type_as.dtype) def sum(self, a, axis=None, keepdims=False): return np.sum(a, axis, keepdims=keepdims) def cumsum(self, a, axis=None): return np.cumsum(a, axis) def max(self, a, axis=None, keepdims=False): return np.max(a, axis, keepdims=keepdims) def min(self, a, axis=None, keepdims=False): return np.min(a, axis, keepdims=keepdims) def maximum(self, a, b): return np.maximum(a, b) def minimum(self, a, b): return np.minimum(a, b) def sign(self, a): return np.sign(a) def dot(self, a, b): return np.dot(a, b) def abs(self, a): return np.abs(a) def exp(self, a): return np.exp(a) def log(self, a): return np.log(a) def sqrt(self, a): return np.sqrt(a) def power(self, a, exponents): return np.power(a, exponents) def norm(self, a, axis=None, keepdims=False): return np.linalg.norm(a, axis=axis, keepdims=keepdims) def any(self, a): return np.any(a) def isnan(self, a): return np.isnan(a) def isinf(self, a): return np.isinf(a) def einsum(self, subscripts, *operands): return np.einsum(subscripts, *operands) def sort(self, a, axis=-1): return np.sort(a, axis) def argsort(self, a, axis=-1): return np.argsort(a, axis) def searchsorted(self, a, v, side='left'): if a.ndim == 1: return np.searchsorted(a, v, side) else: # this is a not very efficient way to make numpy # searchsorted work on 2d arrays ret = np.empty(v.shape, dtype=int) for i in range(a.shape[0]): ret[i, :] = np.searchsorted(a[i, :], v[i, :], side) return ret def flip(self, a, axis=None): return np.flip(a, axis) def outer(self, a, b): return np.outer(a, b) def clip(self, a, a_min, a_max): return np.clip(a, a_min, a_max) def repeat(self, a, repeats, axis=None): return np.repeat(a, repeats, axis) def take_along_axis(self, arr, indices, axis): return np.take_along_axis(arr, indices, axis) def concatenate(self, arrays, axis=0): return np.concatenate(arrays, axis) def zero_pad(self, a, pad_width, value=0): return np.pad(a, pad_width, constant_values=value) def argmax(self, a, axis=None): return np.argmax(a, axis=axis) def argmin(self, a, axis=None): return np.argmin(a, axis=axis) def mean(self, a, axis=None): return np.mean(a, axis=axis) def median(self, a, axis=None): return np.median(a, axis=axis) def std(self, a, axis=None): return np.std(a, axis=axis) def linspace(self, start, stop, num, 
type_as=None): if type_as is None: return np.linspace(start, stop, num) else: return np.linspace(start, stop, num, dtype=type_as.dtype) def meshgrid(self, a, b): return np.meshgrid(a, b) def diag(self, a, k=0): return np.diag(a, k) def unique(self, a, return_inverse=False): return np.unique(a, return_inverse=return_inverse) def logsumexp(self, a, axis=None): return special.logsumexp(a, axis=axis) def stack(self, arrays, axis=0): return np.stack(arrays, axis) def reshape(self, a, shape): return np.reshape(a, shape) def seed(self, seed=None): if seed is not None: self.rng_.seed(seed) def rand(self, *size, type_as=None): return self.rng_.rand(*size) def randn(self, *size, type_as=None): return self.rng_.randn(*size) def coo_matrix(self, data, rows, cols, shape=None, type_as=None): if type_as is None: return coo_matrix((data, (rows, cols)), shape=shape) else: return coo_matrix((data, (rows, cols)), shape=shape, dtype=type_as.dtype) def issparse(self, a): return issparse(a) def tocsr(self, a): if self.issparse(a): return a.tocsr() else: return csr_matrix(a) def eliminate_zeros(self, a, threshold=0.): if threshold > 0: if self.issparse(a): a.data[self.abs(a.data) <= threshold] = 0 else: a[self.abs(a) <= threshold] = 0 if self.issparse(a): a.eliminate_zeros() return a def todense(self, a): if self.issparse(a): return a.toarray() else: return a def where(self, condition, x=None, y=None): if x is None and y is None: return np.where(condition) else: return np.where(condition, x, y) def copy(self, a): return a.copy() def allclose(self, a, b, rtol=1e-05, atol=1e-08, equal_nan=False): return np.allclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan) def dtype_device(self, a): if hasattr(a, "dtype"): return a.dtype, "cpu" else: return type(a), "cpu" def assert_same_dtype_device(self, a, b): # numpy has implicit type conversion so we automatically validate the test pass def squeeze(self, a, axis=None): return np.squeeze(a, axis=axis) def bitsize(self, type_as): return type_as.itemsize * 8 def device_type(self, type_as): return "CPU" def _bench(self, callable, *args, n_runs=1, warmup_runs=1): results = dict() for type_as in self.__type_list__: inputs = [self.from_numpy(arg, type_as=type_as) for arg in args] for _ in range(warmup_runs): callable(*inputs) t0 = time.perf_counter() for _ in range(n_runs): callable(*inputs) t1 = time.perf_counter() key = ("Numpy", self.device_type(type_as), self.bitsize(type_as)) results[key] = (t1 - t0) / n_runs return results def solve(self, a, b): return np.linalg.solve(a, b) def trace(self, a): return np.trace(a) def inv(self, a): return scipy.linalg.inv(a) def sqrtm(self, a): L, V = np.linalg.eigh(a) return (V * np.sqrt(L)[None, :]) @ V.T def eigh(self, a): return np.linalg.eigh(a) def kl_div(self, p, q, eps=1e-16): return np.sum(p * np.log(p / q + eps)) def isfinite(self, a): return np.isfinite(a) def array_equal(self, a, b): return np.array_equal(a, b) def is_floating_point(self, a): return a.dtype.kind == "f" def tile(self, a, reps): return np.tile(a, reps) def floor(self, a): return np.floor(a) def prod(self, a, axis=0): return np.prod(a, axis=axis) def sort2(self, a, axis=-1): return self.sort(a, axis), self.argsort(a, axis) def qr(self, a): np_version = tuple([int(k) for k in np.__version__.split(".")]) if np_version < (1, 22, 0): M, N = a.shape[-2], a.shape[-1] K = min(M, N) if len(a.shape) >= 3: n = a.shape[0] qs, rs = np.zeros((n, M, K)), np.zeros((n, K, N)) for i in range(a.shape[0]): qs[i], rs[i] = np.linalg.qr(a[i]) else: return np.linalg.qr(a) return qs, rs 
return np.linalg.qr(a) def atan2(self, a, b): return np.arctan2(a, b) def transpose(self, a, axes=None): return np.transpose(a, axes) def detach(self, *args): if len(args) == 1: return args[0] return args def matmul(self, a, b): return np.matmul(a, b) def nan_to_num(self, x, copy=True, nan=0.0, posinf=None, neginf=None): return np.nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf) _register_backend_implementation(NumpyBackend) class JaxBackend(Backend): """ JAX implementation of the backend - `__name__` is "jax" - `__type__` is jax.numpy.ndarray """ __name__ = 'jax' __type__ = jax_type __type_list__ = None rng_ = None def __init__(self): self.rng_ = jax.random.PRNGKey(42) self.__type_list__ = [] # available_devices = jax.devices("cpu") available_devices = [] if xla_bridge.get_backend().platform == "gpu": available_devices += jax.devices("gpu") for d in available_devices: self.__type_list__ += [ jax.device_put(jnp.array(1, dtype=jnp.float32), d), jax.device_put(jnp.array(1, dtype=jnp.float64), d) ] def _to_numpy(self, a): return np.array(a) def _change_device(self, a, type_as): return jax.device_put(a, type_as.device_buffer.device()) def _from_numpy(self, a, type_as=None): if isinstance(a, float): a = np.array(a) if type_as is None: return jnp.array(a) else: return self._change_device(jnp.array(a).astype(type_as.dtype), type_as) def set_gradients(self, val, inputs, grads): from jax.flatten_util import ravel_pytree val, = jax.lax.stop_gradient((val,)) ravelled_inputs, _ = ravel_pytree(inputs) ravelled_grads, _ = ravel_pytree(grads) aux = jnp.sum(ravelled_inputs * ravelled_grads) / 2 aux = aux - jax.lax.stop_gradient(aux) val, = jax.tree_map(lambda z: z + aux, (val,)) return val def zeros(self, shape, type_as=None): if type_as is None: return jnp.zeros(shape) else: return self._change_device(jnp.zeros(shape, dtype=type_as.dtype), type_as) def ones(self, shape, type_as=None): if type_as is None: return jnp.ones(shape) else: return self._change_device(jnp.ones(shape, dtype=type_as.dtype), type_as) def arange(self, stop, start=0, step=1, type_as=None): return jnp.arange(start, stop, step) def full(self, shape, fill_value, type_as=None): if type_as is None: return jnp.full(shape, fill_value) else: return self._change_device(jnp.full(shape, fill_value, dtype=type_as.dtype), type_as) def eye(self, N, M=None, type_as=None): if type_as is None: return jnp.eye(N, M) else: return self._change_device(jnp.eye(N, M, dtype=type_as.dtype), type_as) def sum(self, a, axis=None, keepdims=False): return jnp.sum(a, axis, keepdims=keepdims) def cumsum(self, a, axis=None): return jnp.cumsum(a, axis) def max(self, a, axis=None, keepdims=False): return jnp.max(a, axis, keepdims=keepdims) def min(self, a, axis=None, keepdims=False): return jnp.min(a, axis, keepdims=keepdims) def maximum(self, a, b): return jnp.maximum(a, b) def minimum(self, a, b): return jnp.minimum(a, b) def sign(self, a): return jnp.sign(a) def dot(self, a, b): return jnp.dot(a, b) def abs(self, a): return jnp.abs(a) def exp(self, a): return jnp.exp(a) def log(self, a): return jnp.log(a) def sqrt(self, a): return jnp.sqrt(a) def power(self, a, exponents): return jnp.power(a, exponents) def norm(self, a, axis=None, keepdims=False): return jnp.linalg.norm(a, axis=axis, keepdims=keepdims) def any(self, a): return jnp.any(a) def isnan(self, a): return jnp.isnan(a) def isinf(self, a): return jnp.isinf(a) def einsum(self, subscripts, *operands): return jnp.einsum(subscripts, *operands) def sort(self, a, axis=-1): return jnp.sort(a, axis) 
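
    # Note on how `sort` and `argsort` pair up (the `sort2` method further
    # down returns both results at once): for a backend instance `nx`,
    #     values = nx.sort(a, axis=-1)
    #     order = nx.argsort(a, axis=-1)
    # and nx.take_along_axis(a, order, axis=-1) recovers `values`.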
def argsort(self, a, axis=-1): return jnp.argsort(a, axis) def searchsorted(self, a, v, side='left'): if a.ndim == 1: return jnp.searchsorted(a, v, side) else: # this is a not very efficient way to make jax numpy # searchsorted work on 2d arrays return jnp.array([jnp.searchsorted(a[i, :], v[i, :], side) for i in range(a.shape[0])]) def flip(self, a, axis=None): return jnp.flip(a, axis) def outer(self, a, b): return jnp.outer(a, b) def clip(self, a, a_min, a_max): return jnp.clip(a, a_min, a_max) def repeat(self, a, repeats, axis=None): return jnp.repeat(a, repeats, axis) def take_along_axis(self, arr, indices, axis): return jnp.take_along_axis(arr, indices, axis) def concatenate(self, arrays, axis=0): return jnp.concatenate(arrays, axis) def zero_pad(self, a, pad_width, value=0): return jnp.pad(a, pad_width, constant_values=value) def argmax(self, a, axis=None): return jnp.argmax(a, axis=axis) def argmin(self, a, axis=None): return jnp.argmin(a, axis=axis) def mean(self, a, axis=None): return jnp.mean(a, axis=axis) def median(self, a, axis=None): return jnp.median(a, axis=axis) def std(self, a, axis=None): return jnp.std(a, axis=axis) def linspace(self, start, stop, num, type_as=None): if type_as is None: return jnp.linspace(start, stop, num) else: return self._change_device(jnp.linspace(start, stop, num, dtype=type_as.dtype), type_as) def meshgrid(self, a, b): return jnp.meshgrid(a, b) def diag(self, a, k=0): return jnp.diag(a, k) def unique(self, a, return_inverse=False): return jnp.unique(a, return_inverse=return_inverse) def logsumexp(self, a, axis=None): return jspecial.logsumexp(a, axis=axis) def stack(self, arrays, axis=0): return jnp.stack(arrays, axis) def reshape(self, a, shape): return jnp.reshape(a, shape) def seed(self, seed=None): if seed is not None: self.rng_ = jax.random.PRNGKey(seed) def rand(self, *size, type_as=None): self.rng_, subkey = jax.random.split(self.rng_) if type_as is not None: return jax.random.uniform(subkey, shape=size, dtype=type_as.dtype) else: return jax.random.uniform(subkey, shape=size) def randn(self, *size, type_as=None): self.rng_, subkey = jax.random.split(self.rng_) if type_as is not None: return jax.random.normal(subkey, shape=size, dtype=type_as.dtype) else: return jax.random.normal(subkey, shape=size) def coo_matrix(self, data, rows, cols, shape=None, type_as=None): # Currently, JAX does not support sparse matrices data = self.to_numpy(data) rows = self.to_numpy(rows) cols = self.to_numpy(cols) nx = NumpyBackend() coo_matrix = nx.coo_matrix(data, rows, cols, shape=shape, type_as=type_as) matrix = nx.todense(coo_matrix) return self.from_numpy(matrix) def issparse(self, a): # Currently, JAX does not support sparse matrices return False def tocsr(self, a): # Currently, JAX does not support sparse matrices return a def eliminate_zeros(self, a, threshold=0.): # Currently, JAX does not support sparse matrices if threshold > 0: return self.where( self.abs(a) <= threshold, self.zeros((1,), type_as=a), a ) return a def todense(self, a): # Currently, JAX does not support sparse matrices return a def where(self, condition, x=None, y=None): if x is None and y is None: return jnp.where(condition) else: return jnp.where(condition, x, y) def copy(self, a): # No need to copy, JAX arrays are immutable return a def allclose(self, a, b, rtol=1e-05, atol=1e-08, equal_nan=False): return jnp.allclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan) def dtype_device(self, a): return a.dtype, a.device_buffer.device() def assert_same_dtype_device(self, a, b): a_dtype, 
a_device = self.dtype_device(a) b_dtype, b_device = self.dtype_device(b) assert a_dtype == b_dtype, "Dtype discrepancy" assert a_device == b_device, f"Device discrepancy. First input is on {str(a_device)}, whereas second input is on {str(b_device)}" def squeeze(self, a, axis=None): return jnp.squeeze(a, axis=axis) def bitsize(self, type_as): return type_as.dtype.itemsize * 8 def device_type(self, type_as): return self.dtype_device(type_as)[1].platform.upper() def _bench(self, callable, *args, n_runs=1, warmup_runs=1): results = dict() for type_as in self.__type_list__: inputs = [self.from_numpy(arg, type_as=type_as) for arg in args] for _ in range(warmup_runs): a = callable(*inputs) a.block_until_ready() t0 = time.perf_counter() for _ in range(n_runs): a = callable(*inputs) a.block_until_ready() t1 = time.perf_counter() key = ("Jax", self.device_type(type_as), self.bitsize(type_as)) results[key] = (t1 - t0) / n_runs return results def solve(self, a, b): return jnp.linalg.solve(a, b) def trace(self, a): return jnp.trace(a) def inv(self, a): return jnp.linalg.inv(a) def sqrtm(self, a): L, V = jnp.linalg.eigh(a) return (V * jnp.sqrt(L)[None, :]) @ V.T def eigh(self, a): return jnp.linalg.eigh(a) def kl_div(self, p, q, eps=1e-16): return jnp.sum(p * jnp.log(p / q + eps)) def isfinite(self, a): return jnp.isfinite(a) def array_equal(self, a, b): return jnp.array_equal(a, b) def is_floating_point(self, a): return a.dtype.kind == "f" def tile(self, a, reps): return jnp.tile(a, reps) def floor(self, a): return jnp.floor(a) def prod(self, a, axis=0): return jnp.prod(a, axis=axis) def sort2(self, a, axis=-1): return self.sort(a, axis), self.argsort(a, axis) def qr(self, a): return jnp.linalg.qr(a) def atan2(self, a, b): return jnp.arctan2(a, b) def transpose(self, a, axes=None): return jnp.transpose(a, axes) def detach(self, *args): if len(args) == 1: return jax.lax.stop_gradient((args[0],))[0] return [jax.lax.stop_gradient((a,))[0] for a in args] def matmul(self, a, b): return jnp.matmul(a, b) def nan_to_num(self, x, copy=True, nan=0.0, posinf=None, neginf=None): return jnp.nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf) if jax: # Only register jax backend if it is installed _register_backend_implementation(JaxBackend) class TorchBackend(Backend): """ PyTorch implementation of the backend - `__name__` is "torch" - `__type__` is torch.Tensor """ __name__ = 'torch' __type__ = torch_type __type_list__ = None rng_ = None def __init__(self): self.rng_ = torch.Generator("cpu") self.rng_.seed() self.__type_list__ = [torch.tensor(1, dtype=torch.float32), torch.tensor(1, dtype=torch.float64)] if torch.cuda.is_available(): self.rng_cuda_ = torch.Generator("cuda") self.rng_cuda_.seed() self.__type_list__.append(torch.tensor(1, dtype=torch.float32, device='cuda')) self.__type_list__.append(torch.tensor(1, dtype=torch.float64, device='cuda')) else: self.rng_cuda_ = torch.Generator("cpu") from torch.autograd import Function # define a function that takes inputs val and grads # ad returns a val tensor with proper gradients class ValFunction(Function): @staticmethod def forward(ctx, val, grads, *inputs): ctx.grads = grads return val @staticmethod def backward(ctx, grad_output): # the gradients are grad return (None, None) + tuple(g * grad_output for g in ctx.grads) self.ValFunction = ValFunction def _to_numpy(self, a): if isinstance(a, float) or isinstance(a, int) or isinstance(a, np.ndarray): return np.array(a) return a.cpu().detach().numpy() def _from_numpy(self, a, type_as=None): if 
isinstance(a, float) or isinstance(a, int): a = np.array(a) if type_as is None: return torch.from_numpy(a) else: return torch.as_tensor(a, dtype=type_as.dtype, device=type_as.device) def set_gradients(self, val, inputs, grads): Func = self.ValFunction res = Func.apply(val, grads, *inputs) return res def zeros(self, shape, type_as=None): if isinstance(shape, int): shape = (shape,) if type_as is None: return torch.zeros(shape) else: return torch.zeros(shape, dtype=type_as.dtype, device=type_as.device) def ones(self, shape, type_as=None): if isinstance(shape, int): shape = (shape,) if type_as is None: return torch.ones(shape) else: return torch.ones(shape, dtype=type_as.dtype, device=type_as.device) def arange(self, stop, start=0, step=1, type_as=None): if type_as is None: return torch.arange(start, stop, step) else: return torch.arange(start, stop, step, device=type_as.device) def full(self, shape, fill_value, type_as=None): if isinstance(shape, int): shape = (shape,) if type_as is None: return torch.full(shape, fill_value) else: return torch.full(shape, fill_value, dtype=type_as.dtype, device=type_as.device) def eye(self, N, M=None, type_as=None): if M is None: M = N if type_as is None: return torch.eye(N, m=M) else: return torch.eye(N, m=M, dtype=type_as.dtype, device=type_as.device) def sum(self, a, axis=None, keepdims=False): if axis is None: return torch.sum(a) else: return torch.sum(a, axis, keepdim=keepdims) def cumsum(self, a, axis=None): if axis is None: return torch.cumsum(a.flatten(), 0) else: return torch.cumsum(a, axis) def max(self, a, axis=None, keepdims=False): if axis is None: return torch.max(a) else: return torch.max(a, axis, keepdim=keepdims)[0] def min(self, a, axis=None, keepdims=False): if axis is None: return torch.min(a) else: return torch.min(a, axis, keepdim=keepdims)[0] def maximum(self, a, b): if isinstance(a, int) or isinstance(a, float): a = torch.tensor([float(a)], dtype=b.dtype, device=b.device) if isinstance(b, int) or isinstance(b, float): b = torch.tensor([float(b)], dtype=a.dtype, device=a.device) if hasattr(torch, "maximum"): return torch.maximum(a, b) else: return torch.max(torch.stack(torch.broadcast_tensors(a, b)), axis=0)[0] def minimum(self, a, b): if isinstance(a, int) or isinstance(a, float): a = torch.tensor([float(a)], dtype=b.dtype, device=b.device) if isinstance(b, int) or isinstance(b, float): b = torch.tensor([float(b)], dtype=a.dtype, device=a.device) if hasattr(torch, "minimum"): return torch.minimum(a, b) else: return torch.min(torch.stack(torch.broadcast_tensors(a, b)), axis=0)[0] def sign(self, a): return torch.sign(a) def dot(self, a, b): return torch.matmul(a, b) def abs(self, a): return torch.abs(a) def exp(self, a): return torch.exp(a) def log(self, a): return torch.log(a) def sqrt(self, a): return torch.sqrt(a) def power(self, a, exponents): return torch.pow(a, exponents) def norm(self, a, axis=None, keepdims=False): return torch.linalg.norm(a, dim=axis, keepdims=keepdims) def any(self, a): return torch.any(a) def isnan(self, a): return torch.isnan(a) def isinf(self, a): return torch.isinf(a) def einsum(self, subscripts, *operands): return torch.einsum(subscripts, *operands) def sort(self, a, axis=-1): sorted0, indices = torch.sort(a, dim=axis) return sorted0 def argsort(self, a, axis=-1): sorted, indices = torch.sort(a, dim=axis) return indices def searchsorted(self, a, v, side='left'): right = (side != 'left') return torch.searchsorted(a, v, right=right) def flip(self, a, axis=None): if axis is None: return torch.flip(a, tuple(i 
for i in range(len(a.shape)))) if isinstance(axis, int): return torch.flip(a, (axis,)) else: return torch.flip(a, dims=axis) def outer(self, a, b): return torch.outer(a, b) def clip(self, a, a_min, a_max): return torch.clamp(a, a_min, a_max) def repeat(self, a, repeats, axis=None): return torch.repeat_interleave(a, repeats, dim=axis) def take_along_axis(self, arr, indices, axis): return torch.gather(arr, axis, indices) def concatenate(self, arrays, axis=0): return torch.cat(arrays, dim=axis) def zero_pad(self, a, pad_width, value=0): from torch.nn.functional import pad # pad_width is an array of ndim tuples indicating how many 0 before and after # we need to add. We first need to make it compliant with torch syntax, that # starts with the last dim, then second last, etc. how_pad = tuple(element for tupl in pad_width[::-1] for element in tupl) return pad(a, how_pad, value=value) def argmax(self, a, axis=None): return torch.argmax(a, dim=axis) def argmin(self, a, axis=None): return torch.argmin(a, dim=axis) def mean(self, a, axis=None): if axis is not None: return torch.mean(a, dim=axis) else: return torch.mean(a) def median(self, a, axis=None): from packaging import version # Since version 1.11.0, interpolation is available if version.parse(torch.__version__) >= version.parse("1.11.0"): if axis is not None: return torch.quantile(a, 0.5, interpolation="midpoint", dim=axis) else: return torch.quantile(a, 0.5, interpolation="midpoint") # Else, use numpy warnings.warn("The median is being computed using numpy and the array has been detached " "in the Pytorch backend.") a_ = self.to_numpy(a) a_median = np.median(a_, axis=axis) return self.from_numpy(a_median, type_as=a) def std(self, a, axis=None): if axis is not None: return torch.std(a, dim=axis, unbiased=False) else: return torch.std(a, unbiased=False) def linspace(self, start, stop, num, type_as=None): if type_as is None: return torch.linspace(start, stop, num) else: return torch.linspace(start, stop, num, dtype=type_as.dtype, device=type_as.device) def meshgrid(self, a, b): try: return torch.meshgrid(a, b, indexing="xy") except TypeError: X, Y = torch.meshgrid(a, b) return X.T, Y.T def diag(self, a, k=0): return torch.diag(a, diagonal=k) def unique(self, a, return_inverse=False): return torch.unique(a, return_inverse=return_inverse) def logsumexp(self, a, axis=None): if axis is not None: return torch.logsumexp(a, dim=axis) else: return torch.logsumexp(a, dim=tuple(range(len(a.shape)))) def stack(self, arrays, axis=0): return torch.stack(arrays, dim=axis) def reshape(self, a, shape): return torch.reshape(a, shape) def seed(self, seed=None): if isinstance(seed, int): self.rng_.manual_seed(seed) self.rng_cuda_.manual_seed(seed) elif isinstance(seed, torch.Generator): if self.device_type(seed) == "GPU": self.rng_cuda_ = seed else: self.rng_ = seed else: raise ValueError("Non compatible seed : {}".format(seed)) def rand(self, *size, type_as=None): if type_as is not None: generator = self.rng_cuda_ if self.device_type(type_as) == "GPU" else self.rng_ return torch.rand(size=size, generator=generator, dtype=type_as.dtype, device=type_as.device) else: return torch.rand(size=size, generator=self.rng_) def randn(self, *size, type_as=None): if type_as is not None: generator = self.rng_cuda_ if self.device_type(type_as) == "GPU" else self.rng_ return torch.randn(size=size, dtype=type_as.dtype, generator=generator, device=type_as.device) else: return torch.randn(size=size, generator=self.rng_) def coo_matrix(self, data, rows, cols, shape=None, 
type_as=None): if type_as is None: return torch.sparse_coo_tensor(torch.stack([rows, cols]), data, size=shape) else: return torch.sparse_coo_tensor( torch.stack([rows, cols]), data, size=shape, dtype=type_as.dtype, device=type_as.device ) def issparse(self, a): return getattr(a, "is_sparse", False) or getattr(a, "is_sparse_csr", False) def tocsr(self, a): # Versions older than 1.9 do not support CSR tensors. PyTorch 1.9 and 1.10 offer a very limited support return self.todense(a) def eliminate_zeros(self, a, threshold=0.): if self.issparse(a): if threshold > 0: mask = self.abs(a) <= threshold mask = ~mask mask = mask.nonzero() else: mask = a._values().nonzero() nv = a._values().index_select(0, mask.view(-1)) ni = a._indices().index_select(1, mask.view(-1)) return self.coo_matrix(nv, ni[0], ni[1], shape=a.shape, type_as=a) else: if threshold > 0: a[self.abs(a) <= threshold] = 0 return a def todense(self, a): if self.issparse(a): return a.to_dense() else: return a def where(self, condition, x=None, y=None): if x is None and y is None: return torch.where(condition) else: return torch.where(condition, x, y) def copy(self, a): return torch.clone(a) def allclose(self, a, b, rtol=1e-05, atol=1e-08, equal_nan=False): return torch.allclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan) def dtype_device(self, a): return a.dtype, a.device def assert_same_dtype_device(self, a, b): a_dtype, a_device = self.dtype_device(a) b_dtype, b_device = self.dtype_device(b) assert a_dtype == b_dtype, "Dtype discrepancy" assert a_device == b_device, f"Device discrepancy. First input is on {str(a_device)}, whereas second input is on {str(b_device)}" def squeeze(self, a, axis=None): if axis is None: return torch.squeeze(a) else: return torch.squeeze(a, dim=axis) def bitsize(self, type_as): return torch.finfo(type_as.dtype).bits def device_type(self, type_as): return type_as.device.type.replace("cuda", "gpu").upper() def _bench(self, callable, *args, n_runs=1, warmup_runs=1): results = dict() for type_as in self.__type_list__: inputs = [self.from_numpy(arg, type_as=type_as) for arg in args] for _ in range(warmup_runs): callable(*inputs) if self.device_type(type_as) == "GPU": # pragma: no cover torch.cuda.synchronize() start = torch.cuda.Event(enable_timing=True) end = torch.cuda.Event(enable_timing=True) start.record() else: start = time.perf_counter() for _ in range(n_runs): callable(*inputs) if self.device_type(type_as) == "GPU": # pragma: no cover end.record() torch.cuda.synchronize() duration = start.elapsed_time(end) / 1000. 
else: end = time.perf_counter() duration = end - start key = ("Pytorch", self.device_type(type_as), self.bitsize(type_as)) results[key] = duration / n_runs if torch.cuda.is_available(): torch.cuda.empty_cache() return results def solve(self, a, b): return torch.linalg.solve(a, b) def trace(self, a): return torch.trace(a) def inv(self, a): return torch.linalg.inv(a) def sqrtm(self, a): L, V = torch.linalg.eigh(a) return (V * torch.sqrt(L)[None, :]) @ V.T def eigh(self, a): return torch.linalg.eigh(a) def kl_div(self, p, q, eps=1e-16): return torch.sum(p * torch.log(p / q + eps)) def isfinite(self, a): return torch.isfinite(a) def array_equal(self, a, b): return torch.equal(a, b) def is_floating_point(self, a): return a.dtype.is_floating_point def tile(self, a, reps): return a.repeat(reps) def floor(self, a): return torch.floor(a) def prod(self, a, axis=0): return torch.prod(a, dim=axis) def sort2(self, a, axis=-1): return torch.sort(a, axis) def qr(self, a): return torch.linalg.qr(a) def atan2(self, a, b): return torch.atan2(a, b) def transpose(self, a, axes=None): if axes is None: axes = tuple(range(a.ndim)[::-1]) return a.permute(axes) def detach(self, *args): if len(args) == 1: return args[0].detach() return [a.detach() for a in args] def matmul(self, a, b): return torch.matmul(a, b) def nan_to_num(self, x, copy=True, nan=0.0, posinf=None, neginf=None): out = None if copy else x return torch.nan_to_num(x, nan=nan, posinf=posinf, neginf=neginf, out=out) if torch: # Only register torch backend if it is installed _register_backend_implementation(TorchBackend) class CupyBackend(Backend): # pragma: no cover """ CuPy implementation of the backend - `__name__` is "cupy" - `__type__` is cp.ndarray """ __name__ = 'cupy' __type__ = cp_type __type_list__ = None rng_ = None def __init__(self): self.rng_ = cp.random.RandomState() self.__type_list__ = [ cp.array(1, dtype=cp.float32), cp.array(1, dtype=cp.float64) ] def _to_numpy(self, a): return cp.asnumpy(a) def _from_numpy(self, a, type_as=None): if isinstance(a, float): a = np.array(a) if type_as is None: return cp.asarray(a) else: with cp.cuda.Device(type_as.device): return cp.asarray(a, dtype=type_as.dtype) def set_gradients(self, val, inputs, grads): # No gradients for cupy return val def zeros(self, shape, type_as=None): if isinstance(shape, (list, tuple)): shape = tuple(int(i) for i in shape) if type_as is None: return cp.zeros(shape) else: with cp.cuda.Device(type_as.device): return cp.zeros(shape, dtype=type_as.dtype) def ones(self, shape, type_as=None): if isinstance(shape, (list, tuple)): shape = tuple(int(i) for i in shape) if type_as is None: return cp.ones(shape) else: with cp.cuda.Device(type_as.device): return cp.ones(shape, dtype=type_as.dtype) def arange(self, stop, start=0, step=1, type_as=None): return cp.arange(start, stop, step) def full(self, shape, fill_value, type_as=None): if isinstance(shape, (list, tuple)): shape = tuple(int(i) for i in shape) if type_as is None: return cp.full(shape, fill_value) else: with cp.cuda.Device(type_as.device): return cp.full(shape, fill_value, dtype=type_as.dtype) def eye(self, N, M=None, type_as=None): if type_as is None: return cp.eye(N, M) else: with cp.cuda.Device(type_as.device): return cp.eye(N, M, dtype=type_as.dtype) def sum(self, a, axis=None, keepdims=False): return cp.sum(a, axis, keepdims=keepdims) def cumsum(self, a, axis=None): return cp.cumsum(a, axis) def max(self, a, axis=None, keepdims=False): return cp.max(a, axis, keepdims=keepdims) def min(self, a, axis=None, 
keepdims=False): return cp.min(a, axis, keepdims=keepdims) def maximum(self, a, b): return cp.maximum(a, b) def minimum(self, a, b): return cp.minimum(a, b) def sign(self, a): return cp.sign(a) def abs(self, a): return cp.abs(a) def exp(self, a): return cp.exp(a) def log(self, a): return cp.log(a) def sqrt(self, a): return cp.sqrt(a) def power(self, a, exponents): return cp.power(a, exponents) def dot(self, a, b): return cp.dot(a, b) def norm(self, a, axis=None, keepdims=False): return cp.linalg.norm(a, axis=axis, keepdims=keepdims) def any(self, a): return cp.any(a) def isnan(self, a): return cp.isnan(a) def isinf(self, a): return cp.isinf(a) def einsum(self, subscripts, *operands): return cp.einsum(subscripts, *operands) def sort(self, a, axis=-1): return cp.sort(a, axis) def argsort(self, a, axis=-1): return cp.argsort(a, axis) def searchsorted(self, a, v, side='left'): if a.ndim == 1: return cp.searchsorted(a, v, side) else: # this is a not very efficient way to make numpy # searchsorted work on 2d arrays ret = cp.empty(v.shape, dtype=int) for i in range(a.shape[0]): ret[i, :] = cp.searchsorted(a[i, :], v[i, :], side) return ret def flip(self, a, axis=None): return cp.flip(a, axis) def outer(self, a, b): return cp.outer(a, b) def clip(self, a, a_min, a_max): return cp.clip(a, a_min, a_max) def repeat(self, a, repeats, axis=None): return cp.repeat(a, repeats, axis) def take_along_axis(self, arr, indices, axis): return cp.take_along_axis(arr, indices, axis) def concatenate(self, arrays, axis=0): return cp.concatenate(arrays, axis) def zero_pad(self, a, pad_width, value=0): return cp.pad(a, pad_width, constant_values=value) def argmax(self, a, axis=None): return cp.argmax(a, axis=axis) def argmin(self, a, axis=None): return cp.argmin(a, axis=axis) def mean(self, a, axis=None): return cp.mean(a, axis=axis) def median(self, a, axis=None): return cp.median(a, axis=axis) def std(self, a, axis=None): return cp.std(a, axis=axis) def linspace(self, start, stop, num, type_as=None): if type_as is None: return cp.linspace(start, stop, num) else: with cp.cuda.Device(type_as.device): return cp.linspace(start, stop, num, dtype=type_as.dtype) def meshgrid(self, a, b): return cp.meshgrid(a, b) def diag(self, a, k=0): return cp.diag(a, k) def unique(self, a, return_inverse=False): return cp.unique(a, return_inverse=return_inverse) def logsumexp(self, a, axis=None): # Taken from # https://github.com/scipy/scipy/blob/v1.7.1/scipy/special/_logsumexp.py#L7-L127 a_max = cp.amax(a, axis=axis, keepdims=True) if a_max.ndim > 0: a_max[~cp.isfinite(a_max)] = 0 elif not cp.isfinite(a_max): a_max = 0 tmp = cp.exp(a - a_max) s = cp.sum(tmp, axis=axis) out = cp.log(s) a_max = cp.squeeze(a_max, axis=axis) out += a_max return out def stack(self, arrays, axis=0): return cp.stack(arrays, axis) def reshape(self, a, shape): return cp.reshape(a, shape) def seed(self, seed=None): if seed is not None: self.rng_.seed(seed) def rand(self, *size, type_as=None): if type_as is None: return self.rng_.rand(*size) else: with cp.cuda.Device(type_as.device): return self.rng_.rand(*size, dtype=type_as.dtype) def randn(self, *size, type_as=None): if type_as is None: return self.rng_.randn(*size) else: with cp.cuda.Device(type_as.device): return self.rng_.randn(*size, dtype=type_as.dtype) def coo_matrix(self, data, rows, cols, shape=None, type_as=None): data = self.from_numpy(data) rows = self.from_numpy(rows) cols = self.from_numpy(cols) if type_as is None: return cupyx.scipy.sparse.coo_matrix( (data, (rows, cols)), shape=shape ) else: 
with cp.cuda.Device(type_as.device): return cupyx.scipy.sparse.coo_matrix( (data, (rows, cols)), shape=shape, dtype=type_as.dtype ) def issparse(self, a): return cupyx.scipy.sparse.issparse(a) def tocsr(self, a): if self.issparse(a): return a.tocsr() else: return cupyx.scipy.sparse.csr_matrix(a) def eliminate_zeros(self, a, threshold=0.): if threshold > 0: if self.issparse(a): a.data[self.abs(a.data) <= threshold] = 0 else: a[self.abs(a) <= threshold] = 0 if self.issparse(a): a.eliminate_zeros() return a def todense(self, a): if self.issparse(a): return a.toarray() else: return a def where(self, condition, x=None, y=None): if x is None and y is None: return cp.where(condition) else: return cp.where(condition, x, y) def copy(self, a): return a.copy() def allclose(self, a, b, rtol=1e-05, atol=1e-08, equal_nan=False): return cp.allclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan) def dtype_device(self, a): return a.dtype, a.device def assert_same_dtype_device(self, a, b): a_dtype, a_device = self.dtype_device(a) b_dtype, b_device = self.dtype_device(b) # cupy has implicit type conversion so # we automatically validate the test for type assert a_device == b_device, f"Device discrepancy. First input is on {str(a_device)}, whereas second input is on {str(b_device)}" def squeeze(self, a, axis=None): return cp.squeeze(a, axis=axis) def bitsize(self, type_as): return type_as.itemsize * 8 def device_type(self, type_as): return "GPU" def _bench(self, callable, *args, n_runs=1, warmup_runs=1): mempool = cp.get_default_memory_pool() pinned_mempool = cp.get_default_pinned_memory_pool() results = dict() for type_as in self.__type_list__: inputs = [self.from_numpy(arg, type_as=type_as) for arg in args] start_gpu = cp.cuda.Event() end_gpu = cp.cuda.Event() for _ in range(warmup_runs): callable(*inputs) start_gpu.synchronize() start_gpu.record() for _ in range(n_runs): callable(*inputs) end_gpu.record() end_gpu.synchronize() key = ("Cupy", self.device_type(type_as), self.bitsize(type_as)) t_gpu = cp.cuda.get_elapsed_time(start_gpu, end_gpu) / 1000. 
results[key] = t_gpu / n_runs mempool.free_all_blocks() pinned_mempool.free_all_blocks() return results def solve(self, a, b): return cp.linalg.solve(a, b) def trace(self, a): return cp.trace(a) def inv(self, a): return cp.linalg.inv(a) def sqrtm(self, a): L, V = cp.linalg.eigh(a) return (V * cp.sqrt(L)[None, :]) @ V.T def eigh(self, a): return cp.linalg.eigh(a) def kl_div(self, p, q, eps=1e-16): return cp.sum(p * cp.log(p / q + eps)) def isfinite(self, a): return cp.isfinite(a) def array_equal(self, a, b): return cp.array_equal(a, b) def is_floating_point(self, a): return a.dtype.kind == "f" def tile(self, a, reps): return cp.tile(a, reps) def floor(self, a): return cp.floor(a) def prod(self, a, axis=0): return cp.prod(a, axis=axis) def sort2(self, a, axis=-1): return self.sort(a, axis), self.argsort(a, axis) def qr(self, a): return cp.linalg.qr(a) def atan2(self, a, b): return cp.arctan2(a, b) def transpose(self, a, axes=None): return cp.transpose(a, axes) def detach(self, *args): if len(args) == 1: return args[0] return args def matmul(self, a, b): return cp.matmul(a, b) def nan_to_num(self, x, copy=True, nan=0.0, posinf=None, neginf=None): return cp.nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf) if cp: # Only register cp backend if it is installed _register_backend_implementation(CupyBackend) class TensorflowBackend(Backend): __name__ = "tf" __type__ = tf_type __type_list__ = None rng_ = None def __init__(self): self.seed(None) self.__type_list__ = [ tf.convert_to_tensor([1], dtype=tf.float32), tf.convert_to_tensor([1], dtype=tf.float64) ] tmp = self.randn(15, 10) try: tmp.reshape((150, 1)) except AttributeError: warnings.warn( "To use TensorflowBackend, you need to activate the tensorflow " "numpy API. You can activate it by running: \n" "from tensorflow.python.ops.numpy_ops import np_config\n" "np_config.enable_numpy_behavior()", stacklevel=2 ) def _to_numpy(self, a): if isinstance(a, float) or isinstance(a, int) or isinstance(a, np.ndarray): return np.array(a) return a.numpy() def _from_numpy(self, a, type_as=None): if isinstance(a, float): a = np.array(a) if not isinstance(a, self.__type__): if type_as is None: return tf.convert_to_tensor(a) else: return tf.convert_to_tensor(a, dtype=type_as.dtype) else: if type_as is None: return a else: return tf.cast(a, dtype=type_as.dtype) def set_gradients(self, val, inputs, grads): @tf.custom_gradient def tmp(input): def grad(upstream): return grads return val, grad return tmp(inputs) def zeros(self, shape, type_as=None): if type_as is None: return tnp.zeros(shape) else: return tnp.zeros(shape, dtype=type_as.dtype) def ones(self, shape, type_as=None): if type_as is None: return tnp.ones(shape) else: return tnp.ones(shape, dtype=type_as.dtype) def arange(self, stop, start=0, step=1, type_as=None): return tnp.arange(start, stop, step) def full(self, shape, fill_value, type_as=None): if type_as is None: return tnp.full(shape, fill_value) else: return tnp.full(shape, fill_value, dtype=type_as.dtype) def eye(self, N, M=None, type_as=None): if type_as is None: return tnp.eye(N, M) else: return tnp.eye(N, M, dtype=type_as.dtype) def sum(self, a, axis=None, keepdims=False): return tnp.sum(a, axis, keepdims=keepdims) def cumsum(self, a, axis=None): return tnp.cumsum(a, axis) def max(self, a, axis=None, keepdims=False): return tnp.max(a, axis, keepdims=keepdims) def min(self, a, axis=None, keepdims=False): return tnp.min(a, axis, keepdims=keepdims) def maximum(self, a, b): return tnp.maximum(a, b) def minimum(self, a, b): return 
tnp.minimum(a, b) def sign(self, a): return tnp.sign(a) def dot(self, a, b): if len(b.shape) == 1: if len(a.shape) == 1: # inner product return tf.reduce_sum(tf.multiply(a, b)) else: # matrix vector return tf.linalg.matvec(a, b) else: if len(a.shape) == 1: return tf.linalg.matvec(b.T, a.T).T else: return tf.matmul(a, b) def abs(self, a): return tnp.abs(a) def exp(self, a): return tnp.exp(a) def log(self, a): return tnp.log(a) def sqrt(self, a): return tnp.sqrt(a) def power(self, a, exponents): return tnp.power(a, exponents) def norm(self, a, axis=None, keepdims=False): return tf.math.reduce_euclidean_norm(a, axis=axis, keepdims=keepdims) def any(self, a): return tnp.any(a) def isnan(self, a): return tnp.isnan(a) def isinf(self, a): return tnp.isinf(a) def einsum(self, subscripts, *operands): return tnp.einsum(subscripts, *operands) def sort(self, a, axis=-1): return tnp.sort(a, axis) def argsort(self, a, axis=-1): return tnp.argsort(a, axis) def searchsorted(self, a, v, side='left'): return tf.searchsorted(a, v, side=side) def flip(self, a, axis=None): return tnp.flip(a, axis) def outer(self, a, b): return tnp.outer(a, b) def clip(self, a, a_min, a_max): return tnp.clip(a, a_min, a_max) def repeat(self, a, repeats, axis=None): return tnp.repeat(a, repeats, axis) def take_along_axis(self, arr, indices, axis): return tnp.take_along_axis(arr, indices, axis) def concatenate(self, arrays, axis=0): return tnp.concatenate(arrays, axis) def zero_pad(self, a, pad_width, value=0): return tnp.pad(a, pad_width, mode="constant", constant_values=value) def argmax(self, a, axis=None): return tnp.argmax(a, axis=axis) def argmin(self, a, axis=None): return tnp.argmin(a, axis=axis) def mean(self, a, axis=None): return tnp.mean(a, axis=axis) def median(self, a, axis=None): warnings.warn("The median is being computed using numpy and the array has been detached " "in the Tensorflow backend.") a_ = self.to_numpy(a) a_median = np.median(a_, axis=axis) return self.from_numpy(a_median, type_as=a) def std(self, a, axis=None): return tnp.std(a, axis=axis) def linspace(self, start, stop, num, type_as=None): if type_as is None: return tnp.linspace(start, stop, num) else: return tnp.linspace(start, stop, num, dtype=type_as.dtype) def meshgrid(self, a, b): return tnp.meshgrid(a, b) def diag(self, a, k=0): return tnp.diag(a, k) def unique(self, a, return_inverse=False): y, idx = tf.unique(tf.reshape(a, [-1])) sort_idx = tf.argsort(y) y_prime = tf.gather(y, sort_idx) if return_inverse: inv_sort_idx = tf.math.invert_permutation(sort_idx) return y_prime, tf.gather(inv_sort_idx, idx) else: return y_prime def logsumexp(self, a, axis=None): return tf.math.reduce_logsumexp(a, axis=axis) def stack(self, arrays, axis=0): return tnp.stack(arrays, axis) def reshape(self, a, shape): return tnp.reshape(a, shape) def seed(self, seed=None): if isinstance(seed, int): self.rng_ = tf.random.Generator.from_seed(seed) elif isinstance(seed, tf.random.Generator): self.rng_ = seed elif seed is None: self.rng_ = tf.random.Generator.from_non_deterministic_state() else: raise ValueError("Non compatible seed : {}".format(seed)) def rand(self, *size, type_as=None): if type_as is None: return self.rng_.uniform(size, minval=0., maxval=1.) 
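# --- illustrative sketch, assuming TensorFlow is installed --------------------
# The constructor above warns when TF's NumPy API is disabled; enabling it
# gives tensors NumPy-style semantics (e.g. the .reshape() probed there):
import tensorflow as tf
from tensorflow.python.ops.numpy_ops import np_config

np_config.enable_numpy_behavior()
t = tf.random.normal((15, 10))
t2 = t.reshape((150, 1))     # works once numpy behavior is enabled
# --- end of sketch ------------------------------------------------------------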
else: return self.rng_.uniform( size, minval=0., maxval=1., dtype=type_as.dtype ) def randn(self, *size, type_as=None): if type_as is None: return self.rng_.normal(size) else: return self.rng_.normal(size, dtype=type_as.dtype) def _convert_to_index_for_coo(self, tensor): if isinstance(tensor, self.__type__): return int(self.max(tensor)) + 1 else: return int(np.max(tensor)) + 1 def coo_matrix(self, data, rows, cols, shape=None, type_as=None): if shape is None: shape = ( self._convert_to_index_for_coo(rows), self._convert_to_index_for_coo(cols) ) if type_as is not None: data = self.from_numpy(data, type_as=type_as) sparse_tensor = tf.sparse.SparseTensor( indices=tnp.stack([rows, cols]).T, values=data, dense_shape=shape ) # if type_as is not None: # sparse_tensor = self.from_numpy(sparse_tensor, type_as=type_as) # SparseTensor are not subscriptable so we use dense tensors return self.todense(sparse_tensor) def issparse(self, a): return isinstance(a, tf.sparse.SparseTensor) def tocsr(self, a): return a def eliminate_zeros(self, a, threshold=0.): if self.issparse(a): values = a.values if threshold > 0: mask = self.abs(values) <= threshold else: mask = values == 0 return tf.sparse.retain(a, ~mask) else: if threshold > 0: a = tnp.where(self.abs(a) > threshold, a, 0.) return a def todense(self, a): if self.issparse(a): return tf.sparse.to_dense(tf.sparse.reorder(a)) else: return a def where(self, condition, x=None, y=None): if x is None and y is None: return tnp.where(condition) else: return tnp.where(condition, x, y) def copy(self, a): return tf.identity(a) def allclose(self, a, b, rtol=1e-05, atol=1e-08, equal_nan=False): return tnp.allclose( a, b, rtol=rtol, atol=atol, equal_nan=equal_nan ) def dtype_device(self, a): return a.dtype, a.device.split("device:")[1] def assert_same_dtype_device(self, a, b): a_dtype, a_device = self.dtype_device(a) b_dtype, b_device = self.dtype_device(b) assert a_dtype == b_dtype, "Dtype discrepancy" assert a_device == b_device, f"Device discrepancy. 
First input is on {str(a_device)}, whereas second input is on {str(b_device)}" def squeeze(self, a, axis=None): return tnp.squeeze(a, axis=axis) def bitsize(self, type_as): return type_as.dtype.size * 8 def device_type(self, type_as): return self.dtype_device(type_as)[1].split(":")[0] def _bench(self, callable, *args, n_runs=1, warmup_runs=1): results = dict() device_contexts = [tf.device("/CPU:0")] if len(tf.config.list_physical_devices('GPU')) > 0: # pragma: no cover device_contexts.append(tf.device("/GPU:0")) for device_context in device_contexts: with device_context: for type_as in self.__type_list__: inputs = [self.from_numpy(arg, type_as=type_as) for arg in args] for _ in range(warmup_runs): callable(*inputs) t0 = time.perf_counter() for _ in range(n_runs): res = callable(*inputs) _ = res.numpy() t1 = time.perf_counter() key = ( "Tensorflow", self.device_type(inputs[0]), self.bitsize(type_as) ) results[key] = (t1 - t0) / n_runs return results def solve(self, a, b): return tf.linalg.solve(a, b) def trace(self, a): return tf.linalg.trace(a) def inv(self, a): return tf.linalg.inv(a) def sqrtm(self, a): L, V = tf.linalg.eigh(a) return (V * tf.sqrt(L)[None, :]) @ V.T def eigh(self, a): return tf.linalg.eigh(a) def kl_div(self, p, q, eps=1e-16): return tnp.sum(p * tnp.log(p / q + eps)) def isfinite(self, a): return tnp.isfinite(a) def array_equal(self, a, b): return tnp.array_equal(a, b) def is_floating_point(self, a): return a.dtype.is_floating def tile(self, a, reps): return tnp.tile(a, reps) def floor(self, a): return tf.floor(a) def prod(self, a, axis=0): return tnp.prod(a, axis=axis) def sort2(self, a, axis=-1): return self.sort(a, axis), self.argsort(a, axis) def qr(self, a): return tf.linalg.qr(a) def atan2(self, a, b): return tf.math.atan2(a, b) def transpose(self, a, axes=None): return tf.transpose(a, perm=axes) def detach(self, *args): if len(args) == 1: return tf.stop_gradient(args[0]) return [tf.stop_gradient(a) for a in args] def matmul(self, a, b): return tnp.matmul(a, b) # todo(okachaiev): replace this with a more reasonable implementation def nan_to_num(self, x, copy=True, nan=0.0, posinf=None, neginf=None): x = self.to_numpy(x) x = np.nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf) return self.from_numpy(x) if tf: # Only register tensorflow backend if it is installed _register_backend_implementation(TensorflowBackend) python-pot-0.9.3+dfsg/ot/bregman/000077500000000000000000000000001455713015700166305ustar00rootroot00000000000000python-pot-0.9.3+dfsg/ot/bregman/__init__.py000066400000000000000000000037051455713015700207460ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Solvers related to Bregman projections for entropic regularized OT """ # Author: Remi Flamary # CĂ©dric Vincent-Cuaz # # License: MIT License from ._utils import (geometricBar, geometricMean, projR, projC) from ._sinkhorn import (sinkhorn, sinkhorn2, sinkhorn_knopp, sinkhorn_log, greenkhorn, sinkhorn_stabilized, sinkhorn_epsilon_scaling) from ._barycenter import (barycenter, barycenter_sinkhorn, free_support_sinkhorn_barycenter, barycenter_stabilized, barycenter_debiased, jcpot_barycenter) from ._convolutional import (convolutional_barycenter2d, convolutional_barycenter2d_debiased) from ._empirical import (empirical_sinkhorn, empirical_sinkhorn2, empirical_sinkhorn_divergence) from ._screenkhorn import (screenkhorn) from ._dictionary import (unmix) from ._geomloss import (empirical_sinkhorn2_geomloss, geomloss) __all__ = ['geometricBar', 'geometricMean', 'projR', 'projC', 'sinkhorn', 
'sinkhorn2', 'sinkhorn_knopp', 'sinkhorn_log', 'greenkhorn', 'sinkhorn_stabilized', 'sinkhorn_epsilon_scaling', 'barycenter', 'barycenter_sinkhorn', 'free_support_sinkhorn_barycenter', 'barycenter_stabilized', 'barycenter_debiased', 'jcpot_barycenter', 'convolutional_barycenter2d', 'convolutional_barycenter2d_debiased', 'empirical_sinkhorn', 'empirical_sinkhorn2', 'empirical_sinkhorn2_geomloss', 'empirical_sinkhorn_divergence', 'geomloss', 'screenkhorn', 'unmix' ] python-pot-0.9.3+dfsg/ot/bregman/_barycenter.py000066400000000000000000000767741455713015700215140ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Bregman projections solvers for entropic regularized Wasserstein barycenters """ # Author: Remi Flamary # Nicolas Courty # Hicham Janati # Ievgen Redko # # License: MIT License import warnings import numpy as np from ..utils import dist, list_to_array, unif from ..backend import get_backend from ._utils import geometricBar, geometricMean, projR, projC from ._sinkhorn import sinkhorn def barycenter(A, M, reg, weights=None, method="sinkhorn", numItermax=10000, stopThr=1e-4, verbose=False, log=False, warn=True, **kwargs): r"""Compute the entropic regularized Wasserstein barycenter of distributions :math:`\mathbf{A}` The function solves the following optimization problem: .. math:: \mathbf{a} = \mathop{\arg \min}_\mathbf{a} \quad \sum_i W_{reg}(\mathbf{a},\mathbf{a}_i) where : - :math:`W_{reg}(\cdot,\cdot)` is the entropic regularized Wasserstein distance (see :py:func:`ot.bregman.sinkhorn`) if `method` is `sinkhorn` or `sinkhorn_stabilized` or `sinkhorn_log`. - :math:`\mathbf{a}_i` are training distributions in the columns of matrix :math:`\mathbf{A}` - `reg` and :math:`\mathbf{M}` are respectively the regularization term and the cost matrix for OT The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in :ref:`[3] ` Parameters ---------- A : array-like, shape (dim, n_hists) `n_hists` training distributions :math:`\mathbf{a}_i` of size `dim` M : array-like, shape (dim, dim) loss matrix for OT reg : float Regularization term > 0 method : str (optional) method used for the solver, either 'sinkhorn', 'sinkhorn_stabilized' or 'sinkhorn_log' weights : array-like, shape (n_hists,) Weights of each histogram :math:`\mathbf{a}_i` on the simplex (barycentric coordinates) numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on error (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True warn : bool, optional if True, raises a warning if the algorithm doesn't converge. Returns ------- a : (dim,) array-like Wasserstein barycenter log : dict log dictionary returned only if log==True in parameters .. _references-barycenter: References ---------- .. [3] Benamou, J. D., Carlier, G., Cuturi, M., Nenna, L., & PeyrĂ©, G. (2015). Iterative Bregman projections for regularized transportation problems. SIAM Journal on Scientific Computing, 37(2), A1111-A1138.
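Examples
--------
A small illustrative run on two 1D Gaussian histograms (a sketch; the `reg` value and Gaussian parameters are arbitrary):

>>> import numpy as np
>>> import ot
>>> n = 50
>>> x = np.arange(n, dtype=np.float64).reshape(-1, 1)
>>> M = ot.dist(x, x)
>>> M /= M.max()
>>> A = np.stack([ot.datasets.make_1D_gauss(n, m=15, s=5),
...               ot.datasets.make_1D_gauss(n, m=35, s=5)], axis=1)
>>> bary = ot.bregman.barycenter(A, M, reg=1e-2)  # barycenter histogram, shape (n,)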
""" if method.lower() == 'sinkhorn': return barycenter_sinkhorn(A, M, reg, weights=weights, numItermax=numItermax, stopThr=stopThr, verbose=verbose, log=log, warn=warn, **kwargs) elif method.lower() == 'sinkhorn_stabilized': return barycenter_stabilized(A, M, reg, weights=weights, numItermax=numItermax, stopThr=stopThr, verbose=verbose, log=log, warn=warn, **kwargs) elif method.lower() == 'sinkhorn_log': return _barycenter_sinkhorn_log(A, M, reg, weights=weights, numItermax=numItermax, stopThr=stopThr, verbose=verbose, log=log, warn=warn, **kwargs) else: raise ValueError("Unknown method '%s'." % method) def barycenter_sinkhorn(A, M, reg, weights=None, numItermax=1000, stopThr=1e-4, verbose=False, log=False, warn=True): r"""Compute the entropic regularized wasserstein barycenter of distributions :math:`\mathbf{A}` The function solves the following optimization problem: .. math:: \mathbf{a} = \mathop{\arg \min}_\mathbf{a} \quad \sum_i W_{reg}(\mathbf{a},\mathbf{a}_i) where : - :math:`W_{reg}(\cdot,\cdot)` is the entropic regularized Wasserstein distance (see :py:func:`ot.bregman.sinkhorn`) - :math:`\mathbf{a}_i` are training distributions in the columns of matrix :math:`\mathbf{A}` - `reg` and :math:`\mathbf{M}` are respectively the regularization term and the cost matrix for OT The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in :ref:`[3]`. Parameters ---------- A : array-like, shape (dim, n_hists) `n_hists` training distributions :math:`\mathbf{a}_i` of size `dim` M : array-like, shape (dim, dim) loss matrix for OT reg : float Regularization term > 0 weights : array-like, shape (n_hists,) Weights of each histogram :math:`\mathbf{a}_i` on the simplex (barycentric coodinates) numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on error (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True warn : bool, optional if True, raises a warning if the algorithm doesn't convergence. Returns ------- a : (dim,) array-like Wasserstein barycenter log : dict log dictionary return only if log==True in parameters .. _references-barycenter-sinkhorn: References ---------- .. [3] Benamou, J. D., Carlier, G., Cuturi, M., Nenna, L., & PeyrĂ©, G. (2015). Iterative Bregman projections for regularized transportation problems. SIAM Journal on Scientific Computing, 37(2), A1111-A1138. """ A, M = list_to_array(A, M) nx = get_backend(A, M) if weights is None: weights = nx.ones((A.shape[1],), type_as=A) / A.shape[1] else: assert (len(weights) == A.shape[1]) if log: log = {'err': []} K = nx.exp(-M / reg) err = 1 UKv = nx.dot(K, (A.T / nx.sum(K, axis=0)).T) u = (geometricMean(UKv) / UKv.T).T for ii in range(numItermax): UKv = u * nx.dot(K.T, A / nx.dot(K, u)) u = (u.T * geometricBar(weights, UKv)).T / UKv if ii % 10 == 1: err = nx.sum(nx.std(UKv, axis=1)) # log and verbose print if log: log['err'].append(err) if err < stopThr: break if verbose: if ii % 200 == 0: print( '{:5s}|{:12s}'.format('It.', 'Err') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(ii, err)) else: if warn: warnings.warn("Sinkhorn did not converge. 
You might want to " "increase the number of iterations `numItermax` " "or the regularization parameter `reg`.") if log: log['niter'] = ii return geometricBar(weights, UKv), log else: return geometricBar(weights, UKv) def free_support_sinkhorn_barycenter(measures_locations, measures_weights, X_init, reg, b=None, weights=None, numItermax=100, numInnerItermax=1000, stopThr=1e-7, verbose=False, log=None, **kwargs): r""" Solves the free support (locations of the barycenters are optimized, not the weights) regularized Wasserstein barycenter problem (i.e. the weighted Frechet mean for the 2-Sinkhorn divergence), formally: .. math:: \min_\mathbf{X} \quad \sum_{i=1}^N w_i W_{reg}^2(\mathbf{b}, \mathbf{X}, \mathbf{a}_i, \mathbf{X}_i) where : - :math:`w \in \mathbb{(0, 1)}^{N}`'s are the barycenter weights and sum to one - `measure_weights` denotes the :math:`\mathbf{a}_i \in \mathbb{R}^{k_i}`: empirical measures weights (on simplex) - `measures_locations` denotes the :math:`\mathbf{X}_i \in \mathbb{R}^{k_i, d}`: empirical measures atoms locations - :math:`\mathbf{b} \in \mathbb{R}^{k}` is the desired weights vector of the barycenter This problem is considered in :ref:`[20] ` (Algorithm 2). There are two differences with the following codes: - we do not optimize over the weights - we do not do line search for the locations updates, we use i.e. :math:`\theta = 1` in :ref:`[20] ` (Algorithm 2). This can be seen as a discrete implementation of the fixed-point algorithm of :ref:`[43] ` proposed in the continuous setting. - at each iteration, instead of solving an exact OT problem, we use the Sinkhorn algorithm for calculating the transport plan in :ref:`[20] ` (Algorithm 2). Parameters ---------- measures_locations : list of N (k_i,d) array-like The discrete support of a measure supported on :math:`k_i` locations of a `d`-dimensional space (:math:`k_i` can be different for each element of the list) measures_weights : list of N (k_i,) array-like Numpy arrays where each numpy array has :math:`k_i` non-negatives values summing to one representing the weights of each discrete input measure X_init : (k,d) array-like Initialization of the support locations (on `k` atoms) of the barycenter reg : float Regularization term >0 b : (k,) array-like Initialization of the weights of the barycenter (non-negatives, sum to 1) weights : (N,) array-like Initialization of the coefficients of the barycenter (non-negatives, sum to 1) numItermax : int, optional Max number of iterations numInnerItermax : int, optional Max number of iterations when calculating the transport plans with Sinkhorn stopThr : float, optional Stop threshold on error (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True Returns ------- X : (k,d) array-like Support locations (on k atoms) of the barycenter See Also -------- ot.bregman.sinkhorn : Entropic regularized OT solver ot.lp.free_support_barycenter : Barycenter solver based on Linear Programming .. _references-free-support-barycenter: References ---------- .. [20] Cuturi, Marco, and Arnaud Doucet. "Fast computation of Wasserstein barycenters." International Conference on Machine Learning. 2014. .. [43] Ălvarez-Esteban, Pedro C., et al. "A fixed-point approach to barycenters in Wasserstein space." Journal of Mathematical Analysis and Applications 441.2 (2016): 744-762. 
""" nx = get_backend(*measures_locations, *measures_weights, X_init) iter_count = 0 N = len(measures_locations) k = X_init.shape[0] d = X_init.shape[1] if b is None: b = nx.ones((k,), type_as=X_init) / k if weights is None: weights = nx.ones((N,), type_as=X_init) / N X = X_init log_dict = {} displacement_square_norms = [] displacement_square_norm = stopThr + 1. while (displacement_square_norm > stopThr and iter_count < numItermax): T_sum = nx.zeros((k, d), type_as=X_init) for (measure_locations_i, measure_weights_i, weight_i) in zip(measures_locations, measures_weights, weights): M_i = dist(X, measure_locations_i) T_i = sinkhorn(b, measure_weights_i, M_i, reg=reg, numItermax=numInnerItermax, **kwargs) T_sum = T_sum + weight_i * 1. / \ b[:, None] * nx.dot(T_i, measure_locations_i) displacement_square_norm = nx.sum((T_sum - X) ** 2) if log: displacement_square_norms.append(displacement_square_norm) X = T_sum if verbose: print('iteration %d, displacement_square_norm=%f\n', iter_count, displacement_square_norm) iter_count += 1 if log: log_dict['displacement_square_norms'] = displacement_square_norms return X, log_dict else: return X def _barycenter_sinkhorn_log(A, M, reg, weights=None, numItermax=1000, stopThr=1e-4, verbose=False, log=False, warn=True): r"""Compute the entropic wasserstein barycenter in log-domain """ A, M = list_to_array(A, M) dim, n_hists = A.shape nx = get_backend(A, M) if nx.__name__ in ("jax", "tf"): raise NotImplementedError( "Log-domain functions are not yet implemented" " for Jax and tf. Use numpy or torch arrays instead." ) if weights is None: weights = nx.ones(n_hists, type_as=A) / n_hists else: assert (len(weights) == A.shape[1]) if log: log = {'err': []} M = - M / reg logA = nx.log(A + 1e-15) log_KU, G = nx.zeros((2, *logA.shape), type_as=A) err = 1 for ii in range(numItermax): log_bar = nx.zeros(dim, type_as=A) for k in range(n_hists): f = logA[:, k] - nx.logsumexp(M + G[None, :, k], axis=1) log_KU[:, k] = nx.logsumexp(M + f[:, None], axis=0) log_bar = log_bar + weights[k] * log_KU[:, k] if ii % 10 == 1: err = nx.exp(G + log_KU).std(axis=1).sum() # log and verbose print if log: log['err'].append(err) if err < stopThr: break if verbose: if ii % 200 == 0: print( '{:5s}|{:12s}'.format('It.', 'Err') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(ii, err)) G = log_bar[:, None] - log_KU else: if warn: warnings.warn("Sinkhorn did not converge. You might want to " "increase the number of iterations `numItermax` " "or the regularization parameter `reg`.") if log: log['niter'] = ii return nx.exp(log_bar), log else: return nx.exp(log_bar) def barycenter_stabilized(A, M, reg, tau=1e10, weights=None, numItermax=1000, stopThr=1e-4, verbose=False, log=False, warn=True): r"""Compute the entropic regularized wasserstein barycenter of distributions :math:`\mathbf{A}` with stabilization. The function solves the following optimization problem: .. 
math:: \mathbf{a} = \mathop{\arg \min}_\mathbf{a} \quad \sum_i W_{reg}(\mathbf{a},\mathbf{a}_i) where : - :math:`W_{reg}(\cdot,\cdot)` is the entropic regularized Wasserstein distance (see :py:func:`ot.bregman.sinkhorn`) - :math:`\mathbf{a}_i` are training distributions in the columns of matrix :math:`\mathbf{A}` - `reg` and :math:`\mathbf{M}` are respectively the regularization term and the cost matrix for OT The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in :ref:`[3] ` Parameters ---------- A : array-like, shape (dim, n_hists) `n_hists` training distributions :math:`\mathbf{a}_i` of size `dim` M : array-like, shape (dim, dim) loss matrix for OT reg : float Regularization term > 0 tau : float threshold for max value in :math:`\mathbf{u}` or :math:`\mathbf{v}` for log scaling weights : array-like, shape (n_hists,) Weights of each histogram :math:`\mathbf{a}_i` on the simplex (barycentric coodinates) numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on error (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True warn : bool, optional if True, raises a warning if the algorithm doesn't convergence. Returns ------- a : (dim,) array-like Wasserstein barycenter log : dict log dictionary return only if log==True in parameters .. _references-barycenter-stabilized: References ---------- .. [3] Benamou, J. D., Carlier, G., Cuturi, M., Nenna, L., & PeyrĂ©, G. (2015). Iterative Bregman projections for regularized transportation problems. SIAM Journal on Scientific Computing, 37(2), A1111-A1138. """ A, M = list_to_array(A, M) nx = get_backend(A, M) dim, n_hists = A.shape if weights is None: weights = nx.ones((n_hists,), type_as=M) / n_hists else: assert (len(weights) == A.shape[1]) if log: log = {'err': []} u = nx.ones((dim, n_hists), type_as=M) / dim v = nx.ones((dim, n_hists), type_as=M) / dim K = nx.exp(-M / reg) err = 1. alpha = nx.zeros((dim,), type_as=M) beta = nx.zeros((dim,), type_as=M) q = nx.ones((dim,), type_as=M) / dim for ii in range(numItermax): qprev = q Kv = nx.dot(K, v) u = A / Kv Ktu = nx.dot(K.T, u) q = geometricBar(weights, Ktu) Q = q[:, None] v = Q / Ktu absorbing = False if nx.any(u > tau) or nx.any(v > tau): absorbing = True alpha += reg * nx.log(nx.max(u, 1)) beta += reg * nx.log(nx.max(v, 1)) K = nx.exp((alpha[:, None] + beta[None, :] - M) / reg) v = nx.ones(tuple(v.shape), type_as=v) Kv = nx.dot(K, v) if (nx.any(Ktu == 0.) or nx.any(nx.isnan(u)) or nx.any(nx.isnan(v)) or nx.any(nx.isinf(u)) or nx.any(nx.isinf(v))): # we have reached the machine precision # come back to previous solution and quit loop warnings.warn('Numerical errors at iteration %s' % ii) q = qprev break if (ii % 10 == 0 and not absorbing) or ii == 0: # we can speed up the process by checking for the error only all # the 10th iterations err = nx.max(nx.abs(u * Kv - A)) if log: log['err'].append(err) if err < stopThr: break if verbose: if ii % 50 == 0: print( '{:5s}|{:12s}'.format('It.', 'Err') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(ii, err)) else: if warn: warnings.warn("Stabilized Sinkhorn did not converge." 
+ "Try a larger entropy `reg`" + "Or a larger absorption threshold `tau`.") if log: log['niter'] = ii log['logu'] = nx.log(u + 1e-16) log['logv'] = nx.log(v + 1e-16) return q, log else: return q def barycenter_debiased(A, M, reg, weights=None, method="sinkhorn", numItermax=10000, stopThr=1e-4, verbose=False, log=False, warn=True, **kwargs): r"""Compute the debiased Sinkhorn barycenter of distributions A The function solves the following optimization problem: .. math:: \mathbf{a} = \mathop{\arg \min}_\mathbf{a} \quad \sum_i S_{reg}(\mathbf{a},\mathbf{a}_i) where : - :math:`S_{reg}(\cdot,\cdot)` is the debiased Sinkhorn divergence (see :py:func:`ot.bregman.empirical_sinkhorn_divergence`) - :math:`\mathbf{a}_i` are training distributions in the columns of matrix :math:`\mathbf{A}` - `reg` and :math:`\mathbf{M}` are respectively the regularization term and the cost matrix for OT The algorithm used for solving the problem is the debiased Sinkhorn algorithm as proposed in :ref:`[37] ` Parameters ---------- A : array-like, shape (dim, n_hists) `n_hists` training distributions :math:`\mathbf{a}_i` of size `dim` M : array-like, shape (dim, dim) loss matrix for OT reg : float Regularization term > 0 method : str (optional) method used for the solver either 'sinkhorn' or 'sinkhorn_log' weights : array-like, shape (n_hists,) Weights of each histogram :math:`\mathbf{a}_i` on the simplex (barycentric coodinates) numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on error (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True warn : bool, optional if True, raises a warning if the algorithm doesn't convergence. Returns ------- a : (dim,) array-like Wasserstein barycenter log : dict log dictionary return only if log==True in parameters .. _references-barycenter-debiased: References ---------- .. [37] Janati, H., Cuturi, M., Gramfort, A. Proceedings of the 37th International Conference on Machine Learning, PMLR 119:4692-4701, 2020 """ if method.lower() == 'sinkhorn': return _barycenter_debiased(A, M, reg, weights=weights, numItermax=numItermax, stopThr=stopThr, verbose=verbose, log=log, warn=warn, **kwargs) elif method.lower() == 'sinkhorn_log': return _barycenter_debiased_log(A, M, reg, weights=weights, numItermax=numItermax, stopThr=stopThr, verbose=verbose, log=log, warn=warn, **kwargs) else: raise ValueError("Unknown method '%s'." % method) def _barycenter_debiased(A, M, reg, weights=None, numItermax=1000, stopThr=1e-4, verbose=False, log=False, warn=True): r"""Compute the debiased sinkhorn barycenter of distributions A. """ A, M = list_to_array(A, M) nx = get_backend(A, M) if weights is None: weights = nx.ones((A.shape[1],), type_as=A) / A.shape[1] else: assert (len(weights) == A.shape[1]) if log: log = {'err': []} K = nx.exp(-M / reg) err = 1 UKv = nx.dot(K, (A.T / nx.sum(K, axis=0)).T) u = (geometricMean(UKv) / UKv.T).T c = nx.ones(A.shape[0], type_as=A) bar = nx.ones(A.shape[0], type_as=A) for ii in range(numItermax): bold = bar UKv = nx.dot(K, A / nx.dot(K, u)) bar = c * geometricBar(weights, UKv) u = bar[:, None] / UKv c = (c * bar / nx.dot(K, c)) ** 0.5 if ii % 10 == 9: err = abs(bar - bold).max() / max(bar.max(), 1.) 
# log and verbose print if log: log['err'].append(err) # debiased Sinkhorn does not converge monotonically # guarantee a few iterations are done before stopping if err < stopThr and ii > 20: break if verbose: if ii % 200 == 0: print( '{:5s}|{:12s}'.format('It.', 'Err') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(ii, err)) else: if warn: warnings.warn("Sinkhorn did not converge. You might want to " "increase the number of iterations `numItermax` " "or the regularization parameter `reg`.") if log: log['niter'] = ii return bar, log else: return bar def _barycenter_debiased_log(A, M, reg, weights=None, numItermax=1000, stopThr=1e-4, verbose=False, log=False, warn=True): r"""Compute the debiased sinkhorn barycenter in log domain. """ A, M = list_to_array(A, M) dim, n_hists = A.shape nx = get_backend(A, M) if nx.__name__ in ("jax", "tf"): raise NotImplementedError( "Log-domain functions are not yet implemented" " for Jax and TF. Use numpy or torch arrays instead." ) if weights is None: weights = nx.ones(n_hists, type_as=A) / n_hists else: assert (len(weights) == A.shape[1]) if log: log = {'err': []} M = - M / reg logA = nx.log(A + 1e-15) log_KU, G = nx.zeros((2, *logA.shape), type_as=A) c = nx.zeros(dim, type_as=A) err = 1 for ii in range(numItermax): log_bar = nx.zeros(dim, type_as=A) for k in range(n_hists): f = logA[:, k] - nx.logsumexp(M + G[None, :, k], axis=1) log_KU[:, k] = nx.logsumexp(M + f[:, None], axis=0) log_bar += weights[k] * log_KU[:, k] log_bar += c if ii % 10 == 1: err = nx.exp(G + log_KU).std(axis=1).sum() # log and verbose print if log: log['err'].append(err) if err < stopThr and ii > 20: break if verbose: if ii % 200 == 0: print( '{:5s}|{:12s}'.format('It.', 'Err') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(ii, err)) G = log_bar[:, None] - log_KU for _ in range(10): c = 0.5 * (c + log_bar - nx.logsumexp(M + c[:, None], axis=0)) else: if warn: warnings.warn("Sinkhorn did not converge. You might want to " "increase the number of iterations `numItermax` " "or the regularization parameter `reg`.") if log: log['niter'] = ii return nx.exp(log_bar), log else: return nx.exp(log_bar) def jcpot_barycenter(Xs, Ys, Xt, reg, metric='sqeuclidean', numItermax=100, stopThr=1e-6, verbose=False, log=False, warn=True, **kwargs): r'''Joint OT and proportion estimation for multi-source target shift as proposed in :ref:`[27] ` The function solves the following optimization problem: .. math:: \mathbf{h} = \mathop{\arg \min}_{\mathbf{h}} \quad \sum_{k=1}^{K} \lambda_k W_{reg}((\mathbf{D}_2^{(k)} \mathbf{h})^T, \mathbf{a}) s.t. \ \forall k, \mathbf{D}_1^{(k)} \gamma_k \mathbf{1}_n= \mathbf{h} where : - :math:`\lambda_k` is the weight of `k`-th source domain - :math:`W_{reg}(\cdot,\cdot)` is the entropic regularized Wasserstein distance (see :py:func:`ot.bregman.sinkhorn`) - :math:`\mathbf{D}_2^{(k)}` is a matrix of weights related to `k`-th source domain defined as in [p. 5, :ref:`27 `], its expected shape is :math:`(n_k, C)` where :math:`n_k` is the number of elements in the `k`-th source domain and `C` is the number of classes - :math:`\mathbf{h}` is a vector of estimated proportions in the target domain of size `C` - :math:`\mathbf{a}` is a uniform vector of weights in the target domain of size `n` - :math:`\mathbf{D}_1^{(k)}` is a matrix of class assignments defined as in [p. 5, :ref:`27 `], its expected shape is :math:`(n_k, C)` The problem consist in solving a Wasserstein barycenter problem to estimate the proportions :math:`\mathbf{h}` in the target domain. 
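For instance, with two toy source domains (an illustrative sketch; the sizes and the `reg` value are arbitrary)::

    import numpy as np
    import ot

    rng = np.random.RandomState(0)
    Xs = [rng.randn(20, 2), rng.randn(30, 2)]            # two source domains
    Ys = [rng.randint(0, 2, 20), rng.randint(0, 2, 30)]  # labels in {0, 1}
    Xt = rng.randn(25, 2)
    h = ot.bregman.jcpot_barycenter(Xs, Ys, Xt, reg=1.)  # estimated class proportions, shape (2,)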
The algorithm used for solving the problem is the Iterative Bregman projections algorithm with two sets of marginal constraints related to the unknown vector :math:`\mathbf{h}` and uniform target distribution. Parameters ---------- Xs : list of K array-like(nsk,d) features of all source domains' samples Ys : list of K array-like(nsk,) labels of all source domains' samples Xt : array-like (nt,d) samples in the target domain reg : float Regularization term > 0 metric : string, optional (default="sqeuclidean") The ground metric for the Wasserstein problem numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on relative change in the barycenter (>0) verbose : bool, optional (default=False) Controls the verbosity of the optimization algorithm log : bool, optional record log if True warn : bool, optional if True, raises a warning if the algorithm doesn't convergence. Returns ------- h : (C,) array-like proportion estimation in the target domain log : dict log dictionary return only if log==True in parameters .. _references-jcpot-barycenter: References ---------- .. [27] Ievgen Redko, Nicolas Courty, RĂ©mi Flamary, Devis Tuia "Optimal transport for multi-source domain adaptation under target shift", International Conference on Artificial Intelligence and Statistics (AISTATS), 2019. ''' Xs = list_to_array(*Xs) Ys = list_to_array(*Ys) Xt = list_to_array(Xt) nx = get_backend(*Xs, *Ys, Xt) nbclasses = len(nx.unique(Ys[0])) nbdomains = len(Xs) # log dictionary if log: log = {'niter': 0, 'err': [], 'M': [], 'D1': [], 'D2': [], 'gamma': []} K = [] M = [] D1 = [] D2 = [] # For each source domain, build cost matrices M, Gibbs kernels K and corresponding matrices D_1 and D_2 for d in range(nbdomains): dom = {} nsk = Xs[d].shape[0] # get number of elements for this domain dom['nbelem'] = nsk classes = nx.unique(Ys[d]) # get number of classes for this domain # format classes to start from 0 for convenience if nx.min(classes) != 0: Ys[d] -= nx.min(classes) classes = nx.unique(Ys[d]) # build the corresponding D_1 and D_2 matrices Dtmp1 = np.zeros((nbclasses, nsk)) Dtmp2 = np.zeros((nbclasses, nsk)) for c in classes: nbelemperclass = float(nx.sum(Ys[d] == c)) if nbelemperclass != 0: Dtmp1[int(c), nx.to_numpy(Ys[d] == c)] = 1. Dtmp2[int(c), nx.to_numpy(Ys[d] == c)] = 1. / (nbelemperclass) D1.append(nx.from_numpy(Dtmp1, type_as=Xs[0])) D2.append(nx.from_numpy(Dtmp2, type_as=Xs[0])) # build the cost matrix and the Gibbs kernel Mtmp = dist(Xs[d], Xt, metric=metric) M.append(Mtmp) Ktmp = nx.exp(-Mtmp / reg) K.append(Ktmp) # uniform target distribution a = nx.from_numpy(unif(Xt.shape[0]), type_as=Xs[0]) err = 1 old_bary = nx.ones((nbclasses,), type_as=Xs[0]) for ii in range(numItermax): bary = nx.zeros((nbclasses,), type_as=Xs[0]) # update coupling matrices for marginal constraints w.r.t. uniform target distribution for d in range(nbdomains): K[d] = projC(K[d], a) other = nx.sum(K[d], axis=1) bary += nx.log(nx.dot(D1[d], other)) / nbdomains bary = nx.exp(bary) # update coupling matrices for marginal constraints w.r.t. unknown proportions based on [Prop 4., 27] for d in range(nbdomains): new = nx.dot(D2[d].T, bary) K[d] = projR(K[d], new) err = nx.norm(bary - old_bary) old_bary = bary if log: log['err'].append(err) if err < stopThr: break if verbose: if ii % 200 == 0: print('{:5s}|{:12s}'.format('It.', 'Err') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(ii, err)) else: if warn: warnings.warn("Algorithm did not converge. 
You might want to " "increase the number of iterations `numItermax` " "or the regularization parameter `reg`.") bary = bary / nx.sum(bary) if log: log['niter'] = ii log['M'] = M log['D1'] = D1 log['D2'] = D2 log['gamma'] = K return bary, log else: return bary python-pot-0.9.3+dfsg/ot/bregman/_convolutional.py000066400000000000000000000414051455713015700222410ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Bregman projections solvers for entropic regularized Wasserstein convolutional barycenters """ # Author: Remi Flamary # Nicolas Courty # # License: MIT License import warnings from ..utils import list_to_array from ..backend import get_backend def convolutional_barycenter2d(A, reg, weights=None, method="sinkhorn", numItermax=10000, stopThr=1e-4, verbose=False, log=False, warn=True, **kwargs): r"""Compute the entropic regularized wasserstein barycenter of distributions :math:`\mathbf{A}` where :math:`\mathbf{A}` is a collection of 2D images. The function solves the following optimization problem: .. math:: \mathbf{a} = \mathop{\arg \min}_\mathbf{a} \quad \sum_i W_{reg}(\mathbf{a},\mathbf{a}_i) where : - :math:`W_{reg}(\cdot,\cdot)` is the entropic regularized Wasserstein distance (see :py:func:`ot.bregman.sinkhorn`) - :math:`\mathbf{a}_i` are training distributions (2D images) in the mast two dimensions of matrix :math:`\mathbf{A}` - `reg` is the regularization strength scalar value The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in :ref:`[21] ` Parameters ---------- A : array-like, shape (n_hists, width, height) `n` distributions (2D images) of size `width` x `height` reg : float Regularization term >0 weights : array-like, shape (n_hists,) Weights of each image on the simplex (barycentric coodinates) method : string, optional method used for the solver either 'sinkhorn' or 'sinkhorn_log' numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on error (> 0) stabThr : float, optional Stabilization threshold to avoid numerical precision issue verbose : bool, optional Print information along iterations log : bool, optional record log if True warn : bool, optional if True, raises a warning if the algorithm doesn't convergence. Returns ------- a : array-like, shape (width, height) 2D Wasserstein barycenter log : dict log dictionary return only if log==True in parameters .. _references-convolutional-barycenter-2d: References ---------- .. [21] Solomon, J., De Goes, F., PeyrĂ©, G., Cuturi, M., Butscher, A., Nguyen, A. & Guibas, L. (2015). Convolutional wasserstein distances: Efficient optimal transportation on geometric domains. ACM Transactions on Graphics (TOG), 34(4), 66 .. [37] Janati, H., Cuturi, M., Gramfort, A. Proceedings of the 37th International Conference on Machine Learning, PMLR 119:4692-4701, 2020 """ if method.lower() == 'sinkhorn': return _convolutional_barycenter2d(A, reg, weights=weights, numItermax=numItermax, stopThr=stopThr, verbose=verbose, log=log, warn=warn, **kwargs) elif method.lower() == 'sinkhorn_log': return _convolutional_barycenter2d_log(A, reg, weights=weights, numItermax=numItermax, stopThr=stopThr, verbose=verbose, log=log, warn=warn, **kwargs) else: raise ValueError("Unknown method '%s'." % method) def _convolutional_barycenter2d(A, reg, weights=None, numItermax=10000, stopThr=1e-9, stabThr=1e-30, verbose=False, log=False, warn=True): r"""Compute the entropic regularized wasserstein barycenter of distributions A where A is a collection of 2D images. 
""" A = list_to_array(A) nx = get_backend(A) if weights is None: weights = nx.ones((A.shape[0],), type_as=A) / A.shape[0] else: assert (len(weights) == A.shape[0]) if log: log = {'err': []} bar = nx.ones(A.shape[1:], type_as=A) bar /= nx.sum(bar) U = nx.ones(A.shape, type_as=A) V = nx.ones(A.shape, type_as=A) err = 1 # build the convolution operator # this is equivalent to blurring on horizontal then vertical directions t = nx.linspace(0, 1, A.shape[1], type_as=A) [Y, X] = nx.meshgrid(t, t) K1 = nx.exp(-(X - Y) ** 2 / reg) t = nx.linspace(0, 1, A.shape[2], type_as=A) [Y, X] = nx.meshgrid(t, t) K2 = nx.exp(-(X - Y) ** 2 / reg) def convol_imgs(imgs): kx = nx.einsum("...ij,kjl->kil", K1, imgs) kxy = nx.einsum("...ij,klj->kli", K2, kx) return kxy KU = convol_imgs(U) for ii in range(numItermax): V = bar[None] / KU KV = convol_imgs(V) U = A / KV KU = convol_imgs(U) bar = nx.exp( nx.sum(weights[:, None, None] * nx.log(KU + stabThr), axis=0) ) if ii % 10 == 9: err = nx.sum(nx.std(V * KU, axis=0)) # log and verbose print if log: log['err'].append(err) if verbose: if ii % 200 == 0: print('{:5s}|{:12s}'.format( 'It.', 'Err') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(ii, err)) if err < stopThr: break else: if warn: warnings.warn("Convolutional Sinkhorn did not converge. " "Try a larger number of iterations `numItermax` " "or a larger entropy `reg`.") if log: log['niter'] = ii log['U'] = U return bar, log else: return bar def _convolutional_barycenter2d_log(A, reg, weights=None, numItermax=10000, stopThr=1e-4, stabThr=1e-30, verbose=False, log=False, warn=True): r"""Compute the entropic regularized wasserstein barycenter of distributions A where A is a collection of 2D images in log-domain. """ A = list_to_array(A) nx = get_backend(A) if nx.__name__ in ("jax", "tf"): raise NotImplementedError( "Log-domain functions are not yet implemented" " for Jax and TF. Use numpy or torch arrays instead." ) n_hists, width, height = A.shape if weights is None: weights = nx.ones((n_hists,), type_as=A) / n_hists else: assert (len(weights) == n_hists) if log: log = {'err': []} err = 1 # build the convolution operator # this is equivalent to blurring on horizontal then vertical directions t = nx.linspace(0, 1, width, type_as=A) [Y, X] = nx.meshgrid(t, t) M1 = - (X - Y) ** 2 / reg t = nx.linspace(0, 1, height, type_as=A) [Y, X] = nx.meshgrid(t, t) M2 = - (X - Y) ** 2 / reg def convol_img(log_img): log_img = nx.logsumexp(M1[:, :, None] + log_img[None], axis=1) log_img = nx.logsumexp(M2[:, :, None] + log_img.T[None], axis=1).T return log_img logA = nx.log(A + stabThr) log_KU, G, F = nx.zeros((3, *logA.shape), type_as=A) err = 1 for ii in range(numItermax): log_bar = nx.zeros((width, height), type_as=A) for k in range(n_hists): f = logA[k] - convol_img(G[k]) log_KU[k] = convol_img(f) log_bar = log_bar + weights[k] * log_KU[k] if ii % 10 == 9: err = nx.exp(G + log_KU).std(axis=0).sum() # log and verbose print if log: log['err'].append(err) if verbose: if ii % 200 == 0: print('{:5s}|{:12s}'.format( 'It.', 'Err') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(ii, err)) if err < stopThr: break G = log_bar[None, :, :] - log_KU else: if warn: warnings.warn("Convolutional Sinkhorn did not converge. 
" "Try a larger number of iterations `numItermax` " "or a larger entropy `reg`.") if log: log['niter'] = ii return nx.exp(log_bar), log else: return nx.exp(log_bar) def convolutional_barycenter2d_debiased(A, reg, weights=None, method="sinkhorn", numItermax=10000, stopThr=1e-3, verbose=False, log=False, warn=True, **kwargs): r"""Compute the debiased sinkhorn barycenter of distributions :math:`\mathbf{A}` where :math:`\mathbf{A}` is a collection of 2D images. The function solves the following optimization problem: .. math:: \mathbf{a} = \mathop{\arg \min}_\mathbf{a} \quad \sum_i S_{reg}(\mathbf{a},\mathbf{a}_i) where : - :math:`S_{reg}(\cdot,\cdot)` is the debiased entropic regularized Wasserstein distance (see :py:func:`ot.bregman.barycenter_debiased`) - :math:`\mathbf{a}_i` are training distributions (2D images) in the mast two dimensions of matrix :math:`\mathbf{A}` - `reg` is the regularization strength scalar value The algorithm used for solving the problem is the debiased Sinkhorn scaling algorithm as proposed in :ref:`[37] ` Parameters ---------- A : array-like, shape (n_hists, width, height) `n` distributions (2D images) of size `width` x `height` reg : float Regularization term >0 weights : array-like, shape (n_hists,) Weights of each image on the simplex (barycentric coodinates) method : string, optional method used for the solver either 'sinkhorn' or 'sinkhorn_log' numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on error (> 0) stabThr : float, optional Stabilization threshold to avoid numerical precision issue verbose : bool, optional Print information along iterations log : bool, optional record log if True warn : bool, optional if True, raises a warning if the algorithm doesn't convergence. Returns ------- a : array-like, shape (width, height) 2D Wasserstein barycenter log : dict log dictionary return only if log==True in parameters .. _references-convolutional-barycenter2d-debiased: References ---------- .. [37] Janati, H., Cuturi, M., Gramfort, A. Proceedings of the 37th International Conference on Machine Learning, PMLR 119:4692-4701, 2020 """ if method.lower() == 'sinkhorn': return _convolutional_barycenter2d_debiased(A, reg, weights=weights, numItermax=numItermax, stopThr=stopThr, verbose=verbose, log=log, warn=warn, **kwargs) elif method.lower() == 'sinkhorn_log': return _convolutional_barycenter2d_debiased_log(A, reg, weights=weights, numItermax=numItermax, stopThr=stopThr, verbose=verbose, log=log, warn=warn, **kwargs) else: raise ValueError("Unknown method '%s'." % method) def _convolutional_barycenter2d_debiased(A, reg, weights=None, numItermax=10000, stopThr=1e-3, stabThr=1e-15, verbose=False, log=False, warn=True): r"""Compute the debiased barycenter of 2D images via sinkhorn convolutions. 
""" A = list_to_array(A) n_hists, width, height = A.shape nx = get_backend(A) if weights is None: weights = nx.ones((n_hists,), type_as=A) / n_hists else: assert (len(weights) == n_hists) if log: log = {'err': []} bar = nx.ones((width, height), type_as=A) bar /= width * height U = nx.ones(A.shape, type_as=A) V = nx.ones(A.shape, type_as=A) c = nx.ones(A.shape[1:], type_as=A) err = 1 # build the convolution operator # this is equivalent to blurring on horizontal then vertical directions t = nx.linspace(0, 1, width, type_as=A) [Y, X] = nx.meshgrid(t, t) K1 = nx.exp(-(X - Y) ** 2 / reg) t = nx.linspace(0, 1, height, type_as=A) [Y, X] = nx.meshgrid(t, t) K2 = nx.exp(-(X - Y) ** 2 / reg) def convol_imgs(imgs): kx = nx.einsum("...ij,kjl->kil", K1, imgs) kxy = nx.einsum("...ij,klj->kli", K2, kx) return kxy KU = convol_imgs(U) for ii in range(numItermax): V = bar[None] / KU KV = convol_imgs(V) U = A / KV KU = convol_imgs(U) bar = c * nx.exp( nx.sum(weights[:, None, None] * nx.log(KU + stabThr), axis=0) ) for _ in range(10): c = (c * bar / nx.squeeze(convol_imgs(c[None]))) ** 0.5 if ii % 10 == 9: err = nx.sum(nx.std(V * KU, axis=0)) # log and verbose print if log: log['err'].append(err) if verbose: if ii % 200 == 0: print('{:5s}|{:12s}'.format( 'It.', 'Err') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(ii, err)) # debiased Sinkhorn does not converge monotonically # guarantee a few iterations are done before stopping if err < stopThr and ii > 20: break else: if warn: warnings.warn("Sinkhorn did not converge. You might want to " "increase the number of iterations `numItermax` " "or the regularization parameter `reg`.") if log: log['niter'] = ii log['U'] = U return bar, log else: return bar def _convolutional_barycenter2d_debiased_log(A, reg, weights=None, numItermax=10000, stopThr=1e-3, stabThr=1e-30, verbose=False, log=False, warn=True): r"""Compute the debiased barycenter of 2D images in log-domain. """ A = list_to_array(A) n_hists, width, height = A.shape nx = get_backend(A) if nx.__name__ in ("jax", "tf"): raise NotImplementedError( "Log-domain functions are not yet implemented" " for Jax and TF. Use numpy or torch arrays instead." 
) if weights is None: weights = nx.ones((n_hists,), type_as=A) / n_hists else: assert (len(weights) == A.shape[0]) if log: log = {'err': []} err = 1 # build the convolution operator # this is equivalent to blurring on horizontal then vertical directions t = nx.linspace(0, 1, width, type_as=A) [Y, X] = nx.meshgrid(t, t) M1 = - (X - Y) ** 2 / reg t = nx.linspace(0, 1, height, type_as=A) [Y, X] = nx.meshgrid(t, t) M2 = - (X - Y) ** 2 / reg def convol_img(log_img): log_img = nx.logsumexp(M1[:, :, None] + log_img[None], axis=1) log_img = nx.logsumexp(M2[:, :, None] + log_img.T[None], axis=1).T return log_img logA = nx.log(A + stabThr) log_bar, c = nx.zeros((2, width, height), type_as=A) log_KU, G, F = nx.zeros((3, *logA.shape), type_as=A) err = 1 for ii in range(numItermax): log_bar = nx.zeros((width, height), type_as=A) for k in range(n_hists): f = logA[k] - convol_img(G[k]) log_KU[k] = convol_img(f) log_bar = log_bar + weights[k] * log_KU[k] log_bar += c for _ in range(10): c = 0.5 * (c + log_bar - convol_img(c)) if ii % 10 == 9: err = nx.sum(nx.std(nx.exp(G + log_KU), axis=0)) # log and verbose print if log: log['err'].append(err) if verbose: if ii % 200 == 0: print('{:5s}|{:12s}'.format( 'It.', 'Err') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(ii, err)) if err < stopThr and ii > 20: break G = log_bar[None, :, :] - log_KU else: if warn: warnings.warn("Convolutional Sinkhorn did not converge. " "Try a larger number of iterations `numItermax` " "or a larger entropy `reg`.") if log: log['niter'] = ii return nx.exp(log_bar), log else: return nx.exp(log_bar) python-pot-0.9.3+dfsg/ot/bregman/_dictionary.py000066400000000000000000000112551455713015700215120ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Dictionary Learning based on Bregman projections for entropic regularized OT """ # Author: Remi Flamary # Nicolas Courty # # License: MIT License import warnings from ..utils import list_to_array from ..backend import get_backend from ._utils import projC, projR def unmix(a, D, M, M0, h0, reg, reg0, alpha, numItermax=1000, stopThr=1e-3, verbose=False, log=False, warn=True): r""" Compute the unmixing of an observation with a given dictionary using Wasserstein distance The function solve the following optimization problem: .. 
math:: \mathbf{h} = \mathop{\arg \min}_\mathbf{h} \quad (1 - \alpha) W_{\mathbf{M}, \mathrm{reg}}(\mathbf{a}, \mathbf{Dh}) + \alpha W_{\mathbf{M_0}, \mathrm{reg}_0}(\mathbf{h}_0, \mathbf{h}) where : - :math:`W_{M,reg}(\cdot,\cdot)` is the entropic regularized Wasserstein distance with :math:`\mathbf{M}` loss matrix (see :py:func:`ot.bregman.sinkhorn`) - :math:`\mathbf{D}` is a dictionary of `n_atoms` atoms of dimension `dim_a`, its expected shape is `(dim_a, n_atoms)` - :math:`\mathbf{h}` is the estimated unmixing of dimension `n_atoms` - :math:`\mathbf{a}` is an observed distribution of dimension `dim_a` - :math:`\mathbf{h}_0` is a prior on :math:`\mathbf{h}` of dimension `dim_prior` - `reg` and :math:`\mathbf{M}` are respectively the regularization term and the cost matrix (`dim_a`, `dim_a`) for OT data fitting - `reg`:math:`_0` and :math:`\mathbf{M_0}` are respectively the regularization term and the cost matrix (`dim_prior`, `n_atoms`) regularization - :math:`\alpha` weight data fitting and regularization The optimization problem is solved following the algorithm described in :ref:`[4] ` Parameters ---------- a : array-like, shape (dim_a) observed distribution (histogram, sums to 1) D : array-like, shape (dim_a, n_atoms) dictionary matrix M : array-like, shape (dim_a, dim_a) loss matrix M0 : array-like, shape (n_atoms, dim_prior) loss matrix h0 : array-like, shape (n_atoms,) prior on the estimated unmixing h reg : float Regularization term >0 (Wasserstein data fitting) reg0 : float Regularization term >0 (Wasserstein reg with h0) alpha : float How much should we trust the prior ([0,1]) numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on error (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True warn : bool, optional if True, raises a warning if the algorithm doesn't convergence. Returns ------- h : array-like, shape (n_atoms,) Wasserstein barycenter log : dict log dictionary return only if log==True in parameters .. _references-unmix: References ---------- .. [4] S. Nakhostin, N. Courty, R. Flamary, D. Tuia, T. Corpetti, Supervised planetary unmixing with optimal transport, Workshop on Hyperspectral Image and Signal Processing : Evolution in Remote Sensing (WHISPERS), 2016. """ a, D, M, M0, h0 = list_to_array(a, D, M, M0, h0) nx = get_backend(a, D, M, M0, h0) # M = M/np.median(M) K = nx.exp(-M / reg) # M0 = M0/np.median(M0) K0 = nx.exp(-M0 / reg0) old = h0 err = 1 # log = {'niter':0, 'all_err':[]} if log: log = {'err': []} for ii in range(numItermax): K = projC(K, a) K0 = projC(K0, h0) new = nx.sum(K0, axis=1) # we recombine the current selection from dictionnary inv_new = nx.dot(D, new) other = nx.sum(K, axis=1) # geometric interpolation delta = nx.exp(alpha * nx.log(other) + (1 - alpha) * nx.log(inv_new)) K = projR(K, delta) K0 = nx.dot(D.T, delta / inv_new)[:, None] * K0 err = nx.norm(nx.sum(K0, axis=1) - old) old = new if log: log['err'].append(err) if verbose: if ii % 200 == 0: print('{:5s}|{:12s}'.format('It.', 'Err') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(ii, err)) if err < stopThr: break else: if warn: warnings.warn("Unmixing algorithm did not converge. 
You might want to " "increase the number of iterations `numItermax` " "or the regularization parameter `reg`.") if log: log['niter'] = ii return nx.sum(K0, axis=1), log else: return nx.sum(K0, axis=1) python-pot-0.9.3+dfsg/ot/bregman/_empirical.py000066400000000000000000000536371455713015700213240ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Bregman projections solvers for entropic regularized OT for empirical distributions """ # Author: Remi Flamary # Kilian Fatras # Quang Huy Tran # # License: MIT License import warnings from ..utils import dist, list_to_array, unif, LazyTensor from ..backend import get_backend from ._sinkhorn import sinkhorn, sinkhorn2 def get_sinkhorn_lazytensor(X_a, X_b, f, g, metric='sqeuclidean', reg=1e-1, nx=None): r""" Get a LazyTensor of Sinkhorn solution from the dual potentials The returned LazyTensor is :math:`\mathbf{T} = exp( \mathbf{f} \mathbf{1}_b^\top + \mathbf{1}_a \mathbf{g}^\top - \mathbf{C}/reg)`, where :math:`\mathbf{C}` is the pairwise metric matrix between samples :math:`\mathbf{X}_a` and :math:`\mathbf{X}_b`. Parameters ---------- X_a : array-like, shape (n_samples_a, dim) samples in the source domain X_b : array-like, shape (n_samples_b, dim) samples in the target domain f : array-like, shape (n_samples_a,) First dual potentials (log space) g : array-like, shape (n_samples_b,) Second dual potentials (log space) metric : str, default='sqeuclidean' Metric used for the cost matrix computation reg : float, default=1e-1 Regularization term >0 nx : Backend(), default=None Numerical backend used Returns ------- T : LazyTensor Sinkhorn solution tensor """ if nx is None: nx = get_backend(X_a, X_b, f, g) shape = (X_a.shape[0], X_b.shape[0]) def func(i, j, X_a, X_b, f, g, metric, reg): C = dist(X_a[i], X_b[j], metric=metric) return nx.exp(f[i, None] + g[None, j] - C / reg) T = LazyTensor(shape, func, X_a=X_a, X_b=X_b, f=f, g=g, metric=metric, reg=reg) return T def empirical_sinkhorn(X_s, X_t, reg, a=None, b=None, metric='sqeuclidean', numIterMax=10000, stopThr=1e-9, isLazy=False, batchSize=100, verbose=False, log=False, warn=True, warmstart=None, **kwargs): r''' Solve the entropic regularization optimal transport problem and return the OT matrix from empirical data The function solves the following optimization problem: .. math:: \gamma = \mathop{\arg \min}_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F + \mathrm{reg} \cdot\Omega(\gamma) s.t. \ \gamma \mathbf{1} &= \mathbf{a} \gamma^T \mathbf{1} &= \mathbf{b} \gamma &\geq 0 where : - :math:`\mathbf{M}` is the (`n_samples_a`, `n_samples_b`) metric cost matrix - :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target weights (sum to 1) Parameters ---------- X_s : array-like, shape (n_samples_a, dim) samples in the source domain X_t : array-like, shape (n_samples_b, dim) samples in the target domain reg : float Regularization term >0 a : array-like, shape (n_samples_a,) samples weights in the source domain b : array-like, shape (n_samples_b,) samples weights in the target domain numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on error (>0) isLazy: boolean, optional If True, then only calculate the cost matrix by block and return the dual potentials only (to save memory). If False, calculate full cost matrix and return outputs of sinkhorn function. 
batchSize: int or tuple of 2 int, optional Size of the batches used to compute the sinkhorn update without memory overhead. When a tuple is provided it sets the size of the left/right batches. verbose : bool, optional Print information along iterations log : bool, optional record log if True warn : bool, optional if True, raises a warning if the algorithm doesn't convergence. warmstart: tuple of arrays, shape (dim_a, dim_b), optional Initialization of dual potentials. If provided, the dual potentials should be given (that is the logarithm of the u,v sinkhorn scaling vectors) Returns ------- gamma : array-like, shape (n_samples_a, n_samples_b) Regularized optimal transportation matrix for the given parameters log : dict log dictionary return only if log==True in parameters Examples -------- >>> import numpy as np >>> n_samples_a = 2 >>> n_samples_b = 2 >>> reg = 0.1 >>> X_s = np.reshape(np.arange(n_samples_a, dtype=np.float64), (n_samples_a, 1)) >>> X_t = np.reshape(np.arange(0, n_samples_b, dtype=np.float64), (n_samples_b, 1)) >>> empirical_sinkhorn(X_s, X_t, reg=reg, verbose=False) # doctest: +NORMALIZE_WHITESPACE array([[4.99977301e-01, 2.26989344e-05], [2.26989344e-05, 4.99977301e-01]]) References ---------- .. [2] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013 .. [9] Schmitzer, B. (2016). Stabilized Sparse Scaling Algorithms for Entropy Regularized Transport Problems. arXiv preprint arXiv:1610.06519. .. [10] Chizat, L., PeyrĂ©, G., Schmitzer, B., & Vialard, F. X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816. ''' X_s, X_t = list_to_array(X_s, X_t) nx = get_backend(X_s, X_t) ns, nt = X_s.shape[0], X_t.shape[0] if a is None: a = nx.from_numpy(unif(ns), type_as=X_s) if b is None: b = nx.from_numpy(unif(nt), type_as=X_s) if isLazy: if log: dict_log = {"err": []} log_a, log_b = nx.log(a), nx.log(b) if warmstart is None: f, g = nx.zeros((ns,), type_as=a), nx.zeros((nt,), type_as=a) else: f, g = warmstart if isinstance(batchSize, int): bs, bt = batchSize, batchSize elif isinstance(batchSize, tuple) and len(batchSize) == 2: bs, bt = batchSize[0], batchSize[1] else: raise ValueError( "Batch size must be in integer or a tuple of two integers") range_s, range_t = range(0, ns, bs), range(0, nt, bt) lse_f = nx.zeros((ns,), type_as=a) lse_g = nx.zeros((nt,), type_as=a) X_s_np = nx.to_numpy(X_s) X_t_np = nx.to_numpy(X_t) for i_ot in range(numIterMax): lse_f_cols = [] for i in range_s: M = dist(X_s_np[i:i + bs, :], X_t_np, metric=metric) M = nx.from_numpy(M, type_as=a) lse_f_cols.append( nx.logsumexp(g[None, :] - M / reg, axis=1) ) lse_f = nx.concatenate(lse_f_cols, axis=0) f = log_a - lse_f lse_g_cols = [] for j in range_t: M = dist(X_s_np, X_t_np[j:j + bt, :], metric=metric) M = nx.from_numpy(M, type_as=a) lse_g_cols.append( nx.logsumexp(f[:, None] - M / reg, axis=0) ) lse_g = nx.concatenate(lse_g_cols, axis=0) g = log_b - lse_g if (i_ot + 1) % 10 == 0: m1_cols = [] for i in range_s: M = dist(X_s_np[i:i + bs, :], X_t_np, metric=metric) M = nx.from_numpy(M, type_as=a) m1_cols.append( nx.sum(nx.exp(f[i:i + bs, None] + g[None, :] - M / reg), axis=1) ) m1 = nx.concatenate(m1_cols, axis=0) err = nx.sum(nx.abs(m1 - a)) if log: dict_log["err"].append(err) if verbose and (i_ot + 1) % 100 == 0: print("Error in marginal at iteration {} = {}".format( i_ot + 1, err)) if err <= stopThr: break else: if warn: warnings.warn("Sinkhorn did not converge. 
You might want to " "increase the number of iterations `numItermax` " "or the regularization parameter `reg`.") if log: dict_log["u"] = f dict_log["v"] = g dict_log["niter"] = i_ot dict_log["lazy_plan"] = get_sinkhorn_lazytensor(X_s, X_t, f, g, metric, reg) return (f, g, dict_log) else: return (f, g) else: M = dist(X_s, X_t, metric=metric) if log: pi, log = sinkhorn(a, b, M, reg, numItermax=numIterMax, stopThr=stopThr, verbose=verbose, log=True, warmstart=warmstart, **kwargs) return pi, log else: pi = sinkhorn(a, b, M, reg, numItermax=numIterMax, stopThr=stopThr, verbose=verbose, log=False, warmstart=warmstart, **kwargs) return pi def empirical_sinkhorn2(X_s, X_t, reg, a=None, b=None, metric='sqeuclidean', numIterMax=10000, stopThr=1e-9, isLazy=False, batchSize=100, verbose=False, log=False, warn=True, warmstart=None, **kwargs): r''' Solve the entropic regularization optimal transport problem from empirical data and return the OT loss The function solves the following optimization problem: .. math:: W = \min_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F + \mathrm{reg} \cdot\Omega(\gamma) s.t. \ \gamma \mathbf{1} &= \mathbf{a} \gamma^T \mathbf{1} &= \mathbf{b} \gamma &\geq 0 where : - :math:`\mathbf{M}` is the (`n_samples_a`, `n_samples_b`) metric cost matrix - :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target weights (sum to 1) and returns :math:`\langle \gamma^*, \mathbf{M} \rangle_F` (without the entropic contribution). Parameters ---------- X_s : array-like, shape (n_samples_a, dim) samples in the source domain X_t : array-like, shape (n_samples_b, dim) samples in the target domain reg : float Regularization term >0 a : array-like, shape (n_samples_a,) samples weights in the source domain b : array-like, shape (n_samples_b,) samples weights in the target domain numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on error (>0) isLazy: boolean, optional If True, then only calculate the cost matrix by block and return the dual potentials only (to save memory). If False, calculate full cost matrix and return outputs of sinkhorn function. batchSize: int or tuple of 2 int, optional Size of the batches used to compute the sinkhorn update without memory overhead. When a tuple is provided it sets the size of the left/right batches. verbose : bool, optional Print information along iterations log : bool, optional record log if True warn : bool, optional if True, raises a warning if the algorithm doesn't convergence. warmstart: tuple of arrays, shape (dim_a, dim_b), optional Initialization of dual potentials. If provided, the dual potentials should be given (that is the logarithm of the u,v sinkhorn scaling vectors) Returns ------- W : (n_hists) array-like or float Optimal transportation loss for the given parameters log : dict log dictionary return only if log==True in parameters Examples -------- >>> import numpy as np >>> n_samples_a = 2 >>> n_samples_b = 2 >>> reg = 0.1 >>> X_s = np.reshape(np.arange(n_samples_a, dtype=np.float64), (n_samples_a, 1)) >>> X_t = np.reshape(np.arange(0, n_samples_b, dtype=np.float64), (n_samples_b, 1)) >>> b = np.full((n_samples_b, 3), 1/n_samples_b) >>> empirical_sinkhorn2(X_s, X_t, b=b, reg=reg, verbose=False) array([4.53978687e-05, 4.53978687e-05, 4.53978687e-05]) References ---------- .. [2] M. 
Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013 .. [9] Schmitzer, B. (2016). Stabilized Sparse Scaling Algorithms for Entropy Regularized Transport Problems. arXiv preprint arXiv:1610.06519. .. [10] Chizat, L., PeyrĂ©, G., Schmitzer, B., & Vialard, F. X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816. ''' X_s, X_t = list_to_array(X_s, X_t) nx = get_backend(X_s, X_t) ns, nt = X_s.shape[0], X_t.shape[0] if a is None: a = nx.from_numpy(unif(ns), type_as=X_s) if b is None: b = nx.from_numpy(unif(nt), type_as=X_s) if isLazy: if log: f, g, dict_log = empirical_sinkhorn(X_s, X_t, reg, a, b, metric, numIterMax=numIterMax, stopThr=stopThr, isLazy=isLazy, batchSize=batchSize, verbose=verbose, log=log, warn=warn, warmstart=warmstart) else: f, g = empirical_sinkhorn(X_s, X_t, reg, a, b, metric, numIterMax=numIterMax, stopThr=stopThr, isLazy=isLazy, batchSize=batchSize, verbose=verbose, log=log, warn=warn, warmstart=warmstart) bs = batchSize if isinstance(batchSize, int) else batchSize[0] range_s = range(0, ns, bs) loss = 0 X_s_np = nx.to_numpy(X_s) X_t_np = nx.to_numpy(X_t) for i in range_s: M_block = dist(X_s_np[i:i + bs, :], X_t_np, metric=metric) M_block = nx.from_numpy(M_block, type_as=a) pi_block = nx.exp(f[i:i + bs, None] + g[None, :] - M_block / reg) loss += nx.sum(M_block * pi_block) if log: return loss, dict_log else: return loss else: M = dist(X_s, X_t, metric=metric) if log: sinkhorn_loss, log = sinkhorn2(a, b, M, reg, numItermax=numIterMax, stopThr=stopThr, verbose=verbose, log=log, warn=warn, warmstart=warmstart, **kwargs) return sinkhorn_loss, log else: sinkhorn_loss = sinkhorn2(a, b, M, reg, numItermax=numIterMax, stopThr=stopThr, verbose=verbose, log=log, warn=warn, warmstart=warmstart, **kwargs) return sinkhorn_loss def empirical_sinkhorn_divergence(X_s, X_t, reg, a=None, b=None, metric='sqeuclidean', numIterMax=10000, stopThr=1e-9, verbose=False, log=False, warn=True, warmstart=None, **kwargs): r''' Compute the sinkhorn divergence loss from empirical data The function solves the following optimization problems and return the sinkhorn divergence :math:`S`: .. math:: W &= \min_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F + \mathrm{reg} \cdot\Omega(\gamma) W_a &= \min_{\gamma_a} \quad \langle \gamma_a, \mathbf{M_a} \rangle_F + \mathrm{reg} \cdot\Omega(\gamma_a) W_b &= \min_{\gamma_b} \quad \langle \gamma_b, \mathbf{M_b} \rangle_F + \mathrm{reg} \cdot\Omega(\gamma_b) S &= W - \frac{W_a + W_b}{2} .. math:: s.t. \ \gamma \mathbf{1} &= \mathbf{a} \gamma^T \mathbf{1} &= \mathbf{b} \gamma &\geq 0 \gamma_a \mathbf{1} &= \mathbf{a} \gamma_a^T \mathbf{1} &= \mathbf{a} \gamma_a &\geq 0 \gamma_b \mathbf{1} &= \mathbf{b} \gamma_b^T \mathbf{1} &= \mathbf{b} \gamma_b &\geq 0 where : - :math:`\mathbf{M}` (resp. :math:`\mathbf{M_a}`, :math:`\mathbf{M_b}`) is the (`n_samples_a`, `n_samples_b`) metric cost matrix (resp (`n_samples_a, n_samples_a`) and (`n_samples_b`, `n_samples_b`)) - :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target weights (sum to 1) and returns :math:`\langle \gamma^*, \mathbf{M} \rangle_F -(\langle \gamma^*_a, \mathbf{M_a} \rangle_F + \langle \gamma^*_b , \mathbf{M_b} \rangle_F)/2`. .. 
note: The current implementation does not account for the entropic contributions and thus differs from the Sinkhorn divergence as introduced in the literature. The possibility to account for the entropic contributions will be provided in a future release. Parameters ---------- X_s : array-like, shape (n_samples_a, dim) samples in the source domain X_t : array-like, shape (n_samples_b, dim) samples in the target domain reg : float Regularization term >0 a : array-like, shape (n_samples_a,) samples weights in the source domain b : array-like, shape (n_samples_b,) samples weights in the target domain numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on error (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True warn : bool, optional if True, raises a warning if the algorithm doesn't convergence. warmstart: tuple of arrays, shape (dim_a, dim_b), optional Initialization of dual potentials. If provided, the dual potentials should be given (that is the logarithm of the u,v sinkhorn scaling vectors) Returns ------- W : (1,) array-like Optimal transportation symmetrized loss for the given parameters log : dict log dictionary return only if log==True in parameters Examples -------- >>> import numpy as np >>> n_samples_a = 2 >>> n_samples_b = 4 >>> reg = 0.1 >>> X_s = np.reshape(np.arange(n_samples_a, dtype=np.float64), (n_samples_a, 1)) >>> X_t = np.reshape(np.arange(0, n_samples_b, dtype=np.float64), (n_samples_b, 1)) >>> empirical_sinkhorn_divergence(X_s, X_t, reg) # doctest: +ELLIPSIS 1.499887176049052 References ---------- .. [23] Aude Genevay, Gabriel PeyrĂ©, Marco Cuturi, Learning Generative Models with Sinkhorn Divergences, Proceedings of the Twenty-First International Conference on Artificial Intelligence and Statistics, (AISTATS) 21, 2018 ''' X_s, X_t = list_to_array(X_s, X_t) nx = get_backend(X_s, X_t) if warmstart is None: warmstart_a, warmstart_b = None, None else: u, v = warmstart warmstart_a = (u, u) warmstart_b = (v, v) if log: sinkhorn_loss_ab, log_ab = empirical_sinkhorn2(X_s, X_t, reg, a, b, metric=metric, numIterMax=numIterMax, stopThr=stopThr, verbose=verbose, log=log, warn=warn, warmstart=warmstart, **kwargs) sinkhorn_loss_a, log_a = empirical_sinkhorn2(X_s, X_s, reg, a, a, metric=metric, numIterMax=numIterMax, stopThr=stopThr, verbose=verbose, log=log, warn=warn, warmstart=warmstart_a, **kwargs) sinkhorn_loss_b, log_b = empirical_sinkhorn2(X_t, X_t, reg, b, b, metric=metric, numIterMax=numIterMax, stopThr=stopThr, verbose=verbose, log=log, warn=warn, warmstart=warmstart_b, **kwargs) sinkhorn_div = sinkhorn_loss_ab - 0.5 * \ (sinkhorn_loss_a + sinkhorn_loss_b) log = {} log['sinkhorn_loss_ab'] = sinkhorn_loss_ab log['sinkhorn_loss_a'] = sinkhorn_loss_a log['sinkhorn_loss_b'] = sinkhorn_loss_b log['log_sinkhorn_ab'] = log_ab log['log_sinkhorn_a'] = log_a log['log_sinkhorn_b'] = log_b return nx.maximum(0, sinkhorn_div), log else: sinkhorn_loss_ab = empirical_sinkhorn2(X_s, X_t, reg, a, b, metric=metric, numIterMax=numIterMax, stopThr=stopThr, verbose=verbose, log=log, warn=warn, warmstart=warmstart, **kwargs) sinkhorn_loss_a = empirical_sinkhorn2(X_s, X_s, reg, a, a, metric=metric, numIterMax=numIterMax, stopThr=stopThr, verbose=verbose, log=log, warn=warn, warmstart=warmstart_a, **kwargs) sinkhorn_loss_b = empirical_sinkhorn2(X_t, X_t, reg, b, b, metric=metric, numIterMax=numIterMax, stopThr=stopThr, verbose=verbose, log=log, warn=warn, warmstart=warmstart_b, **kwargs) sinkhorn_div = 
sinkhorn_loss_ab - 0.5 * \ (sinkhorn_loss_a + sinkhorn_loss_b) return nx.maximum(0, sinkhorn_div) python-pot-0.9.3+dfsg/ot/bregman/_geomloss.py000066400000000000000000000156231455713015700212000ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Wrapper functions for geomloss """ # Author: Remi Flamary # # License: MIT License import numpy as np try: import geomloss from geomloss import SamplesLoss import torch from torch.autograd import grad from ..utils import get_backend, LazyTensor, dist except ImportError: geomloss = False def get_sinkhorn_geomloss_lazytensor(X_a, X_b, f, g, a, b, metric='sqeuclidean', blur=0.1, nx=None): """ Get a LazyTensor of sinkhorn solution T = exp((f+g^T-C)/reg)*(ab^T) Parameters ---------- X_a : array-like, shape (n_samples_a, dim) samples in the source domain X_torch: array-like, shape (n_samples_b, dim) samples in the target domain f : array-like, shape (n_samples_a,) First dual potentials (log space) g : array-like, shape (n_samples_b,) Second dual potentials (log space) metric : str, default='sqeuclidean' Metric used for the cost matrix computation blur : float, default=1e-1 blur term (blur=sqrt(reg)) >0 nx : Backend(), default=None Numerical backend used Returns ------- T : LazyTensor Lowrank tensor T = exp((f+g^T-C)/reg)*(ab^T) """ if nx is None: nx = get_backend(X_a, X_b, f, g) shape = (X_a.shape[0], X_b.shape[0]) def func(i, j, X_a, X_b, f, g, a, b, metric, blur): if metric == 'sqeuclidean': C = dist(X_a[i], X_b[j], metric=metric) / 2 else: C = dist(X_a[i], X_b[j], metric=metric) return nx.exp((f[i, None] + g[None, j] - C) / (blur**2)) * (a[i, None] * b[None, j]) T = LazyTensor(shape, func, X_a=X_a, X_b=X_b, f=f, g=g, a=a, b=b, metric=metric, blur=blur) return T def empirical_sinkhorn2_geomloss(X_s, X_t, reg, a=None, b=None, metric='sqeuclidean', scaling=0.95, verbose=False, debias=False, log=False, backend='auto'): r""" Solve the entropic regularization optimal transport problem with geomloss The function solves the following optimization problem: .. math:: \gamma = arg\min_\gamma <\gamma,C>_F + reg\cdot\Omega(\gamma) s.t. \gamma 1 = a \gamma^T 1= b \gamma\geq 0 where : - :math:`C` is the cost matrix such that :math:`C_{i,j}=d(x_i^s,x_j^t)` and :math:`d` is a metric. - :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j}\gamma_{i,j}\log(\gamma_{i,j})-\gamma_{i,j}+1` - :math:`a` and :math:`b` are source and target weights (sum to 1) The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in and computed in log space for better stability and epsilon-scaling. The solution is computed in a lazy way using the Geomloss [60] and the KeOps library [61]. Parameters ---------- X_s : array-like, shape (n_samples_a, dim) samples in the source domain X_t : array-like, shape (n_samples_b, dim) samples in the target domain reg : float Regularization term >0 a : array-like, shape (n_samples_a,), default=None samples weights in the source domain b : array-like, shape (n_samples_b,), default=None samples weights in the target domain metric : str, default='sqeuclidean' Metric used for the cost matrix computation Only acepted values are 'sqeuclidean' and 'euclidean'. scaling : float, default=0.95 Scaling parameter used for epsilon scaling. Value close to one promote precision while value close to zero promote speed. verbose : bool, default=False Print information debias : bool, default=False Use the debiased version of Sinkhorn algorithm [12]_. 
log : bool, default=False Return log dictionary containing all computed objects backend : str, default='auto' Numerical backend for geomloss. Only 'auto' and 'tensorized' 'online' and 'multiscale' are accepted values. Returns ------- value : float OT value log : dict Log dictionary return only if log==True in parameters References ---------- .. [60] Feydy, J., Roussillon, P., TrouvĂ©, A., & Gori, P. (2019). [Fast and scalable optimal transport for brain tractograms. In Medical Image Computing and Computer Assisted Intervention–MICCAI 2019: 22nd International Conference, Shenzhen, China, October 13–17, 2019, Proceedings, Part III 22 (pp. 636-644). Springer International Publishing. .. [61] Charlier, B., Feydy, J., Glaunes, J. A., Collin, F. D., & Durif, G. (2021). Kernel operations on the gpu, with autodiff, without memory overflows. The Journal of Machine Learning Research, 22(1), 3457-3462. """ if geomloss: nx = get_backend(X_s, X_t, a, b) if nx.__name__ not in ['torch', 'numpy']: raise ValueError('geomloss only support torch or numpy backend') if a is None: a = nx.ones(X_s.shape[0], type_as=X_s) / X_s.shape[0] if b is None: b = nx.ones(X_t.shape[0], type_as=X_t) / X_t.shape[0] if nx.__name__ == 'numpy': X_s_torch = torch.tensor(X_s) X_t_torch = torch.tensor(X_t) a_torch = torch.tensor(a) b_torch = torch.tensor(b) else: X_s_torch = X_s X_t_torch = X_t a_torch = a b_torch = b # after that we are all in torch # set blur value and p if metric == 'sqeuclidean': p = 2 blur = np.sqrt(reg / 2) # because geomloss divides cost by two elif metric == 'euclidean': p = 1 blur = np.sqrt(reg) else: raise ValueError('geomloss only supports sqeuclidean and euclidean metrics') # force gradients for computing dual a_torch.requires_grad = True b_torch.requires_grad = True loss = SamplesLoss(loss='sinkhorn', p=p, blur=blur, backend=backend, debias=debias, scaling=scaling, verbose=verbose) # compute value value = loss(a_torch, X_s_torch, b_torch, X_t_torch) # linear + entropic/KL reg? # get dual potentials f, g = grad(value, [a_torch, b_torch]) if metric == 'sqeuclidean': value *= 2 # because geomloss divides cost by two if nx.__name__ == 'numpy': f = f.cpu().detach().numpy() g = g.cpu().detach().numpy() value = value.cpu().detach().numpy() if log: log = {} log['f'] = f log['g'] = g log['value'] = value log['lazy_plan'] = get_sinkhorn_geomloss_lazytensor(X_s, X_t, f, g, a, b, metric=metric, blur=blur, nx=nx) return value, log else: return value else: raise ImportError('geomloss not installed') python-pot-0.9.3+dfsg/ot/bregman/_screenkhorn.py000066400000000000000000000324511455713015700216670ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Screening Sinkhorn Algorithms for Regularized Optimal Transport """ # Author: Remi Flamary # Mokhtar Z. Alaya # # License: MIT License import warnings import numpy as np from scipy.optimize import fmin_l_bfgs_b from ..utils import list_to_array from ..backend import get_backend def screenkhorn(a, b, M, reg, ns_budget=None, nt_budget=None, uniform=False, restricted=True, maxiter=10000, maxfun=10000, pgtol=1e-09, verbose=False, log=False): r""" Screening Sinkhorn Algorithm for Regularized Optimal Transport The function solves an approximate dual of Sinkhorn divergence :ref:`[2] ` which is written as the following optimization problem: .. 
math:: (\mathbf{u}, \mathbf{v}) = \mathop{\arg \min}_{\mathbf{u}, \mathbf{v}} \quad
        \mathbf{1}_{ns}^T \mathbf{B}(\mathbf{u}, \mathbf{v}) \mathbf{1}_{nt} -
        \langle \kappa \mathbf{u}, \mathbf{a} \rangle -
        \langle \frac{1}{\kappa} \mathbf{v}, \mathbf{b} \rangle

    where:

    .. math::

        \mathbf{B}(\mathbf{u}, \mathbf{v}) = \mathrm{diag}(e^\mathbf{u}) \mathbf{K}
        \mathrm{diag}(e^\mathbf{v}) \text{, with } \mathbf{K} = e^{-\mathbf{M} / \mathrm{reg}} \text{ and}

    .. math::

        s.t. \ e^{u_i} &\geq \epsilon / \kappa, \forall i \in \{1, \ldots, ns\}

             e^{v_j} &\geq \epsilon \kappa, \forall j \in \{1, \ldots, nt\}

    The parameters `kappa` and `epsilon` are determined w.r.t. the number budgets
    of points (`ns_budget`, `nt_budget`), see Equation (5)
    in :ref:`[26] <references-screenkhorn>`

    Parameters
    ----------
    a: array-like, shape=(ns,)
        samples weights in the source domain
    b: array-like, shape=(nt,)
        samples weights in the target domain
    M: array-like, shape=(ns, nt)
        Cost matrix
    reg: `float`
        Level of the entropy regularisation
    ns_budget: `int`, default=None
        Number budget of points to be kept in the source domain.
        If it is None then 50% of the source sample points will be kept
    nt_budget: `int`, default=None
        Number budget of points to be kept in the target domain.
        If it is None then 50% of the target sample points will be kept
    uniform: `bool`, default=False
        If `True`, the source and target distributions are assumed to be uniform,
        i.e., :math:`a_i = 1 / ns` and :math:`b_j = 1 / nt`
    restricted : `bool`, default=True
        If `True`, a warm-start initialization for the L-BFGS-B solver is computed
        using a restricted Sinkhorn algorithm with at most 5 iterations
    maxiter: `int`, default=10000
        Maximum number of iterations in LBFGS solver
    maxfun: `int`, default=10000
        Maximum number of function evaluations in LBFGS solver
    pgtol: `float`, default=1e-09
        Final objective function accuracy in LBFGS solver
    verbose: `bool`, default=False
        If `True`, display information about the cardinalities of the active sets
        and the parameters kappa and epsilon

    .. admonition:: Dependency

        To gain more efficiency, :py:func:`ot.bregman.screenkhorn` needs to call
        the "Bottleneck" package (https://pypi.org/project/Bottleneck/) in the
        screening pre-processing step. If Bottleneck isn't installed, the
        following warning is raised and a slower NumPy fallback is used:

        "Bottleneck module is not installed. Install it from
        https://pypi.org/project/Bottleneck/ for better performance."

    Returns
    -------
    gamma : array-like, shape=(ns, nt)
        Screened optimal transportation matrix for the given parameters
    log : `dict`, default=False
        Log dictionary returned only if log==True in parameters

    .. _references-screenkhorn:
    References
    ----------
    .. [2] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal
        Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013

    .. [26] Alaya M. Z., Bérar M., Gasso G., Rakotomamonjy A. (2019).
        Screening Sinkhorn Algorithm for Regularized Optimal Transport (NIPS) 33, 2019
    """
    # check if bottleneck module exists
    try:
        import bottleneck
    except ImportError:
        warnings.warn(
            "Bottleneck module is not installed.
Install it from" " https://pypi.org/project/Bottleneck/ for better performance.") bottleneck = np a, b, M = list_to_array(a, b, M) nx = get_backend(M, a, b) if nx.__name__ in ("jax", "tf"): raise TypeError("JAX or TF arrays have been received but screenkhorn is not " "compatible with neither JAX nor TF.") ns, nt = M.shape # by default, we keep only 50% of the sample data points if ns_budget is None: ns_budget = int(np.floor(0.5 * ns)) if nt_budget is None: nt_budget = int(np.floor(0.5 * nt)) # calculate the Gibbs kernel K = nx.exp(-M / reg) def projection(u, epsilon): u = nx.maximum(u, epsilon) return u # ----------------------------------------------------------------------------------------------------------------# # Step 1: Screening pre-processing # # ----------------------------------------------------------------------------------------------------------------# if ns_budget == ns and nt_budget == nt: # full number of budget points (ns, nt) = (ns_budget, nt_budget) Isel = nx.from_numpy(np.ones(ns, dtype=bool)) Jsel = nx.from_numpy(np.ones(nt, dtype=bool)) epsilon = 0.0 kappa = 1.0 cst_u = 0. cst_v = 0. bounds_u = [(0.0, np.inf)] * ns bounds_v = [(0.0, np.inf)] * nt a_I = a b_J = b K_IJ = K K_IJc = [] K_IcJ = [] vec_eps_IJc = nx.zeros((nt,), type_as=M) vec_eps_IcJ = nx.zeros((ns,), type_as=M) else: # sum of rows and columns of K K_sum_cols = nx.sum(K, axis=1) K_sum_rows = nx.sum(K, axis=0) if uniform: if ns / ns_budget < 4: aK_sort = nx.sort(K_sum_cols) epsilon_u_square = a[0] / aK_sort[ns_budget - 1] else: aK_sort = nx.from_numpy( bottleneck.partition(nx.to_numpy( K_sum_cols), ns_budget - 1)[ns_budget - 1], type_as=M ) epsilon_u_square = a[0] / aK_sort if nt / nt_budget < 4: bK_sort = nx.sort(K_sum_rows) epsilon_v_square = b[0] / bK_sort[nt_budget - 1] else: bK_sort = nx.from_numpy( bottleneck.partition(nx.to_numpy( K_sum_rows), nt_budget - 1)[nt_budget - 1], type_as=M ) epsilon_v_square = b[0] / bK_sort else: aK = a / K_sum_cols bK = b / K_sum_rows aK_sort = nx.flip(nx.sort(aK), axis=0) epsilon_u_square = aK_sort[ns_budget - 1] bK_sort = nx.flip(nx.sort(bK), axis=0) epsilon_v_square = bK_sort[nt_budget - 1] # active sets I and J (see Lemma 1 in [26]) Isel = a >= epsilon_u_square * K_sum_cols Jsel = b >= epsilon_v_square * K_sum_rows if nx.sum(Isel) != ns_budget: if uniform: aK = a / K_sum_cols aK_sort = nx.flip(nx.sort(aK), axis=0) epsilon_u_square = nx.mean(aK_sort[ns_budget - 1:ns_budget + 1]) Isel = a >= epsilon_u_square * K_sum_cols ns_budget = nx.sum(Isel) if nx.sum(Jsel) != nt_budget: if uniform: bK = b / K_sum_rows bK_sort = nx.flip(nx.sort(bK), axis=0) epsilon_v_square = nx.mean(bK_sort[nt_budget - 1:nt_budget + 1]) Jsel = b >= epsilon_v_square * K_sum_rows nt_budget = nx.sum(Jsel) epsilon = (epsilon_u_square * epsilon_v_square) ** (1 / 4) kappa = (epsilon_v_square / epsilon_u_square) ** (1 / 2) if verbose: print("epsilon = %s\n" % epsilon) print("kappa = %s\n" % kappa) print('Cardinality of selected points: |Isel| = %s \t |Jsel| = %s \n' % (sum(Isel), sum(Jsel))) # Ic, Jc: complementary of the active sets I and J Ic = ~Isel Jc = ~Jsel K_IJ = K[np.ix_(Isel, Jsel)] K_IcJ = K[np.ix_(Ic, Jsel)] K_IJc = K[np.ix_(Isel, Jc)] K_min = nx.min(K_IJ) if K_min == 0: K_min = float(np.finfo(float).tiny) # a_I, b_J, a_Ic, b_Jc a_I = a[Isel] b_J = b[Jsel] if not uniform: a_I_min = nx.min(a_I) a_I_max = nx.max(a_I) b_J_max = nx.max(b_J) b_J_min = nx.min(b_J) else: a_I_min = a_I[0] a_I_max = a_I[0] b_J_max = b_J[0] b_J_min = b_J[0] # box constraints in L-BFGS-B (see Proposition 1 in [26]) 
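        # (Descriptive note: following Proposition 1 in [26], the bounds built
        # just below box-constrain each screened scaling variable so that
        # L-BFGS-B only explores feasible scalings: u_i lies between a
        # data-dependent lower bound, never below epsilon / kappa, and
        # a_I_max / (nt * epsilon * K_min); v_j is bounded symmetrically with
        # the roles of source and target swapped.)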
bounds_u = [(max(a_I_min / ((nt - nt_budget) * epsilon
                                    + nt_budget * (b_J_max / (ns * epsilon * kappa * K_min))),
                         epsilon / kappa),
                     a_I_max / (nt * epsilon * K_min))] * ns_budget

        bounds_v = [(max(b_J_min / ((ns - ns_budget) * epsilon
                                    + ns_budget * (kappa * a_I_max / (nt * epsilon * K_min))),
                         epsilon * kappa),
                     b_J_max / (ns * epsilon * K_min))] * nt_budget

        # pre-calculated constants for the objective
        vec_eps_IJc = epsilon * kappa * nx.sum(
            K_IJc * nx.ones((nt - nt_budget,), type_as=M)[None, :],
            axis=1
        )
        vec_eps_IcJ = (epsilon / kappa) * nx.sum(
            nx.ones((ns - ns_budget,), type_as=M)[:, None] * K_IcJ,
            axis=0
        )

    # initialisation
    u0 = nx.full((ns_budget,), 1. / ns_budget + epsilon / kappa, type_as=M)
    v0 = nx.full((nt_budget,), 1. / nt_budget + epsilon * kappa, type_as=M)

    # pre-calculated constants for the Restricted Sinkhorn
    # (see Algorithm 1 in the supplementary of [26])
    if restricted:
        if ns_budget != ns or nt_budget != nt:
            cst_u = kappa * epsilon * nx.sum(K_IJc, axis=1)
            cst_v = epsilon * nx.sum(K_IcJ, axis=0) / kappa

        for _ in range(5):  # 5 iterations
            K_IJ_v = nx.dot(K_IJ.T, u0) + cst_v
            v0 = b_J / (kappa * K_IJ_v)
            KIJ_u = nx.dot(K_IJ, v0) + cst_u
            u0 = (kappa * a_I) / KIJ_u

        u0 = projection(u0, epsilon / kappa)
        v0 = projection(v0, epsilon * kappa)
    # (otherwise u0 and v0 keep their default initialization)

    def restricted_sinkhorn(usc, vsc, max_iter=5):
        """
        Restricted Sinkhorn algorithm used as a warm-start
        initialization point for L-BFGS-B
        """
        for _ in range(max_iter):
            K_IJ_v = nx.dot(K_IJ.T, usc) + cst_v
            vsc = b_J / (kappa * K_IJ_v)
            KIJ_u = nx.dot(K_IJ, vsc) + cst_u
            usc = (kappa * a_I) / KIJ_u
        usc = projection(usc, epsilon / kappa)
        vsc = projection(vsc, epsilon * kappa)
        return usc, vsc

    def screened_obj(usc, vsc):
        part_IJ = (
            nx.dot(nx.dot(usc, K_IJ), vsc)
            - kappa * nx.dot(a_I, nx.log(usc))
            - (1. / kappa) * nx.dot(b_J, nx.log(vsc))
        )
        part_IJc = nx.dot(usc, vec_eps_IJc)
        part_IcJ = nx.dot(vec_eps_IcJ, vsc)
        psi_epsilon = part_IJ + part_IJc + part_IcJ
        return psi_epsilon

    def screened_grad(usc, vsc):
        # gradients of Psi_(kappa,epsilon) w.r.t u and v
        grad_u = nx.dot(K_IJ, vsc) + vec_eps_IJc - kappa * a_I / usc
        grad_v = nx.dot(K_IJ.T, usc) + vec_eps_IcJ - (1.
/ kappa) * b_J / vsc return grad_u, grad_v def bfgspost(theta): u = theta[:ns_budget] v = theta[ns_budget:] # objective f = screened_obj(u, v) # gradient g_u, g_v = screened_grad(u, v) g = nx.concatenate([g_u, g_v], axis=0) return nx.to_numpy(f), nx.to_numpy(g) # ----------------------------------------------------------------------------------------------------------------# # Step 2: L-BFGS-B solver # # ----------------------------------------------------------------------------------------------------------------# u0, v0 = restricted_sinkhorn(u0, v0) theta0 = nx.concatenate([u0, v0], axis=0) bounds = bounds_u + bounds_v # constraint bounds def obj(theta): return bfgspost(nx.from_numpy(theta, type_as=M)) theta, _, _ = fmin_l_bfgs_b(func=obj, x0=theta0, bounds=bounds, maxfun=maxfun, pgtol=pgtol, maxiter=maxiter) theta = nx.from_numpy(theta, type_as=M) usc = theta[:ns_budget] vsc = theta[ns_budget:] usc_full = nx.full((ns,), epsilon / kappa, type_as=M) vsc_full = nx.full((nt,), epsilon * kappa, type_as=M) usc_full[Isel] = usc vsc_full[Jsel] = vsc if log: log = {} log['u'] = usc_full log['v'] = vsc_full log['Isel'] = Isel log['Jsel'] = Jsel gamma = usc_full[:, None] * K * vsc_full[None, :] gamma = gamma / nx.sum(gamma) if log: return gamma, log else: return gamma python-pot-0.9.3+dfsg/ot/bregman/_sinkhorn.py000066400000000000000000001333151455713015700212020ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Bregman projections solvers for entropic regularized OT """ # Author: Remi Flamary # Nicolas Courty # Titouan Vayer # Alexander Tong # Quang Huy Tran # # License: MIT License import warnings import numpy as np from ..utils import list_to_array from ..backend import get_backend def sinkhorn(a, b, M, reg, method='sinkhorn', numItermax=1000, stopThr=1e-9, verbose=False, log=False, warn=True, warmstart=None, **kwargs): r""" Solve the entropic regularization optimal transport problem and return the OT matrix The function solves the following optimization problem: .. math:: \gamma = \mathop{\arg \min}_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F + \mathrm{reg}\cdot\Omega(\gamma) s.t. \ \gamma \mathbf{1} &= \mathbf{a} \gamma^T \mathbf{1} &= \mathbf{b} \gamma &\geq 0 where : - :math:`\mathbf{M}` is the (`dim_a`, `dim_b`) metric cost matrix - :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target weights (histograms, both sum to 1) .. note:: This function is backend-compatible and will work on arrays from all compatible backends. The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in :ref:`[2] ` **Choosing a Sinkhorn solver** By default and when using a regularization parameter that is not too small the default sinkhorn solver should be enough. If you need to use a small regularization to get sharper OT matrices, you should use the :py:func:`ot.bregman.sinkhorn_stabilized` solver that will avoid numerical errors. This last solver can be very slow in practice and might not even converge to a reasonable OT matrix in a finite time. This is why :py:func:`ot.bregman.sinkhorn_epsilon_scaling` that relies on iterating the value of the regularization (and using warm start) sometimes leads to better solutions. 
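    As a rough illustration (a minimal sketch with arbitrary data, not a
    prescription from this function), lowering the regularization while
    switching solvers could look like::

        import numpy as np
        import ot

        rng = np.random.RandomState(0)
        xs, xt = rng.randn(50, 2), rng.randn(60, 2)
        a, b = ot.unif(50), ot.unif(60)
        M = ot.dist(xs, xt)
        G = ot.sinkhorn(a, b, M, reg=1e-1)  # default solver is fine here
        G_sharp = ot.sinkhorn(a, b, M, reg=1e-3, method='sinkhorn_log')  # small reg
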
Note that the greedy version of the sinkhorn :py:func:`ot.bregman.greenkhorn` can also lead to a speedup and the screening version of the sinkhorn :py:func:`ot.bregman.screenkhorn` aim at providing a fast approximation of the Sinkhorn problem. For use of GPU and gradient computation with small number of iterations we strongly recommend the :py:func:`ot.bregman.sinkhorn_log` solver that will no need to check for numerical problems. Parameters ---------- a : array-like, shape (dim_a,) samples weights in the source domain b : array-like, shape (dim_b,) or ndarray, shape (dim_b, n_hists) samples in the target domain, compute sinkhorn with multiple targets and fixed :math:`\mathbf{M}` if :math:`\mathbf{b}` is a matrix (return OT loss + dual variables in log) M : array-like, shape (dim_a, dim_b) loss matrix reg : float Regularization term >0 method : str method used for the solver either 'sinkhorn','sinkhorn_log', 'greenkhorn', 'sinkhorn_stabilized' or 'sinkhorn_epsilon_scaling', see those function for specific parameters numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on error (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True warn : bool, optional if True, raises a warning if the algorithm doesn't convergence. warmstart: tuple of arrays, shape (dim_a, dim_b), optional Initialization of dual potentials. If provided, the dual potentials should be given (that is the logarithm of the u,v sinkhorn scaling vectors) Returns ------- gamma : array-like, shape (dim_a, dim_b) Optimal transportation matrix for the given parameters log : dict log dictionary return only if log==True in parameters Examples -------- >>> import ot >>> a=[.5, .5] >>> b=[.5, .5] >>> M=[[0., 1.], [1., 0.]] >>> ot.sinkhorn(a, b, M, 1) array([[0.36552929, 0.13447071], [0.13447071, 0.36552929]]) .. _references-sinkhorn: References ---------- .. [2] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013 .. [9] Schmitzer, B. (2016). Stabilized Sparse Scaling Algorithms for Entropy Regularized Transport Problems. arXiv preprint arXiv:1610.06519. .. [10] Chizat, L., PeyrĂ©, G., Schmitzer, B., & Vialard, F. X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816. .. [34] Feydy, J., SĂ©journĂ©, T., Vialard, F. X., Amari, S. I., TrouvĂ©, A., & PeyrĂ©, G. (2019, April). Interpolating between optimal transport and MMD using Sinkhorn divergences. In The 22nd International Conference on Artificial Intelligence and Statistics (pp. 2681-2690). PMLR. 
See Also
    --------
    ot.lp.emd : Unregularized OT
    ot.optim.cg : General regularized OT
    ot.bregman.sinkhorn_knopp : Classic Sinkhorn :ref:`[2] <references-sinkhorn-knopp>`
    ot.bregman.sinkhorn_stabilized: Stabilized sinkhorn
        :ref:`[9] <references-sinkhorn-stabilized>` :ref:`[10] <references-sinkhorn-stabilized>`
    ot.bregman.sinkhorn_epsilon_scaling: Sinkhorn with epsilon scaling
        :ref:`[9] <references-sinkhorn-epsilon-scaling>` :ref:`[10] <references-sinkhorn-epsilon-scaling>`

    """

    if method.lower() == 'sinkhorn':
        return sinkhorn_knopp(a, b, M, reg, numItermax=numItermax,
                              stopThr=stopThr, verbose=verbose,
                              log=log, warn=warn, warmstart=warmstart,
                              **kwargs)
    elif method.lower() == 'sinkhorn_log':
        return sinkhorn_log(a, b, M, reg, numItermax=numItermax,
                            stopThr=stopThr, verbose=verbose,
                            log=log, warn=warn, warmstart=warmstart,
                            **kwargs)
    elif method.lower() == 'greenkhorn':
        return greenkhorn(a, b, M, reg, numItermax=numItermax,
                          stopThr=stopThr, verbose=verbose, log=log,
                          warn=warn, warmstart=warmstart)
    elif method.lower() == 'sinkhorn_stabilized':
        return sinkhorn_stabilized(a, b, M, reg, numItermax=numItermax,
                                   stopThr=stopThr, warmstart=warmstart,
                                   verbose=verbose, log=log, warn=warn,
                                   **kwargs)
    elif method.lower() == 'sinkhorn_epsilon_scaling':
        return sinkhorn_epsilon_scaling(a, b, M, reg, numItermax=numItermax,
                                        stopThr=stopThr, warmstart=warmstart,
                                        verbose=verbose, log=log, warn=warn,
                                        **kwargs)
    else:
        raise ValueError("Unknown method '%s'." % method)


def sinkhorn2(a, b, M, reg, method='sinkhorn', numItermax=1000, stopThr=1e-9,
              verbose=False, log=False, warn=False, warmstart=None, **kwargs):
    r"""
    Solve the entropic regularization optimal transport problem and return the loss

    The function solves the following optimization problem:

    .. math::
        W = \min_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F +
        \mathrm{reg}\cdot\Omega(\gamma)

        s.t. \ \gamma \mathbf{1} &= \mathbf{a}

             \gamma^T \mathbf{1} &= \mathbf{b}

             \gamma &\geq 0

    where :

    - :math:`\mathbf{M}` is the (`dim_a`, `dim_b`) metric cost matrix
    - :math:`\Omega` is the entropic regularization term
      :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
    - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target
      weights (histograms, both sum to 1)

    and returns :math:`\langle \gamma^*, \mathbf{M} \rangle_F` (without
    the entropic contribution).

    .. note:: This function is backend-compatible and will work on arrays
        from all compatible backends.

    The algorithm used for solving the problem is the
    Sinkhorn-Knopp matrix scaling algorithm as proposed in :ref:`[2] <references-sinkhorn2>`

    **Choosing a Sinkhorn solver**

    By default and when using a regularization parameter that is not too small
    the default sinkhorn solver should be enough. If you need to use a small
    regularization to get sharper OT matrices, you should use the
    :py:func:`ot.bregman.sinkhorn_log` solver that will avoid numerical
    errors. This last solver can be very slow in practice and might not even
    converge to a reasonable OT matrix in a finite time. This is why
    :py:func:`ot.bregman.sinkhorn_epsilon_scaling` that relies on iterating
    the value of the regularization (and using warm start) sometimes leads
    to better solutions. Note that the greedy version of the sinkhorn
    :py:func:`ot.bregman.greenkhorn` can also lead to a speedup and the
    screening version of the sinkhorn :py:func:`ot.bregman.screenkhorn`
    aims at providing a fast approximation of the Sinkhorn problem. For use
    of GPU and gradient computation with small number of iterations we
    strongly recommend the :py:func:`ot.bregman.sinkhorn_log` solver that
    will not need to check for numerical problems.
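    As a minimal sketch (mirroring the doctest in the Examples section below,
    with made-up data)::

        import numpy as np
        import ot

        a = b = np.array([.5, .5])
        M = np.array([[0., 1.], [1., 0.]])
        loss = ot.sinkhorn2(a, b, M, reg=1., method='sinkhorn_log')  # scalar OT loss
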
Parameters ---------- a : array-like, shape (dim_a,) samples weights in the source domain b : array-like, shape (dim_b,) or ndarray, shape (dim_b, n_hists) samples in the target domain, compute sinkhorn with multiple targets and fixed :math:`\mathbf{M}` if :math:`\mathbf{b}` is a matrix (return OT loss + dual variables in log) M : array-like, shape (dim_a, dim_b) loss matrix reg : float Regularization term >0 method : str method used for the solver either 'sinkhorn','sinkhorn_log', 'sinkhorn_stabilized', see those function for specific parameters numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on error (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True warn : bool, optional if True, raises a warning if the algorithm doesn't convergence. warmstart: tuple of arrays, shape (dim_a, dim_b), optional Initialization of dual potentials. If provided, the dual potentials should be given (that is the logarithm of the u,v sinkhorn scaling vectors) Returns ------- W : (n_hists) float/array-like Optimal transportation loss for the given parameters log : dict log dictionary return only if log==True in parameters Examples -------- >>> import ot >>> a=[.5, .5] >>> b=[.5, .5] >>> M=[[0., 1.], [1., 0.]] >>> ot.sinkhorn2(a, b, M, 1) 0.26894142136999516 .. _references-sinkhorn2: References ---------- .. [2] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013 .. [9] Schmitzer, B. (2016). Stabilized Sparse Scaling Algorithms for Entropy Regularized Transport Problems. arXiv preprint arXiv:1610.06519. .. [10] Chizat, L., PeyrĂ©, G., Schmitzer, B., & Vialard, F. X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816. .. [21] Altschuler J., Weed J., Rigollet P. : Near-linear time approximation algorithms for optimal transport via Sinkhorn iteration, Advances in Neural Information Processing Systems (NIPS) 31, 2017 .. [34] Feydy, J., SĂ©journĂ©, T., Vialard, F. X., Amari, S. I., TrouvĂ©, A., & PeyrĂ©, G. (2019, April). Interpolating between optimal transport and MMD using Sinkhorn divergences. In The 22nd International Conference on Artificial Intelligence and Statistics (pp. 2681-2690). PMLR. See Also -------- ot.lp.emd : Unregularized OT ot.optim.cg : General regularized OT ot.bregman.sinkhorn_knopp : Classic Sinkhorn :ref:`[2] ` ot.bregman.greenkhorn : Greenkhorn :ref:`[21] ` ot.bregman.sinkhorn_stabilized: Stabilized sinkhorn :ref:`[9] ` :ref:`[10] ` """ M, a, b = list_to_array(M, a, b) nx = get_backend(M, a, b) if len(b.shape) < 2: if method.lower() == 'sinkhorn': res = sinkhorn_knopp(a, b, M, reg, numItermax=numItermax, stopThr=stopThr, verbose=verbose, log=log, warn=warn, warmstart=warmstart, **kwargs) elif method.lower() == 'sinkhorn_log': res = sinkhorn_log(a, b, M, reg, numItermax=numItermax, stopThr=stopThr, verbose=verbose, log=log, warn=warn, warmstart=warmstart, **kwargs) elif method.lower() == 'sinkhorn_stabilized': res = sinkhorn_stabilized(a, b, M, reg, numItermax=numItermax, stopThr=stopThr, warmstart=warmstart, verbose=verbose, log=log, warn=warn, **kwargs) else: raise ValueError("Unknown method '%s'." 
% method) if log: return nx.sum(M * res[0]), res[1] else: return nx.sum(M * res) else: if method.lower() == 'sinkhorn': return sinkhorn_knopp(a, b, M, reg, numItermax=numItermax, stopThr=stopThr, verbose=verbose, log=log, warn=warn, warmstart=warmstart, **kwargs) elif method.lower() == 'sinkhorn_log': return sinkhorn_log(a, b, M, reg, numItermax=numItermax, stopThr=stopThr, verbose=verbose, log=log, warn=warn, warmstart=warmstart, **kwargs) elif method.lower() == 'sinkhorn_stabilized': return sinkhorn_stabilized(a, b, M, reg, numItermax=numItermax, stopThr=stopThr, warmstart=warmstart, verbose=verbose, log=log, warn=warn, **kwargs) else: raise ValueError("Unknown method '%s'." % method) def sinkhorn_knopp(a, b, M, reg, numItermax=1000, stopThr=1e-9, verbose=False, log=False, warn=True, warmstart=None, **kwargs): r""" Solve the entropic regularization optimal transport problem and return the OT matrix The function solves the following optimization problem: .. math:: \gamma = \mathop{\arg \min}_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F + \mathrm{reg}\cdot\Omega(\gamma) s.t. \ \gamma \mathbf{1} &= \mathbf{a} \gamma^T \mathbf{1} &= \mathbf{b} \gamma &\geq 0 where : - :math:`\mathbf{M}` is the (`dim_a`, `dim_b`) metric cost matrix - :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target weights (histograms, both sum to 1) The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in :ref:`[2] ` Parameters ---------- a : array-like, shape (dim_a,) samples weights in the source domain b : array-like, shape (dim_b,) or array-like, shape (dim_b, n_hists) samples in the target domain, compute sinkhorn with multiple targets and fixed :math:`\mathbf{M}` if :math:`\mathbf{b}` is a matrix (return OT loss + dual variables in log) M : array-like, shape (dim_a, dim_b) loss matrix reg : float Regularization term >0 numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on error (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True warn : bool, optional if True, raises a warning if the algorithm doesn't convergence. warmstart: tuple of arrays, shape (dim_a, dim_b), optional Initialization of dual potentials. If provided, the dual potentials should be given (that is the logarithm of the u,v sinkhorn scaling vectors) Returns ------- gamma : array-like, shape (dim_a, dim_b) Optimal transportation matrix for the given parameters log : dict log dictionary return only if log==True in parameters Examples -------- >>> import ot >>> a=[.5, .5] >>> b=[.5, .5] >>> M=[[0., 1.], [1., 0.]] >>> ot.sinkhorn(a, b, M, 1) array([[0.36552929, 0.13447071], [0.13447071, 0.36552929]]) .. _references-sinkhorn-knopp: References ---------- .. [2] M. 
Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013 See Also -------- ot.lp.emd : Unregularized OT ot.optim.cg : General regularized OT """ a, b, M = list_to_array(a, b, M) nx = get_backend(M, a, b) if len(a) == 0: a = nx.full((M.shape[0],), 1.0 / M.shape[0], type_as=M) if len(b) == 0: b = nx.full((M.shape[1],), 1.0 / M.shape[1], type_as=M) # init data dim_a = len(a) dim_b = b.shape[0] if len(b.shape) > 1: n_hists = b.shape[1] else: n_hists = 0 if log: log = {'err': []} # we assume that no distances are null except those of the diagonal of # distances if warmstart is None: if n_hists: u = nx.ones((dim_a, n_hists), type_as=M) / dim_a v = nx.ones((dim_b, n_hists), type_as=M) / dim_b else: u = nx.ones(dim_a, type_as=M) / dim_a v = nx.ones(dim_b, type_as=M) / dim_b else: u, v = nx.exp(warmstart[0]), nx.exp(warmstart[1]) K = nx.exp(M / (-reg)) Kp = (1 / a).reshape(-1, 1) * K err = 1 for ii in range(numItermax): uprev = u vprev = v KtransposeU = nx.dot(K.T, u) v = b / KtransposeU u = 1. / nx.dot(Kp, v) if (nx.any(KtransposeU == 0) or nx.any(nx.isnan(u)) or nx.any(nx.isnan(v)) or nx.any(nx.isinf(u)) or nx.any(nx.isinf(v))): # we have reached the machine precision # come back to previous solution and quit loop warnings.warn('Warning: numerical errors at iteration %d' % ii) u = uprev v = vprev break if ii % 10 == 0: # we can speed up the process by checking for the error only all # the 10th iterations if n_hists: tmp2 = nx.einsum('ik,ij,jk->jk', u, K, v) else: # compute right marginal tmp2= (diag(u)Kdiag(v))^T1 tmp2 = nx.einsum('i,ij,j->j', u, K, v) err = nx.norm(tmp2 - b) # violation of marginal if log: log['err'].append(err) if err < stopThr: break if verbose: if ii % 200 == 0: print( '{:5s}|{:12s}'.format('It.', 'Err') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(ii, err)) else: if warn: warnings.warn("Sinkhorn did not converge. You might want to " "increase the number of iterations `numItermax` " "or the regularization parameter `reg`.") if log: log['niter'] = ii log['u'] = u log['v'] = v if n_hists: # return only loss res = nx.einsum('ik,ij,jk,ij->k', u, K, v, M) if log: return res, log else: return res else: # return OT matrix if log: return u.reshape((-1, 1)) * K * v.reshape((1, -1)), log else: return u.reshape((-1, 1)) * K * v.reshape((1, -1)) def sinkhorn_log(a, b, M, reg, numItermax=1000, stopThr=1e-9, verbose=False, log=False, warn=True, warmstart=None, **kwargs): r""" Solve the entropic regularization optimal transport problem in log space and return the OT matrix The function solves the following optimization problem: .. math:: \gamma = \mathop{\arg \min}_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F + \mathrm{reg}\cdot\Omega(\gamma) s.t. 
\ \gamma \mathbf{1} &= \mathbf{a} \gamma^T \mathbf{1} &= \mathbf{b} \gamma &\geq 0 where : - :math:`\mathbf{M}` is the (`dim_a`, `dim_b`) metric cost matrix - :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target weights (histograms, both sum to 1) The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm :ref:`[2] ` with the implementation from :ref:`[34] ` Parameters ---------- a : array-like, shape (dim_a,) samples weights in the source domain b : array-like, shape (dim_b,) or array-like, shape (dim_b, n_hists) samples in the target domain, compute sinkhorn with multiple targets and fixed :math:`\mathbf{M}` if :math:`\mathbf{b}` is a matrix (return OT loss + dual variables in log) M : array-like, shape (dim_a, dim_b) loss matrix reg : float Regularization term >0 numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on error (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True warn : bool, optional if True, raises a warning if the algorithm doesn't convergence. warmstart: tuple of arrays, shape (dim_a, dim_b), optional Initialization of dual potentials. If provided, the dual potentials should be given (that is the logarithm of the u,v sinkhorn scaling vectors) Returns ------- gamma : array-like, shape (dim_a, dim_b) Optimal transportation matrix for the given parameters log : dict log dictionary return only if log==True in parameters Examples -------- >>> import ot >>> a=[.5, .5] >>> b=[.5, .5] >>> M=[[0., 1.], [1., 0.]] >>> ot.sinkhorn(a, b, M, 1) array([[0.36552929, 0.13447071], [0.13447071, 0.36552929]]) .. _references-sinkhorn-log: References ---------- .. [2] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013 .. [34] Feydy, J., SĂ©journĂ©, T., Vialard, F. X., Amari, S. I., TrouvĂ©, A., & PeyrĂ©, G. (2019, April). Interpolating between optimal transport and MMD using Sinkhorn divergences. In The 22nd International Conference on Artificial Intelligence and Statistics (pp. 2681-2690). PMLR. 
See Also
    --------
    ot.lp.emd : Unregularized OT
    ot.optim.cg : General regularized OT

    """

    a, b, M = list_to_array(a, b, M)

    nx = get_backend(M, a, b)

    if len(a) == 0:
        a = nx.full((M.shape[0],), 1.0 / M.shape[0], type_as=M)
    if len(b) == 0:
        b = nx.full((M.shape[1],), 1.0 / M.shape[1], type_as=M)

    # init data
    dim_a = len(a)
    dim_b = b.shape[0]

    if len(b.shape) > 1:
        n_hists = b.shape[1]
    else:
        n_hists = 0

    # in case of multiple histograms
    if n_hists > 1 and warmstart is None:
        warmstart = [None] * n_hists

    if n_hists:  # we do not want to use tensors so we do a loop
        lst_loss = []
        lst_u = []
        lst_v = []

        for k in range(n_hists):
            res = sinkhorn_log(a, b[:, k], M, reg, numItermax=numItermax,
                               stopThr=stopThr, verbose=verbose, log=log,
                               warmstart=warmstart[k], **kwargs)

            if log:
                lst_loss.append(nx.sum(M * res[0]))
                lst_u.append(res[1]['log_u'])
                lst_v.append(res[1]['log_v'])
            else:
                lst_loss.append(nx.sum(M * res))
        res = nx.stack(lst_loss)
        if log:
            log = {'log_u': nx.stack(lst_u, 1),
                   'log_v': nx.stack(lst_v, 1), }
            log['u'] = nx.exp(log['log_u'])
            log['v'] = nx.exp(log['log_v'])
            return res, log
        else:
            return res

    else:
        if log:
            log = {'err': []}

        Mr = - M / reg

        # we assume that no distances are null except those of the diagonal of
        # distances
        if warmstart is None:
            u = nx.zeros(dim_a, type_as=M)
            v = nx.zeros(dim_b, type_as=M)
        else:
            u, v = warmstart

        def get_logT(u, v):
            if n_hists:
                return Mr[:, :, None] + u + v
            else:
                return Mr + u[:, None] + v[None, :]

        loga = nx.log(a)
        logb = nx.log(b)

        err = 1
        for ii in range(numItermax):

            v = logb - nx.logsumexp(Mr + u[:, None], 0)
            u = loga - nx.logsumexp(Mr + v[None, :], 1)

            if ii % 10 == 0:
                # we can speed up the process by checking for the error
                # only every 10th iteration

                # compute right marginal tmp2 = (diag(u) K diag(v))^T 1
                tmp2 = nx.sum(nx.exp(get_logT(u, v)), 0)
                err = nx.norm(tmp2 - b)  # violation of marginal
                if log:
                    log['err'].append(err)

                if verbose:
                    if ii % 200 == 0:
                        print('{:5s}|{:12s}'.format('It.', 'Err') + '\n' + '-' * 19)
                    print('{:5d}|{:8e}|'.format(ii, err))
                if err < stopThr:
                    break
        else:
            if warn:
                warnings.warn("Sinkhorn did not converge. You might want to "
                              "increase the number of iterations `numItermax` "
                              "or the regularization parameter `reg`.")

        if log:
            log['niter'] = ii
            log['log_u'] = u
            log['log_v'] = v
            log['u'] = nx.exp(u)
            log['v'] = nx.exp(v)

            return nx.exp(get_logT(u, v)), log

        else:
            return nx.exp(get_logT(u, v))


def greenkhorn(a, b, M, reg, numItermax=10000, stopThr=1e-9, verbose=False,
               log=False, warn=True, warmstart=None):
    r"""
    Solve the entropic regularization optimal transport problem and return the OT matrix

    The algorithm used is based on the paper :ref:`[22] <references-greenkhorn>`
    which is a stochastic version of the
    Sinkhorn-Knopp algorithm :ref:`[2] <references-greenkhorn>`

    The function solves the following optimization problem:

    .. math::
        \gamma = \mathop{\arg \min}_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F +
        \mathrm{reg}\cdot\Omega(\gamma)

        s.t.
\ \gamma \mathbf{1} &= \mathbf{a} \gamma^T \mathbf{1} &= \mathbf{b} \gamma &\geq 0 where : - :math:`\mathbf{M}` is the (`dim_a`, `dim_b`) metric cost matrix - :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target weights (histograms, both sum to 1) Parameters ---------- a : array-like, shape (dim_a,) samples weights in the source domain b : array-like, shape (dim_b,) or array-like, shape (dim_b, n_hists) samples in the target domain, compute sinkhorn with multiple targets and fixed :math:`\mathbf{M}` if :math:`\mathbf{b}` is a matrix (return OT loss + dual variables in log) M : array-like, shape (dim_a, dim_b) loss matrix reg : float Regularization term >0 numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on error (>0) log : bool, optional record log if True warn : bool, optional if True, raises a warning if the algorithm doesn't convergence. warmstart: tuple of arrays, shape (dim_a, dim_b), optional Initialization of dual potentials. If provided, the dual potentials should be given (that is the logarithm of the u,v sinkhorn scaling vectors) Returns ------- gamma : array-like, shape (dim_a, dim_b) Optimal transportation matrix for the given parameters log : dict log dictionary return only if log==True in parameters Examples -------- >>> import ot >>> a=[.5, .5] >>> b=[.5, .5] >>> M=[[0., 1.], [1., 0.]] >>> ot.bregman.greenkhorn(a, b, M, 1) array([[0.36552929, 0.13447071], [0.13447071, 0.36552929]]) .. _references-greenkhorn: References ---------- .. [2] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013 .. [22] J. Altschuler, J.Weed, P. Rigollet : Near-linear time approximation algorithms for optimal transport via Sinkhorn iteration, Advances in Neural Information Processing Systems (NIPS) 31, 2017 See Also -------- ot.lp.emd : Unregularized OT ot.optim.cg : General regularized OT """ a, b, M = list_to_array(a, b, M) nx = get_backend(M, a, b) if nx.__name__ in ("jax", "tf"): raise TypeError("JAX or TF arrays have been received. Greenkhorn is not " "compatible with neither JAX nor TF") if len(a) == 0: a = nx.ones((M.shape[0],), type_as=M) / M.shape[0] if len(b) == 0: b = nx.ones((M.shape[1],), type_as=M) / M.shape[1] dim_a = a.shape[0] dim_b = b.shape[0] K = nx.exp(-M / reg) if warmstart is None: u = nx.full((dim_a,), 1. / dim_a, type_as=K) v = nx.full((dim_b,), 1. 
/ dim_b, type_as=K)
    else:
        u, v = nx.exp(warmstart[0]), nx.exp(warmstart[1])

    G = u[:, None] * K * v[None, :]

    viol = nx.sum(G, axis=1) - a
    viol_2 = nx.sum(G, axis=0) - b
    stopThr_val = 1
    if log:
        log = dict()
        log['u'] = u
        log['v'] = v

    for ii in range(numItermax):
        i_1 = nx.argmax(nx.abs(viol))
        i_2 = nx.argmax(nx.abs(viol_2))
        m_viol_1 = nx.abs(viol[i_1])
        m_viol_2 = nx.abs(viol_2[i_2])
        stopThr_val = nx.maximum(m_viol_1, m_viol_2)

        if m_viol_1 > m_viol_2:
            old_u = u[i_1]
            new_u = a[i_1] / nx.dot(K[i_1, :], v)
            G[i_1, :] = new_u * K[i_1, :] * v

            viol[i_1] = nx.dot(new_u * K[i_1, :], v) - a[i_1]
            viol_2 += (K[i_1, :].T * (new_u - old_u) * v)
            u[i_1] = new_u
        else:
            old_v = v[i_2]
            new_v = b[i_2] / nx.dot(K[:, i_2].T, u)
            G[:, i_2] = u * K[:, i_2] * new_v
            # aviol = (G@one_m - a)
            # aviol_2 = (G.T@one_n - b)
            viol += (-old_v + new_v) * K[:, i_2] * u
            viol_2[i_2] = new_v * nx.dot(K[:, i_2], u) - b[i_2]
            v[i_2] = new_v

        if stopThr_val <= stopThr:
            break
    else:
        if warn:
            warnings.warn("Sinkhorn did not converge. You might want to "
                          "increase the number of iterations `numItermax` "
                          "or the regularization parameter `reg`.")

    if log:
        log["n_iter"] = ii
        log['u'] = u
        log['v'] = v

    if log:
        return G, log
    else:
        return G


def sinkhorn_stabilized(a, b, M, reg, numItermax=1000, tau=1e3, stopThr=1e-9,
                        warmstart=None, verbose=False, print_period=20,
                        log=False, warn=True, **kwargs):
    r"""
    Solve the entropic regularization OT problem with log stabilization

    The function solves the following optimization problem:

    .. math::
        \gamma = \mathop{\arg \min}_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F +
        \mathrm{reg}\cdot\Omega(\gamma)

        s.t. \ \gamma \mathbf{1} &= \mathbf{a}

             \gamma^T \mathbf{1} &= \mathbf{b}

             \gamma &\geq 0

    where :

    - :math:`\mathbf{M}` is the (`dim_a`, `dim_b`) metric cost matrix
    - :math:`\Omega` is the entropic regularization term
      :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
    - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target
      weights (histograms, both sum to 1)

    The algorithm used for solving the problem is the Sinkhorn-Knopp matrix
    scaling algorithm as proposed in :ref:`[2] <references-sinkhorn-stabilized>`
    but with the log stabilization proposed in :ref:`[10] <references-sinkhorn-stabilized>`
    and defined in :ref:`[9] <references-sinkhorn-stabilized>` (Algo 3.1).

    Parameters
    ----------
    a : array-like, shape (dim_a,)
        samples weights in the source domain
    b : array-like, shape (dim_b,)
        samples in the target domain
    M : array-like, shape (dim_a, dim_b)
        loss matrix
    reg : float
        Regularization term >0
    tau : float
        threshold for max value in :math:`\mathbf{u}` or :math:`\mathbf{v}`
        for log scaling
    warmstart : tuple of arrays, optional
        if given, starting values for alpha and beta log scalings
    numItermax : int, optional
        Max number of iterations
    stopThr : float, optional
        Stop threshold on error (>0)
    verbose : bool, optional
        Print information along iterations
    log : bool, optional
        record log if True
    warn : bool, optional
        if True, raises a warning if the algorithm doesn't converge.

    Returns
    -------
    gamma : array-like, shape (dim_a, dim_b)
        Optimal transportation matrix for the given parameters
    log : dict
        log dictionary returned only if log==True in parameters

    Examples
    --------
    >>> import ot
    >>> a=[.5,.5]
    >>> b=[.5,.5]
    >>> M=[[0.,1.],[1.,0.]]
    >>> ot.bregman.sinkhorn_stabilized(a, b, M, 1)
    array([[0.36552929, 0.13447071],
           [0.13447071, 0.36552929]])

    .. _references-sinkhorn-stabilized:
    References
    ----------
    .. [2] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal
        Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013

    .. [9] Schmitzer, B. (2016).
Stabilized Sparse Scaling Algorithms for Entropy Regularized Transport Problems. arXiv preprint arXiv:1610.06519. .. [10] Chizat, L., PeyrĂ©, G., Schmitzer, B., & Vialard, F. X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816. See Also -------- ot.lp.emd : Unregularized OT ot.optim.cg : General regularized OT """ a, b, M = list_to_array(a, b, M) nx = get_backend(M, a, b) if len(a) == 0: a = nx.ones((M.shape[0],), type_as=M) / M.shape[0] if len(b) == 0: b = nx.ones((M.shape[1],), type_as=M) / M.shape[1] # test if multiple target if len(b.shape) > 1: n_hists = b.shape[1] a = a[:, None] else: n_hists = 0 # init data dim_a = len(a) dim_b = len(b) if log: log = {'err': []} # we assume that no distances are null except those of the diagonal of # distances if warmstart is None: alpha, beta = nx.zeros(dim_a, type_as=M), nx.zeros(dim_b, type_as=M) else: alpha, beta = warmstart if n_hists: u = nx.ones((dim_a, n_hists), type_as=M) / dim_a v = nx.ones((dim_b, n_hists), type_as=M) / dim_b else: u, v = nx.ones(dim_a, type_as=M), nx.ones(dim_b, type_as=M) u /= dim_a v /= dim_b def get_K(alpha, beta): """log space computation""" return nx.exp(-(M - alpha.reshape((dim_a, 1)) - beta.reshape((1, dim_b))) / reg) def get_Gamma(alpha, beta, u, v): """log space gamma computation""" return nx.exp(-(M - alpha.reshape((dim_a, 1)) - beta.reshape((1, dim_b))) / reg + nx.log(u.reshape((dim_a, 1))) + nx.log(v.reshape((1, dim_b)))) K = get_K(alpha, beta) transp = K err = 1 for ii in range(numItermax): uprev = u vprev = v # sinkhorn update v = b / (nx.dot(K.T, u)) u = a / (nx.dot(K, v)) # remove numerical problems and store them in K if nx.max(nx.abs(u)) > tau or nx.max(nx.abs(v)) > tau: if n_hists: alpha, beta = alpha + reg * \ nx.max(nx.log(u), 1), beta + reg * nx.max(nx.log(v)) else: alpha, beta = alpha + reg * nx.log(u), beta + reg * nx.log(v) if n_hists: u = nx.ones((dim_a, n_hists), type_as=M) / dim_a v = nx.ones((dim_b, n_hists), type_as=M) / dim_b else: u = nx.ones(dim_a, type_as=M) / dim_a v = nx.ones(dim_b, type_as=M) / dim_b K = get_K(alpha, beta) if ii % print_period == 0: # we can speed up the process by checking for the error only all # the 10th iterations if n_hists: err_u = nx.max(nx.abs(u - uprev)) err_u /= max(nx.max(nx.abs(u)), nx.max(nx.abs(uprev)), 1.0) err_v = nx.max(nx.abs(v - vprev)) err_v /= max(nx.max(nx.abs(v)), nx.max(nx.abs(vprev)), 1.0) err = 0.5 * (err_u + err_v) else: transp = get_Gamma(alpha, beta, u, v) err = nx.norm(nx.sum(transp, axis=0) - b) if log: log['err'].append(err) if verbose: if ii % (print_period * 20) == 0: print( '{:5s}|{:12s}'.format('It.', 'Err') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(ii, err)) if err <= stopThr: break if nx.any(nx.isnan(u)) or nx.any(nx.isnan(v)): # we have reached the machine precision # come back to previous solution and quit loop warnings.warn('Numerical errors at iteration %d' % ii) u = uprev v = vprev break else: if warn: warnings.warn("Sinkhorn did not converge. 
You might want to " "increase the number of iterations `numItermax` " "or the regularization parameter `reg`.") if log: if n_hists: alpha = alpha[:, None] beta = beta[:, None] logu = alpha / reg + nx.log(u) logv = beta / reg + nx.log(v) log["n_iter"] = ii log['logu'] = logu log['logv'] = logv log['alpha'] = alpha + reg * nx.log(u) log['beta'] = beta + reg * nx.log(v) log['warmstart'] = (log['alpha'], log['beta']) if n_hists: res = nx.stack([ nx.sum(get_Gamma(alpha, beta, u[:, i], v[:, i]) * M) for i in range(n_hists) ]) return res, log else: return get_Gamma(alpha, beta, u, v), log else: if n_hists: res = nx.stack([ nx.sum(get_Gamma(alpha, beta, u[:, i], v[:, i]) * M) for i in range(n_hists) ]) return res else: return get_Gamma(alpha, beta, u, v) def sinkhorn_epsilon_scaling(a, b, M, reg, numItermax=100, epsilon0=1e4, numInnerItermax=100, tau=1e3, stopThr=1e-9, warmstart=None, verbose=False, print_period=10, log=False, warn=True, **kwargs): r""" Solve the entropic regularization optimal transport problem with log stabilization and epsilon scaling. The function solves the following optimization problem: .. math:: \gamma = \mathop{\arg \min}_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F + \mathrm{reg}\cdot\Omega(\gamma) s.t. \ \gamma \mathbf{1} &= \mathbf{a} \gamma^T \mathbf{1} &= \mathbf{b} \gamma &\geq 0 where : - :math:`\mathbf{M}` is the (`dim_a`, `dim_b`) metric cost matrix - :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target weights (histograms, both sum to 1) The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in :ref:`[2] ` but with the log stabilization proposed in :ref:`[10] ` and the log scaling proposed in :ref:`[9] ` algorithm 3.2 Parameters ---------- a : array-like, shape (dim_a,) samples weights in the source domain b : array-like, shape (dim_b,) samples in the target domain M : array-like, shape (dim_a, dim_b) loss matrix reg : float Regularization term >0 tau : float threshold for max value in :math:`\mathbf{u}` or :math:`\mathbf{b}` for log scaling warmstart : tuple of vectors if given then starting values for alpha and beta log scalings numItermax : int, optional Max number of iterations numInnerItermax : int, optional Max number of iterations in the inner slog stabilized sinkhorn epsilon0 : int, optional first epsilon regularization value (then exponential decrease to reg) stopThr : float, optional Stop threshold on error (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True warn : bool, optional if True, raises a warning if the algorithm doesn't convergence. Returns ------- gamma : array-like, shape (dim_a, dim_b) Optimal transportation matrix for the given parameters log : dict log dictionary return only if log==True in parameters Examples -------- >>> import ot >>> a=[.5, .5] >>> b=[.5, .5] >>> M=[[0., 1.], [1., 0.]] >>> ot.bregman.sinkhorn_epsilon_scaling(a, b, M, 1) array([[0.36552929, 0.13447071], [0.13447071, 0.36552929]]) .. _references-sinkhorn-epsilon-scaling: References ---------- .. [2] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013 .. [9] Schmitzer, B. (2016). Stabilized Sparse Scaling Algorithms for Entropy Regularized Transport Problems. arXiv preprint arXiv:1610.06519. .. [10] Chizat, L., PeyrĂ©, G., Schmitzer, B., & Vialard, F. 
X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816. See Also -------- ot.lp.emd : Unregularized OT ot.optim.cg : General regularized OT """ a, b, M = list_to_array(a, b, M) nx = get_backend(M, a, b) if len(a) == 0: a = nx.ones((M.shape[0],), type_as=M) / M.shape[0] if len(b) == 0: b = nx.ones((M.shape[1],), type_as=M) / M.shape[1] # init data dim_a = len(a) dim_b = len(b) # nrelative umerical precision with 64 bits numItermin = 35 numItermax = max(numItermin, numItermax) # ensure that last velue is exact ii = 0 if log: log = {'err': []} # we assume that no distances are null except those of the diagonal of # distances if warmstart is None: alpha, beta = nx.zeros(dim_a, type_as=M), nx.zeros(dim_b, type_as=M) else: alpha, beta = warmstart # print(np.min(K)) def get_reg(n): # exponential decreasing return (epsilon0 - reg) * np.exp(-n) + reg err = 1 for ii in range(numItermax): regi = get_reg(ii) G, logi = sinkhorn_stabilized(a, b, M, regi, numItermax=numInnerItermax, stopThr=stopThr, warmstart=(alpha, beta), verbose=False, print_period=20, tau=tau, log=True) alpha = logi['alpha'] beta = logi['beta'] if ii % (print_period) == 0: # spsion nearly converged # we can speed up the process by checking for the error only all # the 10th iterations transp = G err = nx.norm(nx.sum(transp, axis=0) - b) ** 2 + \ nx.norm(nx.sum(transp, axis=1) - a) ** 2 if log: log['err'].append(err) if verbose: if ii % (print_period * 10) == 0: print('{:5s}|{:12s}'.format( 'It.', 'Err') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(ii, err)) if err <= stopThr and ii > numItermin: break else: if warn: warnings.warn("Sinkhorn did not converge. You might want to " "increase the number of iterations `numItermax` " "or the regularization parameter `reg`.") if log: log['alpha'] = alpha log['beta'] = beta log['warmstart'] = (log['alpha'], log['beta']) log['niter'] = ii return G, log else: return G python-pot-0.9.3+dfsg/ot/bregman/_utils.py000066400000000000000000000023701455713015700205030ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Common tools of Bregman projections solvers for entropic regularized OT """ # Author: Remi Flamary # Nicolas Courty # # License: MIT License from ..utils import list_to_array from ..backend import get_backend def geometricBar(weights, alldistribT): """return the weighted geometric mean of distributions""" weights, alldistribT = list_to_array(weights, alldistribT) nx = get_backend(weights, alldistribT) assert (len(weights) == alldistribT.shape[1]) return nx.exp(nx.dot(nx.log(alldistribT), weights.T)) def geometricMean(alldistribT): """return the geometric mean of distributions""" alldistribT = list_to_array(alldistribT) nx = get_backend(alldistribT) return nx.exp(nx.mean(nx.log(alldistribT), axis=1)) def projR(gamma, p): """return the KL projection on the row constraints """ gamma, p = list_to_array(gamma, p) nx = get_backend(gamma, p) return (gamma.T * p / nx.maximum(nx.sum(gamma, axis=1), 1e-10)).T def projC(gamma, q): """return the KL projection on the column constraints """ gamma, q = list_to_array(gamma, q) nx = get_backend(gamma, q) return gamma * q / nx.maximum(nx.sum(gamma, axis=0), 1e-10) python-pot-0.9.3+dfsg/ot/coot.py000066400000000000000000000505441455713015700165430ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ CO-Optimal Transport solver """ # Author: Quang Huy Tran # # License: MIT License import warnings from .lp import emd from .utils import list_to_array from .backend import get_backend from .bregman import 
sinkhorn def co_optimal_transport(X, Y, wx_samp=None, wx_feat=None, wy_samp=None, wy_feat=None, epsilon=0, alpha=0, M_samp=None, M_feat=None, warmstart=None, nits_bcd=100, tol_bcd=1e-7, eval_bcd=1, nits_ot=500, tol_sinkhorn=1e-7, method_sinkhorn="sinkhorn", early_stopping_tol=1e-6, log=False, verbose=False): r"""Compute the CO-Optimal Transport between two matrices. Return the sample and feature transport plans between :math:`(\mathbf{X}, \mathbf{w}_{xs}, \mathbf{w}_{xf})` and :math:`(\mathbf{Y}, \mathbf{w}_{ys}, \mathbf{w}_{yf})`. The function solves the following CO-Optimal Transport (COOT) problem: .. math:: \mathbf{COOT}_{\alpha, \varepsilon} = \mathop{\arg \min}_{\mathbf{P}, \mathbf{Q}} &\quad \sum_{i,j,k,l} (\mathbf{X}_{i,k} - \mathbf{Y}_{j,l})^2 \mathbf{P}_{i,j} \mathbf{Q}_{k,l} + \alpha_s \sum_{i,j} \mathbf{P}_{i,j} \mathbf{M^{(s)}}_{i, j} \\ &+ \alpha_f \sum_{k, l} \mathbf{Q}_{k,l} \mathbf{M^{(f)}}_{k, l} + \varepsilon_s \mathbf{KL}(\mathbf{P} | \mathbf{w}_{xs} \mathbf{w}_{ys}^T) + \varepsilon_f \mathbf{KL}(\mathbf{Q} | \mathbf{w}_{xf} \mathbf{w}_{yf}^T) Where : - :math:`\mathbf{X}`: Data matrix in the source space - :math:`\mathbf{Y}`: Data matrix in the target space - :math:`\mathbf{M^{(s)}}`: Additional sample matrix - :math:`\mathbf{M^{(f)}}`: Additional feature matrix - :math:`\mathbf{w}_{xs}`: Distribution of the samples in the source space - :math:`\mathbf{w}_{xf}`: Distribution of the features in the source space - :math:`\mathbf{w}_{ys}`: Distribution of the samples in the target space - :math:`\mathbf{w}_{yf}`: Distribution of the features in the target space .. note:: This function allows epsilon to be zero. In that case, the :any:`ot.lp.emd` solver of POT will be used. Parameters ---------- X : (n_sample_x, n_feature_x) array-like, float First input matrix. Y : (n_sample_y, n_feature_y) array-like, float Second input matrix. wx_samp : (n_sample_x, ) array-like, float, optional (default = None) Histogram assigned on rows (samples) of matrix X. Uniform distribution by default. wx_feat : (n_feature_x, ) array-like, float, optional (default = None) Histogram assigned on columns (features) of matrix X. Uniform distribution by default. wy_samp : (n_sample_y, ) array-like, float, optional (default = None) Histogram assigned on rows (samples) of matrix Y. Uniform distribution by default. wy_feat : (n_feature_y, ) array-like, float, optional (default = None) Histogram assigned on columns (features) of matrix Y. Uniform distribution by default. epsilon : scalar or indexable object of length 2, float or int, optional (default = 0) Regularization parameters for entropic approximation of sample and feature couplings. Allow the case where epsilon contains 0. In that case, the EMD solver is used instead of Sinkhorn solver. If epsilon is scalar, then the same epsilon is applied to both regularization of sample and feature couplings. alpha : scalar or indexable object of length 2, float or int, optional (default = 0) Coefficient parameter of linear terms with respect to the sample and feature couplings. If alpha is scalar, then the same alpha is applied to both linear terms. M_samp : (n_sample_x, n_sample_y), float, optional (default = None) Sample matrix with respect to the linear term on sample coupling. M_feat : (n_feature_x, n_feature_y), float, optional (default = None) Feature matrix with respect to the linear term on feature coupling. 
warmstart : dictionary, optional (default = None) Contains 4 keys: - "duals_sample" and "duals_feature" whose values are tuples of 2 vectors of size (n_sample_x, n_sample_y) and (n_feature_x, n_feature_y). Initialization of sample and feature dual vectors if using Sinkhorn algorithm. Zero vectors by default. - "pi_sample" and "pi_feature" whose values are matrices of size (n_sample_x, n_sample_y) and (n_feature_x, n_feature_y). Initialization of sample and feature couplings. Uniform distributions by default. nits_bcd : int, optional (default = 100) Number of Block Coordinate Descent (BCD) iterations to solve COOT. tol_bcd : float, optional (default = 1e-7) Tolerance of BCD scheme. If the L1-norm between the current and previous sample couplings is under this threshold, then stop BCD scheme. eval_bcd : int, optional (default = 1) Multiplier of iteration at which the COOT cost is evaluated. For example, if `eval_bcd = 8`, then the cost is calculated at iterations 8, 16, 24, etc... nits_ot : int, optional (default = 500) Number of iterations to solve each of the two optimal transport problems in each BCD iteration. tol_sinkhorn : float, optional (default = 1e-7) Tolerance of Sinkhorn algorithm to stop the Sinkhorn scheme for entropic optimal transport problem (if any) in each BCD iteration. Only triggered when Sinkhorn solver is used. method_sinkhorn : string, optional (default = "sinkhorn") Method used in POT's `ot.sinkhorn` solver. Only supports "sinkhorn" and "sinkhorn_log". early_stopping_tol : float, optional (default = 1e-6) Tolerance for the early stopping. If the absolute difference between the last 2 recorded COOT distances is under this tolerance, then stop BCD scheme. log : bool, optional (default = False) If True then the cost and 4 dual vectors, including 2 from sample and 2 from feature couplings, are recorded. verbose : bool, optional (default = False) If True then print the COOT cost at every multiplier of `eval_bcd`-th iteration. Returns ------- pi_samp : (n_sample_x, n_sample_y) array-like, float Sample coupling matrix. pi_feat : (n_feature_x, n_feature_y) array-like, float Feature coupling matrix. log : dictionary, optional Returned if `log` is True. The keys are: duals_sample : (n_sample_x, n_sample_y) tuple, float Pair of dual vectors when solving OT problem w.r.t the sample coupling. duals_feature : (n_feature_x, n_feature_y) tuple, float Pair of dual vectors when solving OT problem w.r.t the feature coupling. distances : list, float List of COOT distances. References ---------- .. [49] I. Redko, T. Vayer, R. Flamary, and N. Courty, CO-Optimal Transport, Advances in Neural Information Processing Systems, 33 (2020). 
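.. note::
    The example below is an illustrative sketch added here for clarity
    (toy random data, not taken from the POT documentation or test
    suite); only the output shapes are shown, and they follow directly
    from the shapes documented above.

    >>> import numpy as np
    >>> from ot.coot import co_optimal_transport
    >>> X = np.random.RandomState(0).rand(5, 3)  # made-up source matrix
    >>> Y = np.random.RandomState(1).rand(4, 2)  # made-up target matrix
    >>> pi_samp, pi_feat = co_optimal_transport(X, Y)
    >>> pi_samp.shape, pi_feat.shape
    ((5, 4), (3, 2))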
""" def compute_kl(p, q): kl = nx.sum(p * nx.log(p + 1.0 * (p == 0))) - nx.sum(p * nx.log(q)) return kl # Main function if method_sinkhorn not in ["sinkhorn", "sinkhorn_log"]: raise ValueError( "Method {} is not supported in CO-Optimal Transport.".format(method_sinkhorn)) X, Y = list_to_array(X, Y) nx = get_backend(X, Y) if isinstance(epsilon, float) or isinstance(epsilon, int): eps_samp, eps_feat = epsilon, epsilon else: if len(epsilon) != 2: raise ValueError("Epsilon must be either a scalar or an indexable object of length 2.") else: eps_samp, eps_feat = epsilon[0], epsilon[1] if isinstance(alpha, float) or isinstance(alpha, int): alpha_samp, alpha_feat = alpha, alpha else: if len(alpha) != 2: raise ValueError("Alpha must be either a scalar or an indexable object of length 2.") else: alpha_samp, alpha_feat = alpha[0], alpha[1] # constant input variables if M_samp is None or alpha_samp == 0: M_samp, alpha_samp = 0, 0 if M_feat is None or alpha_feat == 0: M_feat, alpha_feat = 0, 0 nx_samp, nx_feat = X.shape ny_samp, ny_feat = Y.shape # measures on rows and columns if wx_samp is None: wx_samp = nx.ones(nx_samp, type_as=X) / nx_samp if wx_feat is None: wx_feat = nx.ones(nx_feat, type_as=X) / nx_feat if wy_samp is None: wy_samp = nx.ones(ny_samp, type_as=Y) / ny_samp if wy_feat is None: wy_feat = nx.ones(ny_feat, type_as=Y) / ny_feat wxy_samp = wx_samp[:, None] * wy_samp[None, :] wxy_feat = wx_feat[:, None] * wy_feat[None, :] # pre-calculate cost constants XY_sqr = (X ** 2 @ wx_feat)[:, None] + (Y ** 2 @ wy_feat)[None, :] + alpha_samp * M_samp XY_sqr_T = ((X.T)**2 @ wx_samp)[:, None] + ((Y.T) ** 2 @ wy_samp)[None, :] + alpha_feat * M_feat # initialize coupling and dual vectors if warmstart is None: pi_samp, pi_feat = wxy_samp, wxy_feat # shape nx_samp x ny_samp and nx_feat x ny_feat duals_samp = (nx.zeros(nx_samp, type_as=X), nx.zeros( ny_samp, type_as=Y)) # shape nx_samp, ny_samp duals_feat = (nx.zeros(nx_feat, type_as=X), nx.zeros( ny_feat, type_as=Y)) # shape nx_feat, ny_feat else: pi_samp, pi_feat = warmstart["pi_sample"], warmstart["pi_feature"] duals_samp, duals_feat = warmstart["duals_sample"], warmstart["duals_feature"] # initialize log list_coot = [float("inf")] err = tol_bcd + 1e-3 for idx in range(nits_bcd): pi_samp_prev = nx.copy(pi_samp) # update sample coupling ot_cost = XY_sqr - 2 * X @ pi_feat @ Y.T # size nx_samp x ny_samp if eps_samp > 0: pi_samp, dict_log = sinkhorn(a=wx_samp, b=wy_samp, M=ot_cost, reg=eps_samp, method=method_sinkhorn, numItermax=nits_ot, stopThr=tol_sinkhorn, log=True, warmstart=duals_samp) duals_samp = (nx.log(dict_log["u"]), nx.log(dict_log["v"])) elif eps_samp == 0: pi_samp, dict_log = emd( a=wx_samp, b=wy_samp, M=ot_cost, numItermax=nits_ot, log=True) duals_samp = (dict_log["u"], dict_log["v"]) # update feature coupling ot_cost = XY_sqr_T - 2 * X.T @ pi_samp @ Y # size nx_feat x ny_feat if eps_feat > 0: pi_feat, dict_log = sinkhorn(a=wx_feat, b=wy_feat, M=ot_cost, reg=eps_feat, method=method_sinkhorn, numItermax=nits_ot, stopThr=tol_sinkhorn, log=True, warmstart=duals_feat) duals_feat = (nx.log(dict_log["u"]), nx.log(dict_log["v"])) elif eps_feat == 0: pi_feat, dict_log = emd( a=wx_feat, b=wy_feat, M=ot_cost, numItermax=nits_ot, log=True) duals_feat = (dict_log["u"], dict_log["v"]) if idx % eval_bcd == 0: # update error err = nx.sum(nx.abs(pi_samp - pi_samp_prev)) # COOT part coot = nx.sum(ot_cost * pi_feat) if alpha_samp != 0: coot = coot + alpha_samp * nx.sum(M_samp * pi_samp) # Entropic part if eps_samp != 0: coot = coot + eps_samp * 
compute_kl(pi_samp, wxy_samp) if eps_feat != 0: coot = coot + eps_feat * compute_kl(pi_feat, wxy_feat) list_coot.append(coot) if err < tol_bcd or abs(list_coot[-2] - list_coot[-1]) < early_stopping_tol: break if verbose: print( "CO-Optimal Transport cost at iteration {}: {}".format(idx + 1, coot)) # sanity check if nx.sum(nx.isnan(pi_samp)) > 0 or nx.sum(nx.isnan(pi_feat)) > 0: warnings.warn("There is NaN in coupling.") if log: dict_log = {"duals_sample": duals_samp, "duals_feature": duals_feat, "distances": list_coot[1:]} return pi_samp, pi_feat, dict_log else: return pi_samp, pi_feat def co_optimal_transport2(X, Y, wx_samp=None, wx_feat=None, wy_samp=None, wy_feat=None, epsilon=0, alpha=0, M_samp=None, M_feat=None, warmstart=None, log=False, verbose=False, early_stopping_tol=1e-6, nits_bcd=100, tol_bcd=1e-7, eval_bcd=1, nits_ot=500, tol_sinkhorn=1e-7, method_sinkhorn="sinkhorn"): r"""Compute the CO-Optimal Transport distance between two measures. Returns the CO-Optimal Transport distance between :math:`(\mathbf{X}, \mathbf{w}_{xs}, \mathbf{w}_{xf})` and :math:`(\mathbf{Y}, \mathbf{w}_{ys}, \mathbf{w}_{yf})`. The function solves the following CO-Optimal Transport (COOT) problem: .. math:: \mathbf{COOT}_{\alpha, \varepsilon} = \mathop{\arg \min}_{\mathbf{P}, \mathbf{Q}} &\quad \sum_{i,j,k,l} (\mathbf{X}_{i,k} - \mathbf{Y}_{j,l})^2 \mathbf{P}_{i,j} \mathbf{Q}_{k,l} + \alpha_1 \sum_{i,j} \mathbf{P}_{i,j} \mathbf{M^{(s)}}_{i, j} \\ &+ \alpha_2 \sum_{k, l} \mathbf{Q}_{k,l} \mathbf{M^{(f)}}_{k, l} + \varepsilon_1 \mathbf{KL}(\mathbf{P} | \mathbf{w}_{xs} \mathbf{w}_{ys}^T) + \varepsilon_2 \mathbf{KL}(\mathbf{Q} | \mathbf{w}_{xf} \mathbf{w}_{yf}^T) where : - :math:`\mathbf{X}`: Data matrix in the source space - :math:`\mathbf{Y}`: Data matrix in the target space - :math:`\mathbf{M^{(s)}}`: Additional sample matrix - :math:`\mathbf{M^{(f)}}`: Additional feature matrix - :math:`\mathbf{w}_{xs}`: Distribution of the samples in the source space - :math:`\mathbf{w}_{xf}`: Distribution of the features in the source space - :math:`\mathbf{w}_{ys}`: Distribution of the samples in the target space - :math:`\mathbf{w}_{yf}`: Distribution of the features in the target space .. note:: This function allows epsilon to be zero. In that case, the :any:`ot.lp.emd` solver of POT will be used. Parameters ---------- X : (n_sample_x, n_feature_x) array-like, float First input matrix. Y : (n_sample_y, n_feature_y) array-like, float Second input matrix. wx_samp : (n_sample_x, ) array-like, float, optional (default = None) Histogram assigned on rows (samples) of matrix X. Uniform distribution by default. wx_feat : (n_feature_x, ) array-like, float, optional (default = None) Histogram assigned on columns (features) of matrix X. Uniform distribution by default. wy_samp : (n_sample_y, ) array-like, float, optional (default = None) Histogram assigned on rows (samples) of matrix Y. Uniform distribution by default. wy_feat : (n_feature_y, ) array-like, float, optional (default = None) Histogram assigned on columns (features) of matrix Y. Uniform distribution by default. epsilon : scalar or indexable object of length 2, float or int, optional (default = 0) Regularization parameters for entropic approximation of sample and feature couplings. Allow the case where epsilon contains 0. In that case, the EMD solver is used instead of Sinkhorn solver. If epsilon is scalar, then the same epsilon is applied to both regularization of sample and feature couplings. 
alpha : scalar or indexable object of length 2, float or int, optional (default = 0) Coefficient parameter of linear terms with respect to the sample and feature couplings. If alpha is scalar, then the same alpha is applied to both linear terms. M_samp : (n_sample_x, n_sample_y), float, optional (default = None) Sample matrix with respect to the linear term on sample coupling. M_feat : (n_feature_x, n_feature_y), float, optional (default = None) Feature matrix with respect to the linear term on feature coupling. warmstart : dictionary, optional (default = None) Contains 4 keys: - "duals_sample" and "duals_feature" whose values are tuples of 2 vectors of size (n_sample_x, n_sample_y) and (n_feature_x, n_feature_y). Initialization of sample and feature dual vectors if using Sinkhorn algorithm. Zero vectors by default. - "pi_sample" and "pi_feature" whose values are matrices of size (n_sample_x, n_sample_y) and (n_feature_x, n_feature_y). Initialization of sample and feature couplings. Uniform distributions by default. nits_bcd : int, optional (default = 100) Number of Block Coordinate Descent (BCD) iterations to solve COOT. tol_bcd : float, optional (default = 1e-7) Tolerance of BCD scheme. If the L1-norm between the current and previous sample couplings is under this threshold, then stop BCD scheme. eval_bcd : int, optional (default = 1) Multiplier of iteration at which the COOT cost is evaluated. For example, if `eval_bcd = 8`, then the cost is calculated at iterations 8, 16, 24, etc... nits_ot : int, optional (default = 500) Number of iterations to solve each of the two optimal transport problems in each BCD iteration. tol_sinkhorn : float, optional (default = 1e-7) Tolerance of Sinkhorn algorithm to stop the Sinkhorn scheme for entropic optimal transport problem (if any) in each BCD iteration. Only triggered when Sinkhorn solver is used. method_sinkhorn : string, optional (default = "sinkhorn") Method used in POT's `ot.sinkhorn` solver. Only supports "sinkhorn" and "sinkhorn_log". early_stopping_tol : float, optional (default = 1e-6) Tolerance for the early stopping. If the absolute difference between the last 2 recorded COOT distances is under this tolerance, then stop BCD scheme. log : bool, optional (default = False) If True then the cost and 4 dual vectors, including 2 from sample and 2 from feature couplings, are recorded. verbose : bool, optional (default = False) If True then print the COOT cost at every multiplier of `eval_bcd`-th iteration. Returns ------- float CO-Optimal Transport distance. dict Contains logged information from :any:`co_optimal_transport` solver. Only returned if `log` parameter is True References ---------- .. [47] I. Redko, T. Vayer, R. Flamary, and N. Courty, CO-Optimal Transport, Advances in Neural Information Processing Systems, 33 (2020). 
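.. note::
    A minimal sketch with made-up data (illustrative only): the scalar
    returned here is the last recorded COOT cost of the BCD scheme run
    by :any:`co_optimal_transport`.

    >>> import numpy as np
    >>> from ot.coot import co_optimal_transport2
    >>> X = np.random.RandomState(0).rand(5, 3)  # made-up source matrix
    >>> Y = np.random.RandomState(1).rand(4, 2)  # made-up target matrix
    >>> val = co_optimal_transport2(X, Y)  # non-negative scalar distance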
""" pi_samp, pi_feat, dict_log = co_optimal_transport(X=X, Y=Y, wx_samp=wx_samp, wx_feat=wx_feat, wy_samp=wy_samp, wy_feat=wy_feat, epsilon=epsilon, alpha=alpha, M_samp=M_samp, M_feat=M_feat, warmstart=warmstart, nits_bcd=nits_bcd, tol_bcd=tol_bcd, eval_bcd=eval_bcd, nits_ot=nits_ot, tol_sinkhorn=tol_sinkhorn, method_sinkhorn=method_sinkhorn, early_stopping_tol=early_stopping_tol, log=True, verbose=verbose) X, Y = list_to_array(X, Y) nx = get_backend(X, Y) nx_samp, nx_feat = X.shape ny_samp, ny_feat = Y.shape # measures on rows and columns if wx_samp is None: wx_samp = nx.ones(nx_samp, type_as=X) / nx_samp if wx_feat is None: wx_feat = nx.ones(nx_feat, type_as=X) / nx_feat if wy_samp is None: wy_samp = nx.ones(ny_samp, type_as=Y) / ny_samp if wy_feat is None: wy_feat = nx.ones(ny_feat, type_as=Y) / ny_feat vx_samp, vy_samp = dict_log["duals_sample"] vx_feat, vy_feat = dict_log["duals_feature"] gradX = 2 * X * (wx_samp[:, None] * wx_feat[None, :]) - \ 2 * pi_samp @ Y @ pi_feat.T # shape (nx_samp, nx_feat) gradY = 2 * Y * (wy_samp[:, None] * wy_feat[None, :]) - \ 2 * pi_samp.T @ X @ pi_feat # shape (ny_samp, ny_feat) coot = dict_log["distances"][-1] coot = nx.set_gradients(coot, (wx_samp, wx_feat, wy_samp, wy_feat, X, Y), (vx_samp, vx_feat, vy_samp, vy_feat, gradX, gradY)) if log: return coot, dict_log else: return coot python-pot-0.9.3+dfsg/ot/da.py000066400000000000000000002637271455713015700161740ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Domain adaptation with optimal transport """ # Author: Remi Flamary # Nicolas Courty # Michael Perrot # Nathalie Gayraud # Ievgen Redko # Eloi Tanguy # # License: MIT License import numpy as np import warnings from .backend import get_backend from .bregman import sinkhorn, jcpot_barycenter from .lp import emd from .utils import unif, dist, kernel, cost_normalization, label_normalization, laplacian, dots from .utils import BaseEstimator, check_params, deprecated, labels_to_masks, list_to_array from .unbalanced import sinkhorn_unbalanced from .gaussian import empirical_bures_wasserstein_mapping, empirical_gaussian_gromov_wasserstein_mapping from .optim import cg from .optim import gcg from .mapping import nearest_brenier_potential_fit, nearest_brenier_potential_predict_bounds, joint_OT_mapping_linear, \ joint_OT_mapping_kernel def sinkhorn_lpl1_mm(a, labels_a, b, M, reg, eta=0.1, numItermax=10, numInnerItermax=200, stopInnerThr=1e-9, verbose=False, log=False): r""" Solve the entropic regularization optimal transport problem with non-convex group lasso regularization The function solves the following optimization problem: .. math:: \gamma = \mathop{\arg \min}_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F + \mathrm{reg} \cdot \Omega_e(\gamma) + \eta \ \Omega_g(\gamma) s.t. \ \gamma \mathbf{1} = \mathbf{a} \gamma^T \mathbf{1} = \mathbf{b} \gamma \geq 0 where : - :math:`\mathbf{M}` is the (`ns`, `nt`) metric cost matrix - :math:`\Omega_e` is the entropic regularization term :math:`\Omega_e (\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - :math:`\Omega_g` is the group lasso regularization term :math:`\Omega_g(\gamma)=\sum_{i,c} \|\gamma_{i,\mathcal{I}_c}\|^{1/2}_1` where :math:`\mathcal{I}_c` are the index of samples from class `c` in the source domain. - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target weights (sum to 1) The algorithm used for solving the problem is the generalized conditional gradient as proposed in :ref:`[5, 7] `. 
Parameters ---------- a : array-like (ns,) samples weights in the source domain labels_a : array-like (ns,) labels of samples in the source domain b : array-like (nt,) samples weights in the target domain M : array-like (ns,nt) loss matrix reg : float Regularization term for entropic regularization >0 eta : float, optional Regularization term for group lasso regularization >0 numItermax : int, optional Max number of iterations numInnerItermax : int, optional Max number of iterations (inner sinkhorn solver) stopInnerThr : float, optional Stop threshold on error (inner sinkhorn solver) (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True Returns ------- gamma : (ns, nt) array-like Optimal transportation matrix for the given parameters log : dict log dictionary return only if log==True in parameters .. _references-sinkhorn-lpl1-mm: References ---------- .. [5] N. Courty; R. Flamary; D. Tuia; A. Rakotomamonjy, "Optimal Transport for Domain Adaptation," in IEEE Transactions on Pattern Analysis and Machine Intelligence , vol.PP, no.99, pp.1-1 .. [7] Rakotomamonjy, A., Flamary, R., & Courty, N. (2015). Generalized conditional gradient: analysis of convergence and applications. arXiv preprint arXiv:1510.06567. See Also -------- ot.lp.emd : Unregularized OT ot.bregman.sinkhorn : Entropic regularized OT ot.optim.cg : General regularized OT """ a, labels_a, b, M = list_to_array(a, labels_a, b, M) nx = get_backend(a, labels_a, b, M) p = 0.5 epsilon = 1e-3 indices_labels = [] classes = nx.unique(labels_a) for c in classes: idxc, = nx.where(labels_a == c) indices_labels.append(idxc) W = nx.zeros(M.shape, type_as=M) for cpt in range(numItermax): Mreg = M + eta * W if log: transp, log = sinkhorn(a, b, Mreg, reg, numItermax=numInnerItermax, stopThr=stopInnerThr, log=True) else: transp = sinkhorn(a, b, Mreg, reg, numItermax=numInnerItermax, stopThr=stopInnerThr) # the transport has been computed. Check if classes are really # separated W = nx.ones(M.shape, type_as=M) for (i, c) in enumerate(classes): majs = nx.sum(transp[indices_labels[i]], axis=0) majs = p * ((majs + epsilon) ** (p - 1)) W[indices_labels[i]] = majs if log: return transp, log else: return transp def sinkhorn_l1l2_gl(a, labels_a, b, M, reg, eta=0.1, numItermax=10, numInnerItermax=200, stopInnerThr=1e-9, eps=1e-12, verbose=False, log=False): r""" Solve the entropic regularization optimal transport problem with group lasso regularization The function solves the following optimization problem: .. math:: \gamma = \mathop{\arg \min}_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F + \mathrm{reg} \cdot \Omega_e(\gamma) + \eta \ \Omega_g(\gamma) s.t. \ \gamma \mathbf{1} = \mathbf{a} \gamma^T \mathbf{1} = \mathbf{b} \gamma \geq 0 where : - :math:`\mathbf{M}` is the (`ns`, `nt`) metric cost matrix - :math:`\Omega_e` is the entropic regularization term :math:`\Omega_e(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - :math:`\Omega_g` is the group lasso regularization term :math:`\Omega_g(\gamma)=\sum_{i,c} \|\gamma_{i,\mathcal{I}_c}\|^2` where :math:`\mathcal{I}_c` are the index of samples from class `c` in the source domain. - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target weights (sum to 1) The algorithm used for solving the problem is the generalized conditional gradient as proposed in :ref:`[5, 7] `. 
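.. note::
    Usage mirrors :any:`sinkhorn_lpl1_mm` above; only the group penalty
    changes (an l2 norm per class). Illustrative sketch with made-up
    data:

    >>> import numpy as np
    >>> import ot
    >>> a, b = ot.unif(4), ot.unif(3)
    >>> labels_a = np.array([0, 0, 1, 1])  # two classes in the source
    >>> M = np.abs(np.arange(4.)[:, None] - np.arange(3.)[None, :])
    >>> G = ot.da.sinkhorn_l1l2_gl(a, labels_a, b, M, reg=1., eta=0.1)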
Parameters ---------- a : array-like (ns,) samples weights in the source domain labels_a : array-like (ns,) labels of samples in the source domain b : array-like (nt,) samples in the target domain M : array-like (ns,nt) loss matrix reg : float Regularization term for entropic regularization >0 eta : float, optional Regularization term for group lasso regularization >0 numItermax : int, optional Max number of iterations numInnerItermax : int, optional Max number of iterations (inner sinkhorn solver) stopInnerThr : float, optional Stop threshold on error (inner sinkhorn solver) (>0) eps: float, optional (default=1e-12) Small value to avoid division by zero verbose : bool, optional Print information along iterations log : bool, optional record log if True Returns ------- gamma : (ns, nt) array-like Optimal transportation matrix for the given parameters log : dict log dictionary return only if log==True in parameters .. _references-sinkhorn-l1l2-gl: References ---------- .. [5] N. Courty; R. Flamary; D. Tuia; A. Rakotomamonjy, "Optimal Transport for Domain Adaptation," in IEEE Transactions on Pattern Analysis and Machine Intelligence , vol.PP, no.99, pp.1-1 .. [7] Rakotomamonjy, A., Flamary, R., & Courty, N. (2015). Generalized conditional gradient: analysis of convergence and applications. arXiv preprint arXiv:1510.06567. See Also -------- ot.optim.gcg : Generalized conditional gradient for OT problems """ a, labels_a, b, M = list_to_array(a, labels_a, b, M) nx = get_backend(a, labels_a, b, M) labels_u, labels_idx = nx.unique(labels_a, return_inverse=True) n_labels = labels_u.shape[0] unroll_labels_idx = nx.eye(n_labels, type_as=labels_u)[None, labels_idx] def f(G): G_split = nx.repeat(G.T[:, :, None], n_labels, axis=2) return nx.sum(nx.norm(G_split * unroll_labels_idx, axis=1)) def df(G): G_split = nx.repeat(G.T[:, :, None], n_labels, axis=2) * unroll_labels_idx W = nx.norm(G_split * unroll_labels_idx, axis=1, keepdims=True) G_norm = G_split / nx.clip(W, eps, None) return nx.sum(G_norm, axis=2).T return gcg(a, b, M, reg, eta, f, df, G0=None, numItermax=numItermax, numInnerItermax=numInnerItermax, stopThr=stopInnerThr, verbose=verbose, log=log) OT_mapping_linear = deprecated(empirical_bures_wasserstein_mapping) def emd_laplace(a, b, xs, xt, M, sim='knn', sim_param=None, reg='pos', eta=1, alpha=.5, numItermax=100, stopThr=1e-9, numInnerItermax=100000, stopInnerThr=1e-9, log=False, verbose=False): r"""Solve the optimal transport problem (OT) with Laplacian regularization .. math:: \gamma = \mathop{\arg \min}_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F + \eta \cdot \Omega_\alpha(\gamma) s.t. \ \gamma \mathbf{1} = \mathbf{a} \gamma^T \mathbf{1} = \mathbf{b} \gamma \geq 0 where: - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target weights (sum to 1) - :math:`\mathbf{x_s}` and :math:`\mathbf{x_t}` are source and target samples - :math:`\mathbf{M}` is the (`ns`, `nt`) metric cost matrix - :math:`\Omega_\alpha` is the Laplacian regularization term .. math:: \Omega_\alpha = \frac{1 - \alpha}{n_s^2} \sum_{i,j} \mathbf{S^s}_{i,j} \|T(\mathbf{x}^s_i) - T(\mathbf{x}^s_j) \|^2 + \frac{\alpha}{n_t^2} \sum_{i,j} \mathbf{S^t}_{i,j} \|T(\mathbf{x}^t_i) - T(\mathbf{x}^t_j) \|^2 with :math:`\mathbf{S^s}_{i,j}, \mathbf{S^t}_{i,j}` denoting source and target similarity matrices and :math:`T(\cdot)` being a barycentric mapping. The algorithm used for solving the problem is the conditional gradient algorithm as proposed in :ref:`[5] `. 
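.. note::
    A small usage sketch (data and regularization strength are made up
    for illustration). With ``sim='gauss'`` no extra dependency is
    needed, while ``sim='knn'`` requires scikit-learn:

    >>> import numpy as np
    >>> import ot
    >>> rng = np.random.RandomState(0)
    >>> xs, xt = rng.randn(5, 2), rng.randn(6, 2)
    >>> a, b = ot.unif(5), ot.unif(6)
    >>> M = ot.dist(xs, xt)
    >>> G = ot.da.emd_laplace(a, b, xs, xt, M, sim='gauss', eta=0.01)
    >>> G.shape
    (5, 6)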
Parameters ---------- a : array-like (ns,) samples weights in the source domain b : array-like (nt,) samples weights in the target domain xs : array-like (ns,d) samples in the source domain xt : array-like (nt,d) samples in the target domain M : array-like (ns,nt) loss matrix sim : string, optional Type of similarity ('knn' or 'gauss') used to construct the Laplacian. sim_param : int or float, optional Parameter (number of the nearest neighbors for sim='knn' or bandwidth for sim='gauss') used to compute the Laplacian. reg : string Type of Laplacian regularization eta : float Regularization term for Laplacian regularization alpha : float Regularization term for source domain's importance in regularization numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on error (inner emd solver) (>0) numInnerItermax : int, optional Max number of iterations (inner CG solver) stopInnerThr : float, optional Stop threshold on error (inner CG solver) (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True Returns ------- gamma : (ns, nt) array-like Optimal transportation matrix for the given parameters log : dict log dictionary return only if log==True in parameters .. _references-emd-laplace: References ---------- .. [5] N. Courty; R. Flamary; D. Tuia; A. Rakotomamonjy, "Optimal Transport for Domain Adaptation," in IEEE Transactions on Pattern Analysis and Machine Intelligence, vol.PP, no.99, pp.1-1 .. [30] R. Flamary, N. Courty, D. Tuia, A. Rakotomamonjy, "Optimal transport with Laplacian regularization: Applications to domain adaptation and shape matching," in NIPS Workshop on Optimal Transport and Machine Learning OTML, 2014. See Also -------- ot.lp.emd : Unregularized OT ot.optim.cg : General regularized OT """ if not isinstance(sim_param, (int, float, type(None))): raise ValueError( 'Similarity parameter should be an int or a float. Got {type} instead.'.format(type=type(sim_param).__name__)) a, b, xs, xt, M = list_to_array(a, b, xs, xt, M) nx = get_backend(a, b, xs, xt, M) if sim == 'gauss': if sim_param is None: sim_param = 1 / (2 * (nx.mean(dist(xs, xs, 'sqeuclidean')) ** 2)) sS = kernel(xs, xs, method=sim, sigma=sim_param) sT = kernel(xt, xt, method=sim, sigma=sim_param) elif sim == 'knn': if sim_param is None: sim_param = 3 from sklearn.neighbors import kneighbors_graph sS = nx.from_numpy(kneighbors_graph( X=nx.to_numpy(xs), n_neighbors=int(sim_param) ).toarray(), type_as=xs) sS = (sS + sS.T) / 2 sT = nx.from_numpy(kneighbors_graph( X=nx.to_numpy(xt), n_neighbors=int(sim_param) ).toarray(), type_as=xt) sT = (sT + sT.T) / 2 else: raise ValueError('Unknown similarity type {sim}. 
Currently supported similarity types are "knn" and "gauss".'.format(sim=sim)) lS = laplacian(sS) lT = laplacian(sT) def f(G): return ( alpha * nx.trace(dots(xt.T, G.T, lS, G, xt)) + (1 - alpha) * nx.trace(dots(xs.T, G, lT, G.T, xs)) ) ls2 = lS + lS.T lt2 = lT + lT.T xt2 = nx.dot(xt, xt.T) if reg == 'disp': Cs = -eta * alpha / xs.shape[0] * dots(ls2, xs, xt.T) Ct = -eta * (1 - alpha) / xt.shape[0] * dots(xs, xt.T, lt2) M = M + Cs + Ct def df(G): return ( alpha * dots(ls2, G, xt2) + (1 - alpha) * dots(xs, xs.T, G, lt2) ) return cg(a, b, M, reg=eta, f=f, df=df, G0=None, numItermax=numItermax, numItermaxEmd=numInnerItermax, stopThr=stopThr, stopThr2=stopInnerThr, verbose=verbose, log=log) def distribution_estimation_uniform(X): r"""estimates a uniform distribution from an array of samples :math:`\mathbf{X}` Parameters ---------- X : array-like, shape (n_samples, n_features) The array of samples Returns ------- mu : array-like, shape (n_samples,) The uniform distribution estimated from :math:`\mathbf{X}` """ return unif(X.shape[0], type_as=X) class BaseTransport(BaseEstimator): """Base class for OTDA objects .. note:: All estimators should specify all the parameters that can be set at the class level in their ``__init__`` as explicit keyword arguments (no ``*args`` or ``**kwargs``). The fit method should: - estimate a cost matrix and store it in a `cost_` attribute - estimate a coupling matrix and store it in a `coupling_` attribute - estimate distributions from source and target data and store them in `mu_s` and `mu_t` attributes - store `Xs` and `Xt` in attributes to be used later on in `transform` and `inverse_transform` methods `transform` method should always get as input a `Xs` parameter `inverse_transform` method should always get as input a `Xt` parameter `transform_labels` method should always get as input a `ys` parameter `inverse_transform_labels` method should always get as input a `yt` parameter """ def fit(self, Xs=None, ys=None, Xt=None, yt=None): r"""Build a coupling matrix from source and target sets of samples :math:`(\mathbf{X_s}, \mathbf{y_s})` and :math:`(\mathbf{X_t}, \mathbf{y_t})` Parameters ---------- Xs : array-like, shape (n_source_samples, n_features) The training input samples. ys : array-like, shape (n_source_samples,) The training class labels Xt : array-like, shape (n_target_samples, n_features) The training input samples. yt : array-like, shape (n_target_samples,) The class labels. If some target samples are unlabelled, fill the :math:`\mathbf{y_t}`'s elements with -1. Warning: Note that, due to this convention -1 cannot be used as a class label Returns ------- self : object Returns self. 
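.. note::
    End-to-end sketch through a concrete subclass (illustrative random
    data; any estimator deriving from this base class behaves the same
    way):

    >>> import numpy as np
    >>> import ot
    >>> rng = np.random.RandomState(0)
    >>> Xs, Xt = rng.randn(10, 2), rng.randn(12, 2)
    >>> otda = ot.da.SinkhornTransport(reg_e=1.).fit(Xs=Xs, Xt=Xt)
    >>> otda.coupling_.shape
    (10, 12)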
""" nx = self._get_backend(Xs, ys, Xt, yt) # check the necessary inputs parameters are here if check_params(Xs=Xs, Xt=Xt): # pairwise distance self.cost_ = dist(Xs, Xt, metric=self.metric) self.cost_ = cost_normalization(self.cost_, self.norm) if (ys is not None) and (yt is not None): if self.limit_max != np.infty: self.limit_max = self.limit_max * nx.max(self.cost_) # missing_labels is a (ns, nt) matrix of {0, 1} such that # the cells (i, j) has 0 iff either ys[i] or yt[j] is masked missing_ys = (ys == -1) + nx.zeros(ys.shape, type_as=ys) missing_yt = (yt == -1) + nx.zeros(yt.shape, type_as=yt) missing_labels = missing_ys[:, None] @ missing_yt[None, :] # labels_match is a (ns, nt) matrix of {True, False} such that # the cells (i, j) has False if ys[i] != yt[i] label_match = (ys[:, None] - yt[None, :]) != 0 # cost correction is a (ns, nt) matrix of {-Inf, float, Inf} such # that he cells (i, j) has -Inf where there's no correction necessary # by 'correction' we mean setting cost to a large value when # labels do not match # we suppress potential RuntimeWarning caused by Inf multiplication # (as we explicitly cover potential NANs later) with warnings.catch_warnings(): warnings.simplefilter('ignore', category=RuntimeWarning) cost_correction = label_match * missing_labels * self.limit_max # this operation is necessary because 0 * Inf = NAN # thus is irrelevant when limit_max is finite cost_correction = nx.nan_to_num(cost_correction, -np.infty) self.cost_ = nx.maximum(self.cost_, cost_correction) # distribution estimation self.mu_s = self.distribution_estimation(Xs) self.mu_t = self.distribution_estimation(Xt) # store arrays of samples self.xs_ = Xs self.xt_ = Xt return self def fit_transform(self, Xs=None, ys=None, Xt=None, yt=None): r"""Build a coupling matrix from source and target sets of samples :math:`(\mathbf{X_s}, \mathbf{y_s})` and :math:`(\mathbf{X_t}, \mathbf{y_t})` and transports source samples :math:`\mathbf{X_s}` onto target ones :math:`\mathbf{X_t}` Parameters ---------- Xs : array-like, shape (n_source_samples, n_features) The training input samples. ys : array-like, shape (n_source_samples,) The class labels for training samples Xt : array-like, shape (n_target_samples, n_features) The training input samples. yt : array-like, shape (n_target_samples,) The class labels. If some target samples are unlabelled, fill the :math:`\mathbf{y_t}`'s elements with -1. Warning: Note that, due to this convention -1 cannot be used as a class label Returns ------- transp_Xs : array-like, shape (n_source_samples, n_features) The source samples samples. """ return self.fit(Xs, ys, Xt, yt).transform(Xs, ys, Xt, yt) def transform(self, Xs=None, ys=None, Xt=None, yt=None, batch_size=128): r"""Transports source samples :math:`\mathbf{X_s}` onto target ones :math:`\mathbf{X_t}` Parameters ---------- Xs : array-like, shape (n_source_samples, n_features) The source input samples. ys : array-like, shape (n_source_samples,) The class labels for source samples Xt : array-like, shape (n_target_samples, n_features) The target input samples. yt : array-like, shape (n_target_samples,) The class labels for target. If some target samples are unlabelled, fill the :math:`\mathbf{y_t}`'s elements with -1. Warning: Note that, due to this convention -1 cannot be used as a class label batch_size : int, optional (default=128) The batch size for out of sample inverse transform Returns ------- transp_Xs : array-like, shape (n_source_samples, n_features) The transport source samples. 
""" nx = self.nx # check the necessary inputs parameters are here if check_params(Xs=Xs): if nx.array_equal(self.xs_, Xs): # perform standard barycentric mapping transp = self.coupling_ / nx.sum(self.coupling_, axis=1)[:, None] # set nans to 0 transp = nx.nan_to_num(transp, nan=0, posinf=0, neginf=0) # compute transported samples transp_Xs = nx.dot(transp, self.xt_) else: # perform out of sample mapping indices = nx.arange(Xs.shape[0]) batch_ind = [ indices[i:i + batch_size] for i in range(0, len(indices), batch_size)] transp_Xs = [] for bi in batch_ind: # get the nearest neighbor in the source domain D0 = dist(Xs[bi], self.xs_) idx = nx.argmin(D0, axis=1) # transport the source samples transp = self.coupling_ / nx.sum(self.coupling_, axis=1)[:, None] transp = nx.nan_to_num(transp, nan=0, posinf=0, neginf=0) transp_Xs_ = nx.dot(transp, self.xt_) # define the transported points transp_Xs_ = transp_Xs_[idx, :] + Xs[bi] - self.xs_[idx, :] transp_Xs.append(transp_Xs_) transp_Xs = nx.concatenate(transp_Xs, axis=0) return transp_Xs def transform_labels(self, ys=None): r"""Propagate source labels :math:`\mathbf{y_s}` to obtain estimated target labels as in :ref:`[27] `. Parameters ---------- ys : array-like, shape (n_source_samples,) The source class labels Returns ------- transp_ys : array-like, shape (n_target_samples, nb_classes) Estimated soft target labels. .. _references-basetransport-transform-labels: References ---------- .. [27] Ievgen Redko, Nicolas Courty, RĂ©mi Flamary, Devis Tuia "Optimal transport for multi-source domain adaptation under target shift", International Conference on Artificial Intelligence and Statistics (AISTATS), 2019. """ nx = self.nx # check the necessary inputs parameters are here if check_params(ys=ys): # perform label propagation transp = self.coupling_ / nx.sum(self.coupling_, axis=0)[None, :] # set nans to 0 transp = nx.nan_to_num(transp, nan=0, posinf=0, neginf=0) # compute propagated labels labels = label_normalization(ys) masks = labels_to_masks(labels, nx=nx, type_as=transp) transp_ys = nx.dot(masks.T, transp) return transp_ys.T def inverse_transform(self, Xs=None, ys=None, Xt=None, yt=None, batch_size=128): r"""Transports target samples :math:`\mathbf{X_t}` onto source samples :math:`\mathbf{X_s}` Parameters ---------- Xs : array-like, shape (n_source_samples, n_features) The source input samples. ys : array-like, shape (n_source_samples,) The source class labels Xt : array-like, shape (n_target_samples, n_features) The target input samples. yt : array-like, shape (n_target_samples,) The target class labels. If some target samples are unlabelled, fill the :math:`\mathbf{y_t}`'s elements with -1. Warning: Note that, due to this convention -1 cannot be used as a class label batch_size : int, optional (default=128) The batch size for out of sample inverse transform Returns ------- transp_Xt : array-like, shape (n_source_samples, n_features) The transported target samples. 
""" nx = self.nx # check the necessary inputs parameters are here if check_params(Xt=Xt): if nx.array_equal(self.xt_, Xt): # perform standard barycentric mapping transp_ = self.coupling_.T / nx.sum(self.coupling_, 0)[:, None] # set nans to 0 transp_ = nx.nan_to_num(transp_, nan=0, posinf=0, neginf=0) # compute transported samples transp_Xt = nx.dot(transp_, self.xs_) else: # perform out of sample mapping indices = nx.arange(Xt.shape[0]) batch_ind = [ indices[i:i + batch_size] for i in range(0, len(indices), batch_size)] transp_Xt = [] for bi in batch_ind: D0 = dist(Xt[bi], self.xt_) idx = nx.argmin(D0, axis=1) # transport the target samples transp_ = self.coupling_.T / nx.sum(self.coupling_, 0)[:, None] transp_ = nx.nan_to_num(transp_, nan=0, posinf=0, neginf=0) transp_Xt_ = nx.dot(transp_, self.xs_) # define the transported points transp_Xt_ = transp_Xt_[idx, :] + Xt[bi] - self.xt_[idx, :] transp_Xt.append(transp_Xt_) transp_Xt = nx.concatenate(transp_Xt, axis=0) return transp_Xt def inverse_transform_labels(self, yt=None): r"""Propagate target labels :math:`\mathbf{y_t}` to obtain estimated source labels :math:`\mathbf{y_s}` Parameters ---------- yt : array-like, shape (n_target_samples,) Returns ------- transp_ys : array-like, shape (n_source_samples, nb_classes) Estimated soft source labels. """ nx = self.nx # check the necessary inputs parameters are here if check_params(yt=yt): # perform label propagation transp = self.coupling_ / nx.sum(self.coupling_, 1)[:, None] # set nans to 0 transp = nx.nan_to_num(transp, nan=0, posinf=0, neginf=0) # compute propagated labels labels = label_normalization(yt) masks = labels_to_masks(labels, nx=nx, type_as=transp) transp_ys = nx.dot(masks.T, transp.T) return transp_ys.T class LinearTransport(BaseTransport): r""" OT linear operator between empirical distributions The function estimates the optimal linear operator that aligns the two empirical distributions. This is equivalent to estimating the closed form mapping between two Gaussian distributions :math:`\mathcal{N}(\mu_s,\Sigma_s)` and :math:`\mathcal{N}(\mu_t,\Sigma_t)` as proposed in :ref:`[14] ` and discussed in remark 2.29 in :ref:`[15] `. The linear operator from source to target :math:`M` .. math:: M(\mathbf{x})= \mathbf{A} \mathbf{x} + \mathbf{b} where : .. math:: \mathbf{A} &= \Sigma_s^{-1/2} \left(\Sigma_s^{1/2}\Sigma_t\Sigma_s^{1/2} \right)^{1/2} \Sigma_s^{-1/2} \mathbf{b} &= \mu_t - \mathbf{A} \mu_s Parameters ---------- reg : float,optional regularization added to the daigonals of covariances (>0) bias: boolean, optional estimate bias :math:`\mathbf{b}` else :math:`\mathbf{b} = 0` (default:True) log : bool, optional record log if True .. _references-lineartransport: References ---------- .. [14] Knott, M. and Smith, C. S. "On the optimal mapping of distributions", Journal of Optimization Theory and Applications Vol 43, 1984 .. [15] PeyrĂ©, G., & Cuturi, M. (2017). "Computational Optimal Transport", 2018. """ def __init__(self, reg=1e-8, bias=True, log=False, distribution_estimation=distribution_estimation_uniform): self.bias = bias self.log = log self.reg = reg self.distribution_estimation = distribution_estimation def fit(self, Xs=None, ys=None, Xt=None, yt=None): r"""Build a coupling matrix from source and target sets of samples :math:`(\mathbf{X_s}, \mathbf{y_s})` and :math:`(\mathbf{X_t}, \mathbf{y_t})` Parameters ---------- Xs : array-like, shape (n_source_samples, n_features) The training input samples. 
ys : array-like, shape (n_source_samples,) The class labels Xt : array-like, shape (n_target_samples, n_features) The training input samples. yt : array-like, shape (n_target_samples,) The class labels. If some target samples are unlabelled, fill the :math:`\mathbf{y_t}`'s elements with -1. Warning: Note that, due to this convention -1 cannot be used as a class label Returns ------- self : object Returns self. """ nx = self._get_backend(Xs, ys, Xt, yt) self.nx = nx self.mu_s = self.distribution_estimation(Xs) self.mu_t = self.distribution_estimation(Xt) # coupling estimation returned_ = empirical_bures_wasserstein_mapping(Xs, Xt, reg=self.reg, ws=nx.reshape(self.mu_s, (-1, 1)), wt=nx.reshape(self.mu_t, (-1, 1)), bias=self.bias, log=self.log) # deal with the value of log if self.log: self.A_, self.B_, self.log_ = returned_ else: self.A_, self.B_, = returned_ self.log_ = dict() # re compute inverse mapping self.A1_ = nx.inv(self.A_) self.B1_ = -nx.dot(self.B_, self.A1_) return self def transform(self, Xs=None, ys=None, Xt=None, yt=None, batch_size=128): r"""Transports source samples :math:`\mathbf{X_s}` onto target ones :math:`\mathbf{X_t}` Parameters ---------- Xs : array-like, shape (n_source_samples, n_features) The training input samples. ys : array-like, shape (n_source_samples,) The class labels Xt : array-like, shape (n_target_samples, n_features) The training input samples. yt : array-like, shape (n_target_samples,) The class labels. If some target samples are unlabelled, fill the :math:`\mathbf{y_t}`'s elements with -1. Warning: Note that, due to this convention -1 cannot be used as a class label batch_size : int, optional (default=128) The batch size for out of sample inverse transform Returns ------- transp_Xs : array-like, shape (n_source_samples, n_features) The transport source samples. """ nx = self.nx # check the necessary inputs parameters are here if check_params(Xs=Xs): transp_Xs = nx.dot(Xs, self.A_) + self.B_ return transp_Xs def inverse_transform(self, Xs=None, ys=None, Xt=None, yt=None, batch_size=128): r"""Transports target samples :math:`\mathbf{X_t}` onto source samples :math:`\mathbf{X_s}` Parameters ---------- Xs : array-like, shape (n_source_samples, n_features) The training input samples. ys : array-like, shape (n_source_samples,) The class labels Xt : array-like, shape (n_target_samples, n_features) The training input samples. yt : array-like, shape (n_target_samples,) The class labels. If some target samples are unlabelled, fill the :math:`\mathbf{y_t}`'s elements with -1. Warning: Note that, due to this convention -1 cannot be used as a class label batch_size : int, optional (default=128) The batch size for out of sample inverse transform Returns ------- transp_Xt : array-like, shape (n_source_samples, n_features) The transported target samples. """ nx = self.nx # check the necessary inputs parameters are here if check_params(Xt=Xt): transp_Xt = nx.dot(Xt, self.A1_) + self.B1_ return transp_Xt class LinearGWTransport(LinearTransport): r""" OT Gaussian Gromov-Wasserstein linear operator between empirical distributions The function estimates the optimal linear operator that aligns the two empirical distributions optimally wrt the Gromov-Wasserstein distance. This is equivalent to estimating the closed form mapping between two Gaussian distributions :math:`\mathcal{N}(\mu_s,\Sigma_s)` and :math:`\mathcal{N}(\mu_t,\Sigma_t)` as proposed in :ref:`[57] `. The linear operator from source to target :math:`M` .. 
math:: M(\mathbf{x})= \mathbf{A} \mathbf{x} + \mathbf{b} where the matrix :math:`\mathbf{A}` and the vector :math:`\mathbf{b}` are defined in :ref:`[57] `. Parameters ---------- sign_eigs : array-like (n_features), str, optional sign of the eigenvalues of the mapping matrix, by default all signs will be positive. If 'skewness' is provided, the sign of the eigenvalues is selected as the product of the sign of the skewness of the projected data. log : bool, optional record log if True .. _references-lineargwtransport: References ---------- .. [57] Delon, J., Desolneux, A., & Salmona, A. (2022). Gromov–Wasserstein distances between Gaussian distributions. Journal of Applied Probability, 59(4), 1178-1198. """ def __init__(self, log=False, sign_eigs=None, distribution_estimation=distribution_estimation_uniform): self.sign_eigs = sign_eigs self.log = log self.distribution_estimation = distribution_estimation def fit(self, Xs=None, ys=None, Xt=None, yt=None): r"""Build a coupling matrix from source and target sets of samples :math:`(\mathbf{X_s}, \mathbf{y_s})` and :math:`(\mathbf{X_t}, \mathbf{y_t})` Parameters ---------- Xs : array-like, shape (n_source_samples, n_features) The training input samples. ys : array-like, shape (n_source_samples,) The class labels Xt : array-like, shape (n_target_samples, n_features) The training input samples. yt : array-like, shape (n_target_samples,) The class labels. If some target samples are unlabelled, fill the :math:`\mathbf{y_t}`'s elements with -1. Warning: Note that, due to this convention -1 cannot be used as a class label Returns ------- self : object Returns self. """ nx = self._get_backend(Xs, ys, Xt, yt) self.nx = nx self.mu_s = self.distribution_estimation(Xs) self.mu_t = self.distribution_estimation(Xt) # coupling estimation returned_ = empirical_gaussian_gromov_wasserstein_mapping(Xs, Xt, ws=self.mu_s[:, None], wt=self.mu_t[:, None], sign_eigs=self.sign_eigs, log=self.log) # deal with the value of log if self.log: self.A_, self.B_, self.log_ = returned_ else: self.A_, self.B_, = returned_ self.log_ = dict() # re compute inverse mapping returned_1_ = empirical_gaussian_gromov_wasserstein_mapping(Xt, Xs, ws=self.mu_t[:, None], wt=self.mu_s[:, None], sign_eigs=self.sign_eigs, log=self.log) if self.log: self.A1_, self.B1_, self.log_1_ = returned_1_ else: self.A1_, self.B1_, = returned_1_ self.log_ = dict() return self class SinkhornTransport(BaseTransport): """Domain Adaptation OT method based on Sinkhorn Algorithm Parameters ---------- reg_e : float, optional (default=1) Entropic regularization parameter max_iter : int, float, optional (default=1000) The minimum number of iteration before stopping the optimization algorithm if it has not converged tol : float, optional (default=10e-9) The precision required to stop the optimization algorithm. verbose : bool, optional (default=False) Controls the verbosity of the optimization algorithm log : int, optional (default=False) Controls the logs of the optimization algorithm metric : string, optional (default="sqeuclidean") The ground metric for the Wasserstein problem norm : string, optional (default=None) If given, normalize the ground metric to avoid numerical errors that can occur with large metric values. distribution_estimation : callable, optional (defaults to the uniform) The kind of distribution estimation to employ out_of_sample_map : string, optional (default="ferradans") The kind of out of sample mapping to apply to transport samples from a domain into another one. 
Currently the only possible option is "ferradans" which uses the method proposed in :ref:`[6] `. limit_max: float, optional (default=np.infty) Controls the semi supervised mode. Transport between labeled source and target samples of different classes will exhibit a cost defined by this variable Attributes ---------- coupling_ : array-like, shape (n_source_samples, n_target_samples) The optimal coupling log_ : dictionary The dictionary of log, empty dict if parameter log is not True .. _references-sinkhorntransport: References ---------- .. [1] N. Courty; R. Flamary; D. Tuia; A. Rakotomamonjy, "Optimal Transport for Domain Adaptation," in IEEE Transactions on Pattern Analysis and Machine Intelligence , vol.PP, no.99, pp.1-1 .. [2] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013 .. [6] Ferradans, S., Papadakis, N., Peyré, G., & Aujol, J. F. (2014). Regularized discrete optimal transport. SIAM Journal on Imaging Sciences, 7(3), 1853-1882. """ def __init__(self, reg_e=1., method="sinkhorn", max_iter=1000, tol=10e-9, verbose=False, log=False, metric="sqeuclidean", norm=None, distribution_estimation=distribution_estimation_uniform, out_of_sample_map='ferradans', limit_max=np.infty): self.reg_e = reg_e self.method = method self.max_iter = max_iter self.tol = tol self.verbose = verbose self.log = log self.metric = metric self.norm = norm self.limit_max = limit_max self.distribution_estimation = distribution_estimation self.out_of_sample_map = out_of_sample_map def fit(self, Xs=None, ys=None, Xt=None, yt=None): r"""Build a coupling matrix from source and target sets of samples :math:`(\mathbf{X_s}, \mathbf{y_s})` and :math:`(\mathbf{X_t}, \mathbf{y_t})` Parameters ---------- Xs : array-like, shape (n_source_samples, n_features) The training input samples. ys : array-like, shape (n_source_samples,) The class labels Xt : array-like, shape (n_target_samples, n_features) The training input samples. yt : array-like, shape (n_target_samples,) The class labels. If some target samples are unlabelled, fill the :math:`\mathbf{y_t}`'s elements with -1. Warning: Note that, due to this convention -1 cannot be used as a class label Returns ------- self : object Returns self. """ super(SinkhornTransport, self).fit(Xs, ys, Xt, yt) # coupling estimation returned_ = sinkhorn( a=self.mu_s, b=self.mu_t, M=self.cost_, reg=self.reg_e, method=self.method, numItermax=self.max_iter, stopThr=self.tol, verbose=self.verbose, log=self.log) # deal with the value of log if self.log: self.coupling_, self.log_ = returned_ else: self.coupling_ = returned_ self.log_ = dict() return self class EMDTransport(BaseTransport): """Domain Adaptation OT method based on Earth Mover's Distance Parameters ---------- metric : string, optional (default="sqeuclidean") The ground metric for the Wasserstein problem norm : string, optional (default=None) If given, normalize the ground metric to avoid numerical errors that can occur with large metric values. log : int, optional (default=False) Controls the logs of the optimization algorithm distribution_estimation : callable, optional (defaults to the uniform) The kind of distribution estimation to employ out_of_sample_map : string, optional (default="ferradans") The kind of out of sample mapping to apply to transport samples from a domain into another one. Currently the only possible option is "ferradans" which uses the method proposed in :ref:`[6] `. 
limit_max: float, optional (default=10) Controls the semi supervised mode. Transport between labeled source and target samples of different classes will exhibit an infinite cost (10 times the maximum value of the cost matrix) max_iter : int, optional (default=100000) The maximum number of iterations before stopping the optimization algorithm if it has not converged. Attributes ---------- coupling_ : array-like, shape (n_source_samples, n_target_samples) The optimal coupling .. _references-emdtransport: References ---------- .. [1] N. Courty; R. Flamary; D. Tuia; A. Rakotomamonjy, "Optimal Transport for Domain Adaptation," in IEEE Transactions on Pattern Analysis and Machine Intelligence , vol.PP, no.99, pp.1-1 .. [6] Ferradans, S., Papadakis, N., PeyrĂ©, G., & Aujol, J. F. (2014). Regularized discrete optimal transport. SIAM Journal on Imaging Sciences, 7(3), 1853-1882. """ def __init__(self, metric="sqeuclidean", norm=None, log=False, distribution_estimation=distribution_estimation_uniform, out_of_sample_map='ferradans', limit_max=10, max_iter=100000): self.metric = metric self.norm = norm self.log = log self.limit_max = limit_max self.distribution_estimation = distribution_estimation self.out_of_sample_map = out_of_sample_map self.max_iter = max_iter def fit(self, Xs, ys=None, Xt=None, yt=None): r"""Build a coupling matrix from source and target sets of samples :math:`(\mathbf{X_s}, \mathbf{y_s})` and :math:`(\mathbf{X_t}, \mathbf{y_t})` Parameters ---------- Xs : array-like, shape (n_source_samples, n_features) The training input samples. ys : array-like, shape (n_source_samples,) The class labels Xt : array-like, shape (n_target_samples, n_features) The training input samples. yt : array-like, shape (n_target_samples,) The class labels. If some target samples are unlabelled, fill the :math:`\mathbf{y_t}`'s elements with -1. Warning: Note that, due to this convention -1 cannot be used as a class label Returns ------- self : object Returns self. """ super(EMDTransport, self).fit(Xs, ys, Xt, yt) returned_ = emd( a=self.mu_s, b=self.mu_t, M=self.cost_, numItermax=self.max_iter, log=self.log) # coupling estimation if self.log: self.coupling_, self.log_ = returned_ else: self.coupling_ = returned_ self.log_ = dict() return self class SinkhornLpl1Transport(BaseTransport): r"""Domain Adaptation OT method based on sinkhorn algorithm + LpL1 class regularization. Parameters ---------- reg_e : float, optional (default=1) Entropic regularization parameter reg_cl : float, optional (default=0.1) Class regularization parameter max_iter : int, float, optional (default=10) The minimum number of iteration before stopping the optimization algorithm if it has not converged max_inner_iter : int, float, optional (default=200) The number of iteration in the inner loop log : bool, optional (default=False) Controls the logs of the optimization algorithm tol : float, optional (default=10e-9) Stop threshold on error (inner sinkhorn solver) (>0) verbose : bool, optional (default=False) Controls the verbosity of the optimization algorithm metric : string, optional (default="sqeuclidean") The ground metric for the Wasserstein problem norm : string, optional (default=None) If given, normalize the ground metric to avoid numerical errors that can occur with large metric values. 
distribution_estimation : callable, optional (defaults to the uniform) The kind of distribution estimation to employ out_of_sample_map : string, optional (default="ferradans") The kind of out of sample mapping to apply to transport samples from a domain into another one. Currently the only possible option is "ferradans" which uses the method proposed in :ref:`[6] `. limit_max: float, optional (default=np.infty) Controls the semi supervised mode. Transport between labeled source and target samples of different classes will exhibit a cost defined by limit_max. Attributes ---------- coupling_ : array-like, shape (n_source_samples, n_target_samples) The optimal coupling .. _references-sinkhornlpl1transport: References ---------- .. [1] N. Courty; R. Flamary; D. Tuia; A. Rakotomamonjy, "Optimal Transport for Domain Adaptation," in IEEE Transactions on Pattern Analysis and Machine Intelligence , vol.PP, no.99, pp.1-1 .. [2] Rakotomamonjy, A., Flamary, R., & Courty, N. (2015). Generalized conditional gradient: analysis of convergence and applications. arXiv preprint arXiv:1510.06567. .. [6] Ferradans, S., Papadakis, N., PeyrĂ©, G., & Aujol, J. F. (2014). Regularized discrete optimal transport. SIAM Journal on Imaging Sciences, 7(3), 1853-1882. """ def __init__(self, reg_e=1., reg_cl=0.1, max_iter=10, max_inner_iter=200, log=False, tol=10e-9, verbose=False, metric="sqeuclidean", norm=None, distribution_estimation=distribution_estimation_uniform, out_of_sample_map='ferradans', limit_max=np.infty): self.reg_e = reg_e self.reg_cl = reg_cl self.max_iter = max_iter self.max_inner_iter = max_inner_iter self.tol = tol self.log = log self.verbose = verbose self.metric = metric self.norm = norm self.distribution_estimation = distribution_estimation self.out_of_sample_map = out_of_sample_map self.limit_max = limit_max def fit(self, Xs, ys=None, Xt=None, yt=None): r"""Build a coupling matrix from source and target sets of samples :math:`(\mathbf{X_s}, \mathbf{y_s})` and :math:`(\mathbf{X_t}, \mathbf{y_t})` Parameters ---------- Xs : array-like, shape (n_source_samples, n_features) The training input samples. ys : array-like, shape (n_source_samples,) The class labels Xt : array-like, shape (n_target_samples, n_features) The training input samples. yt : array-like, shape (n_target_samples,) The class labels. If some target samples are unlabelled, fill the :math:`\mathbf{y_t}`'s elements with -1. Warning: Note that, due to this convention -1 cannot be used as a class label Returns ------- self : object Returns self. """ # check the necessary inputs parameters are here if check_params(Xs=Xs, Xt=Xt, ys=ys): super(SinkhornLpl1Transport, self).fit(Xs, ys, Xt, yt) returned_ = sinkhorn_lpl1_mm( a=self.mu_s, labels_a=ys, b=self.mu_t, M=self.cost_, reg=self.reg_e, eta=self.reg_cl, numItermax=self.max_iter, numInnerItermax=self.max_inner_iter, stopInnerThr=self.tol, verbose=self.verbose, log=self.log) # deal with the value of log if self.log: self.coupling_, self.log_ = returned_ else: self.coupling_ = returned_ self.log_ = dict() return self class EMDLaplaceTransport(BaseTransport): """Domain Adaptation OT method based on Earth Mover's Distance with Laplacian regularization Parameters ---------- reg_type : string optional (default='pos') Type of the regularization term: 'pos' and 'disp' for regularization term defined in :ref:`[2] ` and :ref:`[6] `, respectively. 
reg_lap : float, optional (default=1) Laplacian regularization parameter reg_src : float, optional (default=0.5) Source relative importance in regularization metric : string, optional (default="sqeuclidean") The ground metric for the Wasserstein problem norm : string, optional (default=None) If given, normalize the ground metric to avoid numerical errors that can occur with large metric values. similarity : string, optional (default="knn") The similarity to use either knn or gaussian similarity_param : int or float, optional (default=None) Parameter for the similarity: number of nearest neighbors or bandwidth if similarity="knn" or "gaussian", respectively. If None is provided, it is set to 3 or the average pairwise squared Euclidean distance, respectively. max_iter : int, optional (default=100) Max number of BCD iterations tol : float, optional (default=1e-5) Stop threshold on relative loss decrease (>0) max_inner_iter : int, optional (default=10) Max number of iterations (inner CG solver) inner_tol : float, optional (default=1e-6) Stop threshold on error (inner CG solver) (>0) log : int, optional (default=False) Controls the logs of the optimization algorithm distribution_estimation : callable, optional (defaults to the uniform) The kind of distribution estimation to employ out_of_sample_map : string, optional (default="ferradans") The kind of out of sample mapping to apply to transport samples from a domain into another one. Currently the only possible option is "ferradans" which uses the method proposed in :ref:`[6] `. Attributes ---------- coupling_ : array-like, shape (n_source_samples, n_target_samples) The optimal coupling .. _references-emdlaplacetransport: References ---------- .. [1] N. Courty; R. Flamary; D. Tuia; A. Rakotomamonjy, "Optimal Transport for Domain Adaptation," in IEEE Transactions on Pattern Analysis and Machine Intelligence , vol.PP, no.99, pp.1-1 .. [2] R. Flamary, N. Courty, D. Tuia, A. Rakotomamonjy, "Optimal transport with Laplacian regularization: Applications to domain adaptation and shape matching," in NIPS Workshop on Optimal Transport and Machine Learning OTML, 2014. .. [6] Ferradans, S., Papadakis, N., PeyrĂ©, G., & Aujol, J. F. (2014). Regularized discrete optimal transport. SIAM Journal on Imaging Sciences, 7(3), 1853-1882. """ def __init__(self, reg_type='pos', reg_lap=1., reg_src=1., metric="sqeuclidean", norm=None, similarity="knn", similarity_param=None, max_iter=100, tol=1e-9, max_inner_iter=100000, inner_tol=1e-9, log=False, verbose=False, distribution_estimation=distribution_estimation_uniform, out_of_sample_map='ferradans'): self.reg = reg_type self.reg_lap = reg_lap self.reg_src = reg_src self.metric = metric self.norm = norm self.similarity = similarity self.sim_param = similarity_param self.max_iter = max_iter self.tol = tol self.max_inner_iter = max_inner_iter self.inner_tol = inner_tol self.log = log self.verbose = verbose self.distribution_estimation = distribution_estimation self.out_of_sample_map = out_of_sample_map def fit(self, Xs, ys=None, Xt=None, yt=None): r"""Build a coupling matrix from source and target sets of samples :math:`(\mathbf{X_s}, \mathbf{y_s})` and :math:`(\mathbf{X_t}, \mathbf{y_t})` Parameters ---------- Xs : array-like, shape (n_source_samples, n_features) The training input samples. ys : array-like, shape (n_source_samples,) The class labels Xt : array-like, shape (n_target_samples, n_features) The training input samples. yt : array-like, shape (n_target_samples,) The class labels. 
If some target samples are unlabelled, fill the :math:`\mathbf{y_t}`'s elements with -1. Warning: Note that, due to this convention -1 cannot be used as a class label Returns ------- self : object Returns self. """ super(EMDLaplaceTransport, self).fit(Xs, ys, Xt, yt) returned_ = emd_laplace(a=self.mu_s, b=self.mu_t, xs=self.xs_, xt=self.xt_, M=self.cost_, sim=self.similarity, sim_param=self.sim_param, reg=self.reg, eta=self.reg_lap, alpha=self.reg_src, numItermax=self.max_iter, stopThr=self.tol, numInnerItermax=self.max_inner_iter, stopInnerThr=self.inner_tol, log=self.log, verbose=self.verbose) # coupling estimation if self.log: self.coupling_, self.log_ = returned_ else: self.coupling_ = returned_ self.log_ = dict() return self class SinkhornL1l2Transport(BaseTransport): """Domain Adaptation OT method based on sinkhorn algorithm + L1L2 class regularization. Parameters ---------- reg_e : float, optional (default=1) Entropic regularization parameter reg_cl : float, optional (default=0.1) Class regularization parameter max_iter : int, float, optional (default=10) The maximum number of iterations before stopping the optimization algorithm if it has not converged max_inner_iter : int, float, optional (default=200) The number of iterations in the inner loop tol : float, optional (default=10e-9) Stop threshold on error (inner sinkhorn solver) (>0) verbose : bool, optional (default=False) Controls the verbosity of the optimization algorithm log : bool, optional (default=False) Controls the logs of the optimization algorithm metric : string, optional (default="sqeuclidean") The ground metric for the Wasserstein problem norm : string, optional (default=None) If given, normalize the ground metric to avoid numerical errors that can occur with large metric values. distribution_estimation : callable, optional (defaults to the uniform) The kind of distribution estimation to employ out_of_sample_map : string, optional (default="ferradans") The kind of out of sample mapping to apply to transport samples from a domain into another one. Currently the only possible option is "ferradans" which uses the method proposed in :ref:`[6] `. limit_max: float, optional (default=10) Controls the semi supervised mode. Transport between labeled source and target samples of different classes will exhibit a prohibitively large cost (`limit_max` times the maximum value of the cost matrix) Attributes ---------- coupling_ : array-like, shape (n_source_samples, n_target_samples) The optimal coupling log_ : dictionary The dictionary of log, empty dict if parameter log is not True .. _references-sinkhornl1l2transport: References ---------- .. [1] N. Courty; R. Flamary; D. Tuia; A. Rakotomamonjy, "Optimal Transport for Domain Adaptation," in IEEE Transactions on Pattern Analysis and Machine Intelligence , vol.PP, no.99, pp.1-1 .. [2] Rakotomamonjy, A., Flamary, R., & Courty, N. (2015). Generalized conditional gradient: analysis of convergence and applications. arXiv preprint arXiv:1510.06567. .. [6] Ferradans, S., Papadakis, N., Peyré, G., & Aujol, J. F. (2014). Regularized discrete optimal transport. SIAM Journal on Imaging Sciences, 7(3), 1853-1882. 
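Examples
--------
A minimal usage sketch on small random inputs. The data and parameter
values below are illustrative only (the entries of the coupling depend
on the solver); only the shapes are checked:

>>> import numpy as np
>>> from ot.da import SinkhornL1l2Transport
>>> rng = np.random.RandomState(42)
>>> Xs, ys = rng.randn(10, 2), rng.randint(0, 2, 10)
>>> Xt = rng.randn(8, 2)
>>> otda = SinkhornL1l2Transport(reg_e=1., reg_cl=0.1)
>>> otda = otda.fit(Xs=Xs, ys=ys, Xt=Xt)
>>> otda.coupling_.shape
(10, 8)
>>> otda.transform(Xs=Xs).shape
(10, 2)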
""" def __init__(self, reg_e=1., reg_cl=0.1, max_iter=10, max_inner_iter=200, tol=10e-9, verbose=False, log=False, metric="sqeuclidean", norm=None, distribution_estimation=distribution_estimation_uniform, out_of_sample_map='ferradans', limit_max=10): self.reg_e = reg_e self.reg_cl = reg_cl self.max_iter = max_iter self.max_inner_iter = max_inner_iter self.tol = tol self.verbose = verbose self.log = log self.metric = metric self.norm = norm self.distribution_estimation = distribution_estimation self.out_of_sample_map = out_of_sample_map self.limit_max = limit_max def fit(self, Xs, ys=None, Xt=None, yt=None): r"""Build a coupling matrix from source and target sets of samples :math:`(\mathbf{X_s}, \mathbf{y_s})` and :math:`(\mathbf{X_t}, \mathbf{y_t})` Parameters ---------- Xs : array-like, shape (n_source_samples, n_features) The training input samples. ys : array-like, shape (n_source_samples,) The class labels Xt : array-like, shape (n_target_samples, n_features) The training input samples. yt : array-like, shape (n_target_samples,) The class labels. If some target samples are unlabelled, fill the :math:`\mathbf{y_t}`'s elements with -1. Warning: Note that, due to this convention -1 cannot be used as a class label Returns ------- self : object Returns self. """ # check the necessary inputs parameters are here if check_params(Xs=Xs, Xt=Xt, ys=ys): super(SinkhornL1l2Transport, self).fit(Xs, ys, Xt, yt) returned_ = sinkhorn_l1l2_gl( a=self.mu_s, labels_a=ys, b=self.mu_t, M=self.cost_, reg=self.reg_e, eta=self.reg_cl, numItermax=self.max_iter, numInnerItermax=self.max_inner_iter, stopInnerThr=self.tol, verbose=self.verbose, log=self.log) # deal with the value of log if self.log: self.coupling_, self.log_ = returned_ else: self.coupling_ = returned_ self.log_ = dict() return self class MappingTransport(BaseEstimator): """MappingTransport: DA methods that aims at jointly estimating a optimal transport coupling and the associated mapping Parameters ---------- mu : float, optional (default=1) Weight for the linear OT loss (>0) eta : float, optional (default=0.001) Regularization term for the linear mapping `L` (>0) bias : bool, optional (default=False) Estimate linear mapping with constant bias metric : string, optional (default="sqeuclidean") The ground metric for the Wasserstein problem norm : string, optional (default=None) If given, normalize the ground metric to avoid numerical errors that can occur with large metric values. 
kernel : string, optional (default="linear") The kernel to use either linear or gaussian sigma : float, optional (default=1) The gaussian kernel parameter max_iter : int, optional (default=100) Max number of BCD iterations tol : float, optional (default=1e-5) Stop threshold on relative loss decrease (>0) max_inner_iter : int, optional (default=10) Max number of iterations (inner CG solver) inner_tol : float, optional (default=1e-6) Stop threshold on error (inner CG solver) (>0) log : bool, optional (default=False) record log if True verbose : bool, optional (default=False) Print information along iterations verbose2 : bool, optional (default=False) Print information along iterations Attributes ---------- coupling_ : array-like, shape (n_source_samples, n_target_samples) The optimal coupling mapping_ : The associated mapping - array-like, shape (`n_features` (+ 1), `n_features`), (if bias) for kernel == linear - array-like, shape (`n_source_samples` (+ 1), `n_features`), (if bias) for kernel == gaussian log_ : dictionary The dictionary of log, empty dict if parameter log is not True References ---------- .. [8] M. Perrot, N. Courty, R. Flamary, A. Habrard, "Mapping estimation for discrete optimal transport", Neural Information Processing Systems (NIPS), 2016. """ def __init__(self, mu=1, eta=0.001, bias=False, metric="sqeuclidean", norm=None, kernel="linear", sigma=1, max_iter=100, tol=1e-5, max_inner_iter=10, inner_tol=1e-6, log=False, verbose=False, verbose2=False): self.metric = metric self.norm = norm self.mu = mu self.eta = eta self.bias = bias self.kernel = kernel self.sigma = sigma self.max_iter = max_iter self.tol = tol self.max_inner_iter = max_inner_iter self.inner_tol = inner_tol self.log = log self.verbose = verbose self.verbose2 = verbose2 def fit(self, Xs=None, ys=None, Xt=None, yt=None): r"""Builds an optimal coupling and estimates the associated mapping from source and target sets of samples :math:`(\mathbf{X_s}, \mathbf{y_s})` and :math:`(\mathbf{X_t}, \mathbf{y_t})` Parameters ---------- Xs : array-like, shape (n_source_samples, n_features) The training input samples. ys : array-like, shape (n_source_samples,) The class labels Xt : array-like, shape (n_target_samples, n_features) The training input samples. yt : array-like, shape (n_target_samples,) The class labels. If some target samples are unlabelled, fill the :math:`\mathbf{y_t}`'s elements with -1. 
Warning: Note that, due to this convention -1 cannot be used as a class label Returns ------- self : object Returns self """ self._get_backend(Xs, ys, Xt, yt) # check the necessary inputs parameters are here if check_params(Xs=Xs, Xt=Xt): self.xs_ = Xs self.xt_ = Xt if self.kernel == "linear": returned_ = joint_OT_mapping_linear( Xs, Xt, mu=self.mu, eta=self.eta, bias=self.bias, verbose=self.verbose, verbose2=self.verbose2, numItermax=self.max_iter, numInnerItermax=self.max_inner_iter, stopThr=self.tol, stopInnerThr=self.inner_tol, log=self.log) elif self.kernel == "gaussian": returned_ = joint_OT_mapping_kernel( Xs, Xt, mu=self.mu, eta=self.eta, bias=self.bias, sigma=self.sigma, verbose=self.verbose, verbose2=self.verbose, numItermax=self.max_iter, numInnerItermax=self.max_inner_iter, stopInnerThr=self.inner_tol, stopThr=self.tol, log=self.log) # deal with the value of log if self.log: self.coupling_, self.mapping_, self.log_ = returned_ else: self.coupling_, self.mapping_ = returned_ self.log_ = dict() return self def transform(self, Xs): r"""Transports source samples :math:`\mathbf{X_s}` onto target ones :math:`\mathbf{X_t}` Parameters ---------- Xs : array-like, shape (n_source_samples, n_features) The training input samples. Returns ------- transp_Xs : array-like, shape (n_source_samples, n_features) The transport source samples. """ nx = self.nx # check the necessary inputs parameters are here if check_params(Xs=Xs): if nx.array_equal(self.xs_, Xs): # perform standard barycentric mapping transp = self.coupling_ / nx.sum(self.coupling_, 1)[:, None] # set nans to 0 transp[~ nx.isfinite(transp)] = 0 # compute transported samples transp_Xs = nx.dot(transp, self.xt_) else: if self.kernel == "gaussian": K = kernel(Xs, self.xs_, method=self.kernel, sigma=self.sigma) elif self.kernel == "linear": K = Xs if self.bias: K = nx.concatenate( [K, nx.ones((Xs.shape[0], 1), type_as=K)], axis=1 ) transp_Xs = nx.dot(K, self.mapping_) return transp_Xs class UnbalancedSinkhornTransport(BaseTransport): """Domain Adaptation unbalanced OT method based on sinkhorn algorithm Parameters ---------- reg_e : float, optional (default=1) Entropic regularization parameter reg_m : float, optional (default=0.1) Mass regularization parameter method : str method used for the solver either 'sinkhorn', 'sinkhorn_stabilized' or 'sinkhorn_epsilon_scaling', see those function for specific parameters max_iter : int, float, optional (default=10) The minimum number of iteration before stopping the optimization algorithm if it has not converged tol : float, optional (default=10e-9) Stop threshold on error (inner sinkhorn solver) (>0) verbose : bool, optional (default=False) Controls the verbosity of the optimization algorithm log : bool, optional (default=False) Controls the logs of the optimization algorithm metric : string, optional (default="sqeuclidean") The ground metric for the Wasserstein problem norm : string, optional (default=None) If given, normalize the ground metric to avoid numerical errors that can occur with large metric values. distribution_estimation : callable, optional (defaults to the uniform) The kind of distribution estimation to employ out_of_sample_map : string, optional (default="ferradans") The kind of out of sample mapping to apply to transport samples from a domain into another one. Currently the only possible option is "ferradans" which uses the method proposed in :ref:`[6] `. limit_max: float, optional (default=10) Controls the semi supervised mode. 
Transport between labeled source and target samples of different classes will exhibit an infinite cost (10 times the maximum value of the cost matrix) Attributes ---------- coupling_ : array-like, shape (n_source_samples, n_target_samples) The optimal coupling log_ : dictionary The dictionary of log, empty dict if parameter log is not True .. _references-unbalancedsinkhorntransport: References ---------- .. [1] Chizat, L., PeyrĂ©, G., Schmitzer, B., & Vialard, F. X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816. .. [6] Ferradans, S., Papadakis, N., PeyrĂ©, G., & Aujol, J. F. (2014). Regularized discrete optimal transport. SIAM Journal on Imaging Sciences, 7(3), 1853-1882. """ def __init__(self, reg_e=1., reg_m=0.1, method='sinkhorn', max_iter=10, tol=1e-9, verbose=False, log=False, metric="sqeuclidean", norm=None, distribution_estimation=distribution_estimation_uniform, out_of_sample_map='ferradans', limit_max=10): self.reg_e = reg_e self.reg_m = reg_m self.method = method self.max_iter = max_iter self.tol = tol self.verbose = verbose self.log = log self.metric = metric self.norm = norm self.distribution_estimation = distribution_estimation self.out_of_sample_map = out_of_sample_map self.limit_max = limit_max def fit(self, Xs, ys=None, Xt=None, yt=None): r"""Build a coupling matrix from source and target sets of samples :math:`(\mathbf{X_s}, \mathbf{y_s})` and :math:`(\mathbf{X_t}, \mathbf{y_t})` Parameters ---------- Xs : array-like, shape (n_source_samples, n_features) The training input samples. ys : array-like, shape (n_source_samples,) The class labels Xt : array-like, shape (n_target_samples, n_features) The training input samples. yt : array-like, shape (n_target_samples,) The class labels. If some target samples are unlabelled, fill the :math:`\mathbf{y_t}`'s elements with -1. Warning: Note that, due to this convention -1 cannot be used as a class label Returns ------- self : object Returns self. """ # check the necessary inputs parameters are here if check_params(Xs=Xs, Xt=Xt): super(UnbalancedSinkhornTransport, self).fit(Xs, ys, Xt, yt) returned_ = sinkhorn_unbalanced( a=self.mu_s, b=self.mu_t, M=self.cost_, reg=self.reg_e, reg_m=self.reg_m, method=self.method, numItermax=self.max_iter, stopThr=self.tol, verbose=self.verbose, log=self.log) # deal with the value of log if self.log: self.coupling_, self.log_ = returned_ else: self.coupling_ = returned_ self.log_ = dict() return self class JCPOTTransport(BaseTransport): """Domain Adaptation OT method for multi-source target shift based on Wasserstein barycenter algorithm. Parameters ---------- reg_e : float, optional (default=1) Entropic regularization parameter max_iter : int, float, optional (default=10) The minimum number of iteration before stopping the optimization algorithm if it has not converged tol : float, optional (default=10e-9) Stop threshold on error (inner sinkhorn solver) (>0) verbose : bool, optional (default=False) Controls the verbosity of the optimization algorithm log : bool, optional (default=False) Controls the logs of the optimization algorithm metric : string, optional (default="sqeuclidean") The ground metric for the Wasserstein problem norm : string, optional (default=None) If given, normalize the ground metric to avoid numerical errors that can occur with large metric values. 
distribution_estimation : callable, optional (defaults to the uniform) The kind of distribution estimation to employ out_of_sample_map : string, optional (default="ferradans") The kind of out of sample mapping to apply to transport samples from a domain into another one. Currently the only possible option is "ferradans" which uses the method proposed in :ref:`[6] `. Attributes ---------- coupling_ : list of array-like objects, shape K x (n_source_samples, n_target_samples) A set of optimal couplings between each source domain and the target domain proportions_ : array-like, shape (n_classes,) Estimated class proportions in the target domain log_ : dictionary The dictionary of log, empty dict if parameter log is not True .. _references-jcpottransport: References ---------- .. [1] Ievgen Redko, Nicolas Courty, Rémi Flamary, Devis Tuia "Optimal transport for multi-source domain adaptation under target shift", International Conference on Artificial Intelligence and Statistics (AISTATS), vol. 89, p.849-858, 2019. .. [6] Ferradans, S., Papadakis, N., Peyré, G., & Aujol, J. F. (2014). Regularized discrete optimal transport. SIAM Journal on Imaging Sciences, 7(3), 1853-1882. """ def __init__(self, reg_e=.1, max_iter=10, tol=10e-9, verbose=False, log=False, metric="sqeuclidean", out_of_sample_map='ferradans'): self.reg_e = reg_e self.max_iter = max_iter self.tol = tol self.verbose = verbose self.log = log self.metric = metric self.out_of_sample_map = out_of_sample_map def fit(self, Xs, ys=None, Xt=None, yt=None): r"""Build coupling matrices from a list of source and target sets of samples :math:`(\mathbf{X_s}, \mathbf{y_s})` and :math:`(\mathbf{X_t}, \mathbf{y_t})` Parameters ---------- Xs : list of K array-like objects, shape K x (nk_source_samples, n_features) A list of the training input samples. ys : list of K array-like objects, shape K x (nk_source_samples,) A list of the class labels Xt : array-like, shape (n_target_samples, n_features) The training input samples. yt : array-like, shape (n_target_samples,) The class labels. If some target samples are unlabelled, fill the :math:`\mathbf{y_t}`'s elements with -1. Warning: Note that, due to this convention -1 cannot be used as a class label Returns ------- self : object Returns self. """ self._get_backend(*Xs, *ys, Xt, yt) # check the necessary inputs parameters are here if check_params(Xs=Xs, Xt=Xt, ys=ys): self.xs_ = Xs self.xt_ = Xt returned_ = jcpot_barycenter(Xs=Xs, Ys=ys, Xt=Xt, reg=self.reg_e, metric=self.metric, numItermax=self.max_iter, stopThr=self.tol, verbose=self.verbose, log=True) self.coupling_ = returned_[1]['gamma'] # deal with the value of log if self.log: self.proportions_, self.log_ = returned_ else: self.proportions_ = returned_[0] self.log_ = dict() return self def transform(self, Xs=None, ys=None, Xt=None, yt=None, batch_size=128): r"""Transports source samples :math:`\mathbf{X_s}` onto target ones :math:`\mathbf{X_t}` Parameters ---------- Xs : list of K array-like objects, shape K x (nk_source_samples, n_features) A list of the training input samples. ys : list of K array-like objects, shape K x (nk_source_samples,) A list of the class labels Xt : array-like, shape (n_target_samples, n_features) The training input samples. yt : array-like, shape (n_target_samples,) The class labels. If some target samples are unlabelled, fill the :math:`\mathbf{y_t}`'s elements with -1. 
Warning: Note that, due to this convention -1 cannot be used as a class label batch_size : int, optional (default=128) The batch size for out of sample inverse transform """ nx = self.nx transp_Xs = [] # check the necessary inputs parameters are here if check_params(Xs=Xs): if all([nx.allclose(x, y) for x, y in zip(self.xs_, Xs)]): # perform standard barycentric mapping for each source domain for coupling in self.coupling_: transp = coupling / nx.sum(coupling, 1)[:, None] # set nans to 0 transp[~ nx.isfinite(transp)] = 0 # compute transported samples transp_Xs.append(nx.dot(transp, self.xt_)) else: # perform out of sample mapping indices = nx.arange(Xs.shape[0]) batch_ind = [ indices[i:i + batch_size] for i in range(0, len(indices), batch_size)] transp_Xs = [] for bi in batch_ind: transp_Xs_ = [] # get the nearest neighbor in the sources domains xs = nx.concatenate(self.xs_, axis=0) idx = nx.argmin(dist(Xs[bi], xs), axis=1) # transport the source samples for coupling in self.coupling_: transp = coupling / nx.sum(coupling, 1)[:, None] transp[~ nx.isfinite(transp)] = 0 transp_Xs_.append(nx.dot(transp, self.xt_)) transp_Xs_ = nx.concatenate(transp_Xs_, axis=0) # define the transported points transp_Xs_ = transp_Xs_[idx, :] + Xs[bi] - xs[idx, :] transp_Xs.append(transp_Xs_) transp_Xs = nx.concatenate(transp_Xs, axis=0) return transp_Xs def transform_labels(self, ys=None): r"""Propagate source labels :math:`\mathbf{y_s}` to obtain target labels as in :ref:`[27] ` Parameters ---------- ys : list of K array-like objects, shape K x (nk_source_samples,) A list of the class labels Returns ------- yt : array-like, shape (n_target_samples, nb_classes) Estimated soft target labels. .. _references-jcpottransport-transform-labels: References ---------- .. [27] Ievgen Redko, Nicolas Courty, RĂ©mi Flamary, Devis Tuia "Optimal transport for multi-source domain adaptation under target shift", International Conference on Artificial Intelligence and Statistics (AISTATS), 2019. 
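Examples
--------
A self-contained sketch of the propagation step performed by this
method, using a hypothetical coupling for a single source domain with
two samples of labels 0 and 1 (the values are illustrative only):

>>> import numpy as np
>>> coupling = np.array([[0.2, 0.3], [0.5, 0.0]])
>>> transp = coupling / coupling.sum(1, keepdims=True)  # row-normalized coupling
>>> D1 = np.array([[1., 0.], [0., 1.]])  # one-hot encoding of the source labels
>>> (D1 @ transp).T  # soft class scores for each of the 2 target samples
array([[0.4, 1. ],
       [0.6, 0. ]])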
""" nx = self.nx # check the necessary inputs parameters are here if check_params(ys=ys): yt = nx.zeros( (len(nx.unique(nx.concatenate(ys))), self.xt_.shape[0]), type_as=ys[0] ) for i in range(len(ys)): ysTemp = label_normalization(ys[i]) classes = nx.unique(ysTemp) n = len(classes) ns = len(ysTemp) # perform label propagation transp = self.coupling_[i] / nx.sum(self.coupling_[i], 1)[:, None] # set nans to 0 transp[~ nx.isfinite(transp)] = 0 if self.log: D1 = self.log_['D1'][i] else: D1 = nx.zeros((n, ns), type_as=transp) for c in classes: D1[int(c), ysTemp == c] = 1 # compute propagated labels yt = yt + nx.dot(D1, transp) / len(ys) return yt.T def inverse_transform_labels(self, yt=None): r"""Propagate target labels :math:`\mathbf{y_t}` to obtain estimated source labels :math:`\mathbf{y_s}` Parameters ---------- yt : array-like, shape (n_target_samples,) The target class labels Returns ------- transp_ys : list of K array-like objects, shape K x (nk_source_samples, nb_classes) A list of estimated soft source labels """ nx = self.nx # check the necessary inputs parameters are here if check_params(yt=yt): transp_ys = [] ytTemp = label_normalization(yt) classes = nx.unique(ytTemp) n = len(classes) D1 = nx.zeros((n, len(ytTemp)), type_as=self.coupling_[0]) for c in classes: D1[int(c), ytTemp == c] = 1 for i in range(len(self.xs_)): # perform label propagation transp = self.coupling_[i] / nx.sum(self.coupling_[i], 1)[:, None] # set nans to 0 transp[~ nx.isfinite(transp)] = 0 # compute propagated labels transp_ys.append(nx.dot(D1, transp.T).T) return transp_ys class NearestBrenierPotential(BaseTransport): r""" Smooth Strongly Convex Nearest Brenier Potentials (SSNB) is a method from :ref:`[58]` that computes an l-strongly convex potential :math:`\varphi` with an L-Lipschitz gradient such that :math:`\nabla \varphi \# \mu \approx \nu`. This regularity can be enforced only on the components of a partition of the ambient space (encoded by point classes), which is a relaxation compared to imposing global regularity. SSNBs approach the target measure by solving the optimisation problem: .. math:: :nowrap: \begin{gather*} \varphi \in \text{argmin}_{\varphi \in \mathcal{F}}\ \text{W}_2(\nabla \varphi \#\mu_s, \mu_t), \end{gather*} where :math:`\mathcal{F}` is the space functions that are on every set :math:`E_k` l-strongly convex with an L-Lipschitz gradient, given :math:`(E_k)_{k \in [K]}` a partition of the ambient source space. The problem is solved on "fitting" source and target data via a convex Quadratically Constrained Quadratic Program, yielding the values :code:`phi` and the gradients :code:`G` at at the source points. The images of "new" source samples are then found by solving a (simpler) Quadratically Constrained Linear Program at each point, using the fitting "parameters" :code:`phi` and :code:`G`. We provide two possible images, which correspond to "lower" and "upper potentials" (:ref:`[59]`, Theorem 3.14). Each of these two images are optimal solutions of the SSNB problem, and can be used in practice. .. warning:: This function requires the CVXPY library .. warning:: Accepts any backend but will convert to Numpy then back to the backend. 
Parameters ---------- strongly_convex_constant : float, optional constant for the strong convexity of the input potential phi, defaults to 0.6 gradient_lipschitz_constant : float, optional constant for the Lipschitz property of the input gradient G, defaults to 1.4 its: int, optional number of iterations, defaults to 100 log : bool, optional record log if true seed: int or RandomState or None, optional Seed used for random number generator (for the initialisation in :code:`fit`. References ---------- .. [58] François-Pierre Paty, Alexandre d’Aspremont, and Marco Cuturi. Regularity as regularization: Smooth and strongly convex brenier potentials in optimal transport. In International Conference on Artificial Intelligence and Statistics, pages 1222–1232. PMLR, 2020. .. [59] Adrien B Taylor. Convex interpolation and performance estimation of first-order methods for convex optimization. PhD thesis, Catholic University of Louvain, Louvain-la-Neuve, Belgium, 2017. See Also -------- ot.mapping.nearest_brenier_potential_fit : Fitting the SSNB on source and target data ot.mapping.nearest_brenier_potential_predict_bounds : Predicting SSNB images on new source data """ def __init__(self, strongly_convex_constant=0.6, gradient_lipschitz_constant=1.4, log=False, its=100, seed=None): self.strongly_convex_constant = strongly_convex_constant self.gradient_lipschitz_constant = gradient_lipschitz_constant self.log = log self.its = its self.seed = seed self.fit_log, self.predict_log = None, None self.phi, self.G = None, None self.fit_Xs, self.fit_ys, self.fit_Xt = None, None, None def fit(self, Xs=None, ys=None, Xt=None, yt=None): r""" Fits the Smooth Strongly Convex Nearest Brenier Potential [58] to the source data :code:`Xs` to the target data :code:`Xt`, with the partition given by the (optional) labels :code:`ys`. Wrapper for :code:`ot.mapping.nearest_brenier_potential_fit`. .. warning:: This function requires the CVXPY library .. warning:: Accepts any backend but will convert to Numpy then back to the backend. Parameters ---------- Xs : array-like (n, d) source points used to compute the optimal values phi and G ys : array-like (n,), optional classes of the reference points, defaults to a single class Xt : array-like (n, d) values of the gradients at the reference points X yt : optional ignored. Returns ------- self : object Returns self. References ---------- .. [58] François-Pierre Paty, Alexandre d’Aspremont, and Marco Cuturi. Regularity as regularization: Smooth and strongly convex brenier potentials in optimal transport. In International Conference on Artificial Intelligence and Statistics, pages 1222–1232. PMLR, 2020. See Also -------- ot.mapping.nearest_brenier_potential_fit : Fitting the SSNB on source and target data """ self.fit_Xs, self.fit_ys, self.fit_Xt = Xs, ys, Xt returned = nearest_brenier_potential_fit(Xs, Xt, X_classes=ys, strongly_convex_constant=self.strongly_convex_constant, gradient_lipschitz_constant=self.gradient_lipschitz_constant, its=self.its, log=self.log) if self.log: self.phi, self.G, self.fit_log = returned else: self.phi, self.G = returned return self def transform(self, Xs, ys=None): r""" Computes the images of the new source samples :code:`Xs` of classes :code:`ys` by the fitted Smooth Strongly Convex Nearest Brenier Potentials (SSNB) :ref:`[58]`. The output is the images of two SSNB optimal maps, called 'lower' and 'upper' potentials (from :ref:`[59]`, Theorem 3.14). Wrapper for :code:`nearest_brenier_potential_predict_bounds`. .. 
warning:: This function requires the CVXPY library .. warning:: Accepts any backend but will convert to Numpy then back to the backend. Parameters ---------- Xs : array-like (m, d) input source points ys : : array_like (m,), optional classes of the input source points, defaults to a single class Returns ------- G_lu : array-like (2, m, d) gradients of the lower and upper bounding potentials at Y (images of the source inputs) References ---------- .. [58] François-Pierre Paty, Alexandre d’Aspremont, and Marco Cuturi. Regularity as regularization: Smooth and strongly convex brenier potentials in optimal transport. In International Conference on Artificial Intelligence and Statistics, pages 1222–1232. PMLR, 2020. .. [59] Adrien B Taylor. Convex interpolation and performance estimation of first-order methods for convex optimization. PhD thesis, Catholic University of Louvain, Louvain-la-Neuve, Belgium, 2017. See Also -------- ot.mapping.nearest_brenier_potential_predict_bounds : Predicting SSNB images on new source data """ returned = nearest_brenier_potential_predict_bounds( self.fit_Xs, self.phi, self.G, Xs, X_classes=self.fit_ys, Y_classes=ys, strongly_convex_constant=self.strongly_convex_constant, gradient_lipschitz_constant=self.gradient_lipschitz_constant, log=self.log) if self.log: _, G_lu, self.predict_log = returned else: _, G_lu = returned return G_lu python-pot-0.9.3+dfsg/ot/datasets.py000066400000000000000000000122771455713015700174100ustar00rootroot00000000000000""" Simple example datasets """ # Author: Remi Flamary # # License: MIT License import numpy as np import scipy as sp from .utils import check_random_state, deprecated def make_1D_gauss(n, m, s): """return a 1D histogram for a gaussian distribution (`n` bins, mean `m` and std `s`) Parameters ---------- n : int number of bins in the histogram m : float mean value of the gaussian distribution s : float standard deviation of the gaussian distribution Returns ------- h : ndarray (`n`,) 1D histogram for a gaussian distribution """ x = np.arange(n, dtype=np.float64) h = np.exp(-(x - m) ** 2 / (2 * s ** 2)) return h / h.sum() @deprecated() def get_1D_gauss(n, m, sigma): """ Deprecated see make_1D_gauss """ return make_1D_gauss(n, m, sigma) def make_2D_samples_gauss(n, m, sigma, random_state=None): r"""Return `n` samples drawn from 2D gaussian :math:`\mathcal{N}(m, \sigma)` Parameters ---------- n : int number of samples to make m : ndarray, shape (2,) mean value of the gaussian distribution sigma : ndarray, shape (2, 2) covariance matrix of the gaussian distribution random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : ndarray, shape (`n`, 2) n samples drawn from :math:`\mathcal{N}(m, \sigma)`. 
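Examples
--------
A short sketch; the sample values depend on the seed, so only the
shape is checked here:

>>> import numpy as np
>>> from ot.datasets import make_2D_samples_gauss
>>> X = make_2D_samples_gauss(4, np.array([0., 0.]), np.eye(2), random_state=42)
>>> X.shape
(4, 2)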
""" generator = check_random_state(random_state) if np.isscalar(sigma): sigma = np.array([sigma, ]) if len(sigma) > 1: P = sp.linalg.sqrtm(sigma) res = generator.randn(n, 2).dot(P) + m else: res = generator.randn(n, 2) * np.sqrt(sigma) + m return res @deprecated() def get_2D_samples_gauss(n, m, sigma, random_state=None): """ Deprecated see make_2D_samples_gauss """ return make_2D_samples_gauss(n, m, sigma, random_state=None) def make_data_classif(dataset, n, nz=.5, theta=0, p=.5, random_state=None, **kwargs): """Dataset generation for classification problems Parameters ---------- dataset : str type of classification problem (see code) n : int number of training samples nz : float noise level (>0) p : float proportion of one class in the binary setting random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : ndarray, shape (n, d) `n` observation of size `d` y : ndarray, shape (n,) labels of the samples. """ generator = check_random_state(random_state) if dataset.lower() == '3gauss': y = np.floor((np.arange(n) * 1.0 / n * 3)) + 1 x = np.zeros((n, 2)) # class 1 x[y == 1, 0] = -1. x[y == 1, 1] = -1. x[y == 2, 0] = -1. x[y == 2, 1] = 1. x[y == 3, 0] = 1. x[y == 3, 1] = 0 x[y != 3, :] += 1.5 * nz * generator.randn(sum(y != 3), 2) x[y == 3, :] += 2 * nz * generator.randn(sum(y == 3), 2) elif dataset.lower() == '3gauss2': y = np.floor((np.arange(n) * 1.0 / n * 3)) + 1 x = np.zeros((n, 2)) y[y == 4] = 3 # class 1 x[y == 1, 0] = -2. x[y == 1, 1] = -2. x[y == 2, 0] = -2. x[y == 2, 1] = 2. x[y == 3, 0] = 2. x[y == 3, 1] = 0 x[y != 3, :] += nz * generator.randn(sum(y != 3), 2) x[y == 3, :] += 2 * nz * generator.randn(sum(y == 3), 2) elif dataset.lower() == 'gaussrot': rot = np.array( [[np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]]) m1 = np.array([-1, 1]) m2 = np.array([1, -1]) y = np.floor((np.arange(n) * 1.0 / n * 2)) + 1 n1 = np.sum(y == 1) n2 = np.sum(y == 2) x = np.zeros((n, 2)) x[y == 1, :] = make_2D_samples_gauss(n1, m1, nz, random_state=generator) x[y == 2, :] = make_2D_samples_gauss(n2, m2, nz, random_state=generator) x = x.dot(rot) elif dataset.lower() == '2gauss_prop': y = np.concatenate((np.ones(int(p * n)), np.zeros(int((1 - p) * n)))) x = np.hstack((0 * y[:, None] - 0, 1 - 2 * y[:, None])) + nz * generator.randn(len(y), 2) if ('bias' not in kwargs) and ('b' not in kwargs): kwargs['bias'] = np.array([0, 2]) x[:, 0] += kwargs['bias'][0] x[:, 1] += kwargs['bias'][1] else: x = np.array(0) y = np.array(0) print("unknown dataset") return x, y.astype(int) @deprecated() def get_data_classif(dataset, n, nz=.5, theta=0, random_state=None, **kwargs): """ Deprecated see make_data_classif """ return make_data_classif(dataset, n, nz=.5, theta=0, random_state=None, **kwargs) python-pot-0.9.3+dfsg/ot/dr.py000066400000000000000000000401541455713015700162000ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Dimension reduction with OT .. warning:: Note that by default the module is not imported in :mod:`ot`. 
In order to use it you need to explicitly import :mod:`ot.dr` """ # Author: Remi Flamary # Minhui Huang # Jakub Zadrozny # Antoine Collas # # License: MIT License from scipy import linalg import autograd.numpy as np from sklearn.decomposition import PCA import pymanopt import pymanopt.manifolds import pymanopt.optimizers from .bregman import sinkhorn as sinkhorn_bregman from .utils import dist as dist_utils, check_random_state def dist(x1, x2): r""" Compute squared euclidean distance between samples (autograd) """ x1p2 = np.sum(np.square(x1), 1) x2p2 = np.sum(np.square(x2), 1) return x1p2.reshape((-1, 1)) + x2p2.reshape((1, -1)) - 2 * np.dot(x1, x2.T) def sinkhorn(w1, w2, M, reg, k): r"""Sinkhorn algorithm with fixed number of iteration (autograd) """ K = np.exp(-M / reg) ui = np.ones((M.shape[0],)) vi = np.ones((M.shape[1],)) for i in range(k): vi = w2 / (np.dot(K.T, ui) + 1e-50) ui = w1 / (np.dot(K, vi) + 1e-50) G = ui.reshape((M.shape[0], 1)) * K * vi.reshape((1, M.shape[1])) return G def logsumexp(M, axis): r"""Log-sum-exp reduction compatible with autograd (no numpy implementation) """ amax = np.amax(M, axis=axis, keepdims=True) return np.log(np.sum(np.exp(M - amax), axis=axis)) + np.squeeze(amax, axis=axis) def sinkhorn_log(w1, w2, M, reg, k): r"""Sinkhorn algorithm in log-domain with fixed number of iteration (autograd) """ Mr = -M / reg ui = np.zeros((M.shape[0],)) vi = np.zeros((M.shape[1],)) log_w1 = np.log(w1) log_w2 = np.log(w2) for i in range(k): vi = log_w2 - logsumexp(Mr + ui[:, None], 0) ui = log_w1 - logsumexp(Mr + vi[None, :], 1) G = np.exp(ui[:, None] + Mr + vi[None, :]) return G def split_classes(X, y): r"""split samples in :math:`\mathbf{X}` by classes in :math:`\mathbf{y}` """ lstsclass = np.unique(y) return [X[y == i, :].astype(np.float32) for i in lstsclass] def fda(X, y, p=2, reg=1e-16): r"""Fisher Discriminant Analysis Parameters ---------- X : ndarray, shape (n, d) Training samples. y : ndarray, shape (n,) Labels for training samples. p : int, optional Size of dimensionality reduction. reg : float, optional Regularization term >0 (ridge regularization) Returns ------- P : ndarray, shape (d, p) Optimal transportation matrix for the given parameters proj : callable projection function including mean centering """ mx = np.mean(X) X -= mx.reshape((1, -1)) # data split between classes d = X.shape[1] xc = split_classes(X, y) nc = len(xc) p = min(nc - 1, p) Cw = 0 for x in xc: Cw += np.cov(x, rowvar=False) Cw /= nc mxc = np.zeros((d, nc)) for i in range(nc): mxc[:, i] = np.mean(xc[i]) mx0 = np.mean(mxc, 1) Cb = 0 for i in range(nc): Cb += (mxc[:, i] - mx0).reshape((-1, 1)) * \ (mxc[:, i] - mx0).reshape((1, -1)) w, V = linalg.eig(Cb, Cw + reg * np.eye(d)) idx = np.argsort(w.real) Popt = V[:, idx[-p:]] def proj(X): return (X - mx.reshape((1, -1))).dot(Popt) return Popt, proj def wda(X, y, p=2, reg=1, k=10, solver=None, sinkhorn_method='sinkhorn', maxiter=100, verbose=0, P0=None, normalize=False): r""" Wasserstein Discriminant Analysis :ref:`[11] ` The function solves the following optimization problem: .. 
math:: \mathbf{P} = \mathop{\arg \min}_\mathbf{P} \quad \frac{\sum\limits_i W(P \mathbf{X}^i, P \mathbf{X}^i)}{\sum\limits_{i, j \neq i} W(P \mathbf{X}^i, P \mathbf{X}^j)} where : - :math:`P` is a linear projection operator in the Stiefel(`p`, `d`) manifold - :math:`W` is the entropic regularized Wasserstein distance - :math:`\mathbf{X}^i` are samples in the dataset corresponding to class i **Choosing a Sinkhorn solver** By default, and when using a regularization parameter that is not too small, the default sinkhorn solver should be enough. If you need to use a small regularization to get sparse cost matrices, you should use the :py:func:`ot.dr.sinkhorn_log` solver that will avoid numerical errors, but can be slow in practice. Parameters ---------- X : ndarray, shape (n, d) Training samples. y : ndarray, shape (n,) Labels for training samples. p : int, optional Size of dimensionality reduction. reg : float, optional Regularization term >0 (entropic regularization) k : int, optional Number of iterations of the fixed-point Sinkhorn solver. solver : None | str, optional None for steepest descent, 'TrustRegions' for the trust-regions algorithm, or a pymanopt optimizer instance sinkhorn_method : str method used for the Sinkhorn solver, either 'sinkhorn' or 'sinkhorn_log' maxiter : int, optional Maximum number of iterations of the manifold optimizer. P0 : ndarray, shape (d, p) Initial starting point for projection. normalize : bool, optional Normalize the Wasserstein distance by the average distance on P0 (default : False) verbose : int, optional Print information along iterations. Returns ------- P : ndarray, shape (d, p) Optimal projection matrix for the given parameters proj : callable Projection function including mean centering. .. _references-wda: References ---------- .. [11] Flamary, R., Cuturi, M., Courty, N., & Rakotomamonjy, A. (2016). Wasserstein Discriminant Analysis. arXiv preprint arXiv:1608.08063. """ # noqa if sinkhorn_method.lower() == 'sinkhorn': sinkhorn_solver = sinkhorn elif sinkhorn_method.lower() == 'sinkhorn_log': sinkhorn_solver = sinkhorn_log else: raise ValueError("Unknown Sinkhorn method '%s'." 
% sinkhorn_method) mx = np.mean(X) X -= mx.reshape((1, -1)) # data split between classes d = X.shape[1] xc = split_classes(X, y) # compute uniform weighs wc = [np.ones((x.shape[0]), dtype=np.float32) / x.shape[0] for x in xc] # pre-compute reg_c,c' if P0 is not None and normalize: regmean = np.zeros((len(xc), len(xc))) for i, xi in enumerate(xc): xi = np.dot(xi, P0) for j, xj in enumerate(xc[i:]): xj = np.dot(xj, P0) M = dist(xi, xj) regmean[i, j] = np.sum(M) / (len(xi) * len(xj)) else: regmean = np.ones((len(xc), len(xc))) manifold = pymanopt.manifolds.Stiefel(d, p) @pymanopt.function.autograd(manifold) def cost(P): # wda loss loss_b = 0 loss_w = 0 for i, xi in enumerate(xc): xi = np.dot(xi, P) for j, xj in enumerate(xc[i:]): xj = np.dot(xj, P) M = dist(xi, xj) G = sinkhorn_solver(wc[i], wc[j + i], M, reg * regmean[i, j], k) if j == 0: loss_w += np.sum(G * M) else: loss_b += np.sum(G * M) # loss inversed because minimization return loss_w / loss_b # declare manifold and problem problem = pymanopt.Problem(manifold=manifold, cost=cost) # declare solver and solve if solver is None: solver = pymanopt.optimizers.SteepestDescent(max_iterations=maxiter, log_verbosity=verbose) elif solver in ['tr', 'TrustRegions']: solver = pymanopt.optimizers.TrustRegions(max_iterations=maxiter, log_verbosity=verbose) Popt = solver.run(problem, initial_point=P0) def proj(X): return (X - mx.reshape((1, -1))).dot(Popt.point) return Popt.point, proj def projection_robust_wasserstein(X, Y, a, b, tau, U0=None, reg=0.1, k=2, stopThr=1e-3, maxiter=100, verbose=0, random_state=None): r""" Projection Robust Wasserstein Distance :ref:`[32] ` The function solves the following optimization problem: .. math:: \max_{U \in St(d, k)} \ \min_{\pi \in \Pi(\mu,\nu)} \quad \sum_{i,j} \pi_{i,j} \|U^T(\mathbf{x}_i - \mathbf{y}_j)\|^2 - \mathrm{reg} \cdot H(\pi) - :math:`U` is a linear projection operator in the Stiefel(`d`, `k`) manifold - :math:`H(\pi)` is entropy regularizer - :math:`\mathbf{x}_i`, :math:`\mathbf{y}_j` are samples of measures :math:`\mu` and :math:`\nu` respectively Parameters ---------- X : ndarray, shape (n, d) Samples from measure :math:`\mu` Y : ndarray, shape (n, d) Samples from measure :math:`\nu` a : ndarray, shape (n, ) weights for measure :math:`\mu` b : ndarray, shape (n, ) weights for measure :math:`\nu` tau : float stepsize for Riemannian Gradient Descent U0 : ndarray, shape (d, p) Initial starting point for projection. reg : float, optional Regularization term >0 (entropic regularization) k : int Subspace dimension stopThr : float, optional Stop threshold on error (>0) verbose : int, optional Print information along iterations. random_state : int, RandomState instance or None, default=None Determines random number generation for initial value of projection operator when U0 is not given. Returns ------- pi : ndarray, shape (n, n) Optimal transportation matrix for the given parameters U : ndarray, shape (d, k) Projection operator. .. _references-projection-robust-wasserstein: References ---------- .. [32] Huang, M. , Ma S. & Lai L. (2021). A Riemannian Block Coordinate Descent Method for Computing the Projection Robust Wasserstein Distance, ICML. 
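Examples
--------
A minimal sketch on two small random point clouds (the parameter
values below are illustrative, not tuned; the module requires the
optional autograd and pymanopt dependencies):

>>> import numpy as np
>>> from ot.dr import projection_robust_wasserstein
>>> rng = np.random.RandomState(0)
>>> X, Y = rng.randn(20, 5), rng.randn(30, 5)
>>> a, b = np.ones(20) / 20, np.ones(30) / 30
>>> pi, U = projection_robust_wasserstein(X, Y, a, b, tau=0.002, reg=0.2, k=2, maxiter=10, random_state=0)
>>> pi.shape, U.shape
((20, 30), (5, 2))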
""" # noqa # initialization n, d = X.shape m, d = Y.shape a = np.asarray(a, dtype=np.float64) b = np.asarray(b, dtype=np.float64) u = np.ones(n) / n v = np.ones(m) / m ones = np.ones((n, m)) assert d > k if U0 is None: rng = check_random_state(random_state) U = rng.randn(d, k) U, _ = np.linalg.qr(U) else: U = U0 def Vpi(X, Y, a, b, pi): # Return the second order matrix of the displacements: sum_ij { (pi)_ij (X_i-Y_j)(X_i-Y_j)^T }. A = X.T.dot(pi).dot(Y) return X.T.dot(np.diag(a)).dot(X) + Y.T.dot(np.diag(np.sum(pi, 0))).dot(Y) - A - A.T err = 1 iter = 0 while err > stopThr and iter < maxiter: # Projected cost matrix UUT = U.dot(U.T) M = np.diag(np.diag(X.dot(UUT.dot(X.T)))).dot(ones) + ones.dot( np.diag(np.diag(Y.dot(UUT.dot(Y.T))))) - 2 * X.dot(UUT.dot(Y.T)) A = np.empty(M.shape, dtype=M.dtype) np.divide(M, -reg, out=A) np.exp(A, out=A) # Sinkhorn update Ap = (1 / a).reshape(-1, 1) * A AtransposeU = np.dot(A.T, u) v = np.divide(b, AtransposeU) u = 1. / np.dot(Ap, v) pi = u.reshape((-1, 1)) * A * v.reshape((1, -1)) V = Vpi(X, Y, a, b, pi) # Riemannian gradient descent G = 2 / reg * V.dot(U) GTU = G.T.dot(U) xi = G - U.dot(GTU + GTU.T) / 2 # Riemannian gradient U, _ = np.linalg.qr(U + tau * xi) # Retraction by QR decomposition grad_norm = np.linalg.norm(xi) err = max(reg * grad_norm, np.linalg.norm(np.sum(pi, 0) - b, 1)) f_val = np.trace(U.T.dot(V.dot(U))) if verbose: print('RBCD Iteration: ', iter, ' error', err, '\t fval: ', f_val) iter = iter + 1 return pi, U def ewca(X, U0=None, reg=1, k=2, method='BCD', sinkhorn_method='sinkhorn', stopThr=1e-6, maxiter=100, maxiter_sink=1000, maxiter_MM=10, verbose=0): r""" Entropic Wasserstein Component Analysis :ref:`[52] `. The function solves the following optimization problem: .. math:: \mathbf{U} = \mathop{\arg \min}_\mathbf{U} \quad W(\mathbf{X}, \mathbf{U}\mathbf{U}^T \mathbf{X}) where : - :math:`\mathbf{U}` is a matrix in the Stiefel(`p`, `d`) manifold - :math:`W` is entropic regularized Wasserstein distances - :math:`\mathbf{X}` are samples Parameters ---------- X : ndarray, shape (n, d) Samples from measure :math:`\mu`. U0 : ndarray, shape (d, k), optional Initial starting point for projection. reg : float, optional Regularization term >0 (entropic regularization). k : int, optional Subspace dimension. method : str, optional Eather 'BCD' or 'MM' (Block Coordinate Descent or Majorization-Minimization). Prefer MM when d is large. sinkhorn_method : str Method used for the Sinkhorn solver, see :ref:`ot.bregman.sinkhorn` for more details. stopThr : float, optional Stop threshold on error (>0). maxiter : int, optional Maximum number of iterations of the BCD/MM. maxiter_sink : int, optional Maximum number of iterations of the Sinkhorn solver. maxiter_MM : int, optional Maximum number of iterations of the MM (only used when method='MM'). verbose : int, optional Print information along iterations. Returns ------- pi : ndarray, shape (n, n) Optimal transportation matrix for the given parameters. U : ndarray, shape (d, k) Matrix Stiefel manifold. .. _references-entropic-wasserstein-component_analysis: References ---------- .. [52] Collas, A., Vayer, T., Flamary, F., & Breloy, A. (2023). Entropic Wasserstein Component Analysis. """ # noqa n, d = X.shape X = X - X.mean(0) if U0 is None: pca_fitted = PCA(n_components=k).fit(X) U = pca_fitted.components_.T if method == 'MM': lambda_scm = pca_fitted.explained_variance_[0] else: U = U0 # marginals u0 = (1. 
/ n) * np.ones(n) # print iterations if verbose > 0: print('{:4s}|{:13s}|{:12s}|{:12s}'.format('It.', 'Loss', 'Crit.', 'Thres.') + '\n' + '-' * 40) def compute_loss(M, pi, reg): return np.sum(M * pi) + reg * np.sum(pi * (np.log(pi) - 1)) def grassmann_distance(U1, U2): proj = U1.T @ U2 _, s, _ = np.linalg.svd(proj) s[s > 1] = 1 s = np.arccos(s) return np.linalg.norm(s) # loop it = 0 crit = np.inf sinkhorn_warmstart = None while (it < maxiter) and (crit > stopThr): U_old = U # Solve transport M = dist_utils(X, (X @ U) @ U.T) pi, log_sinkhorn = sinkhorn_bregman( u0, u0, M, reg, numItermax=maxiter_sink, method=sinkhorn_method, warmstart=sinkhorn_warmstart, warn=False, log=True ) key_warmstart = 'warmstart' if key_warmstart in log_sinkhorn: sinkhorn_warmstart = log_sinkhorn[key_warmstart] if (pi >= 1e-300).all(): loss = compute_loss(M, pi, reg) else: loss = np.inf # Solve PCA pi_sym = (pi + pi.T) / 2 if method == 'BCD': # block coordinate descent S = X.T @ (2 * pi_sym - (1. / n) * np.eye(n)) @ X _, U = np.linalg.eigh(S) U = U[:, ::-1][:, :k] elif method == 'MM': # majorization-minimization eig, _ = np.linalg.eigh(pi_sym) lambda_pi = eig[0] for _ in range(maxiter_MM): X_proj = X @ U X_T_X_proj = X.T @ X_proj R = (1 / n) * X_T_X_proj alpha = 1 - 2 * n * lambda_pi if alpha > 0: R = alpha * (R - lambda_scm * U) else: R = alpha * R R = R - (2 * X.T @ (pi_sym @ X_proj)) + (2 * lambda_pi * X_T_X_proj) U, _ = np.linalg.qr(R) else: raise ValueError(f"Unknown method '{method}', use 'BCD' or 'MM'.") # stop or not it += 1 crit = grassmann_distance(U_old, U) # print if verbose > 0: print('{:4d}|{:8e}|{:8e}|{:8e}'.format(it, loss, crit, stopThr)) return pi, U python-pot-0.9.3+dfsg/ot/factored.py000066400000000000000000000102141455713015700173540ustar00rootroot00000000000000""" Factored OT solvers (low rank, cost or OT plan) """ # Author: Remi Flamary # # License: MIT License from .backend import get_backend from .utils import dist, get_lowrank_lazytensor from .lp import emd from .bregman import sinkhorn __all__ = ['factored_optimal_transport'] def factored_optimal_transport(Xa, Xb, a=None, b=None, reg=0.0, r=100, X0=None, stopThr=1e-7, numItermax=100, verbose=False, log=False, **kwargs): r"""Solves factored OT problem and return OT plans and intermediate distribution This function solve the following OT problem [40]_ .. math:: \mathop{\arg \min}_\mu \quad W_2^2(\mu_a,\mu)+ W_2^2(\mu,\mu_b) where : - :math:`\mu_a` and :math:`\mu_b` are empirical distributions. - :math:`\mu` is an empirical distribution with r samples And returns the two OT plans between .. note:: This function is backend-compatible and will work on arrays from all compatible backends. But the algorithm uses the C++ CPU backend which can lead to copy overhead on GPU arrays. Uses the conditional gradient algorithm to solve the problem proposed in :ref:`[39] `. 
Parameters ---------- Xa : (ns,d) array-like, float Source samples Xb : (nt,d) array-like, float Target samples a : (ns,) array-like, float Source histogram (uniform weight if empty list) b : (nt,) array-like, float Target histogram (uniform weight if empty list)) numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on the relative variation (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True Returns ------- Ga: array-like, shape (ns, r) Optimal transportation matrix between source and the intermediate distribution Gb: array-like, shape (r, nt) Optimal transportation matrix between the intermediate and target distribution X: array-like, shape (r, d) Support of the intermediate distribution log: dict, optional If input log is true, a dictionary containing the cost and dual variables and exit status .. _references-factored: References ---------- .. [40] Forrow, A., HĂĽtter, J. C., Nitzan, M., Rigollet, P., Schiebinger, G., & Weed, J. (2019, April). Statistical optimal transport via factored couplings. In The 22nd International Conference on Artificial Intelligence and Statistics (pp. 2454-2465). PMLR. See Also -------- ot.bregman.sinkhorn : Entropic regularized OT ot.optim.cg : General regularized OT """ nx = get_backend(Xa, Xb) n_a = Xa.shape[0] n_b = Xb.shape[0] d = Xa.shape[1] if a is None: a = nx.ones((n_a), type_as=Xa) / n_a if b is None: b = nx.ones((n_b), type_as=Xb) / n_b if X0 is None: X = nx.randn(r, d, type_as=Xa) else: X = X0 w = nx.ones(r, type_as=Xa) / r def solve_ot(X1, X2, w1, w2): M = dist(X1, X2) if reg > 0: G, log = sinkhorn(w1, w2, M, reg, log=True, **kwargs) log['cost'] = nx.sum(G * M) return G, log else: return emd(w1, w2, M, log=True, **kwargs) norm_delta = [] # solve the barycenter for i in range(numItermax): old_X = X # solve OT with template Ga, loga = solve_ot(Xa, X, a, w) Gb, logb = solve_ot(X, Xb, w, b) X = 0.5 * (nx.dot(Ga.T, Xa) + nx.dot(Gb, Xb)) * r delta = nx.norm(X - old_X) if delta < stopThr: break if log: norm_delta.append(delta) if log: log_dic = {'delta_iter': norm_delta, 'ua': loga['u'], 'va': loga['v'], 'ub': logb['u'], 'vb': logb['v'], 'costa': loga['cost'], 'costb': logb['cost'], 'lazy_plan': get_lowrank_lazytensor(Ga * r, Gb.T, nx=nx), } return Ga, Gb, X, log_dic return Ga, Gb, X python-pot-0.9.3+dfsg/ot/gaussian.py000066400000000000000000000605621455713015700174120ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Optimal transport for Gaussian distributions """ # Author: Theo Gnassounou # Remi Flamary # # License: MIT License import warnings from .backend import get_backend from .utils import dots, is_all_finite, list_to_array def bures_wasserstein_mapping(ms, mt, Cs, Ct, log=False): r"""Return OT linear operator between samples. The function estimates the optimal linear operator that aligns the two empirical distributions. This is equivalent to estimating the closed form mapping between two Gaussian distributions :math:`\mathcal{N}(\mu_s,\Sigma_s)` and :math:`\mathcal{N}(\mu_t,\Sigma_t)` as proposed in :ref:`[1] ` and discussed in remark 2.29 in :ref:`[2] `. The linear operator from source to target :math:`M` .. math:: M(\mathbf{x})= \mathbf{A} \mathbf{x} + \mathbf{b} where : .. 
math:: \mathbf{A} &= \Sigma_s^{-1/2} \left(\Sigma_s^{1/2}\Sigma_t\Sigma_s^{1/2} \right)^{1/2} \Sigma_s^{-1/2} \mathbf{b} &= \mu_t - \mathbf{A} \mu_s Parameters ---------- ms : array-like (d,) mean of the source distribution mt : array-like (d,) mean of the target distribution Cs : array-like (d,d) covariance of the source distribution Ct : array-like (d,d) covariance of the target distribution log : bool, optional record log if True Returns ------- A : (d, d) array-like Linear operator b : (1, d) array-like bias log : dict log dictionary return only if log==True in parameters .. _references-OT-mapping-linear: References ---------- .. [1] Knott, M. and Smith, C. S. "On the optimal mapping of distributions", Journal of Optimization Theory and Applications Vol 43, 1984 .. [2] PeyrĂ©, G., & Cuturi, M. (2017). "Computational Optimal Transport", 2018. """ ms, mt, Cs, Ct = list_to_array(ms, mt, Cs, Ct) nx = get_backend(ms, mt, Cs, Ct) Cs12 = nx.sqrtm(Cs) Cs12inv = nx.inv(Cs12) M0 = nx.sqrtm(dots(Cs12, Ct, Cs12)) A = dots(Cs12inv, M0, Cs12inv) b = mt - nx.dot(ms, A) if log: log = {} log['Cs12'] = Cs12 log['Cs12inv'] = Cs12inv return A, b, log else: return A, b def empirical_bures_wasserstein_mapping(xs, xt, reg=1e-6, ws=None, wt=None, bias=True, log=False): r"""Return OT linear operator between samples. The function estimates the optimal linear operator that aligns the two empirical distributions. This is equivalent to estimating the closed form mapping between two Gaussian distributions :math:`\mathcal{N}(\mu_s,\Sigma_s)` and :math:`\mathcal{N}(\mu_t,\Sigma_t)` as proposed in :ref:`[1] ` and discussed in remark 2.29 in :ref:`[2] `. The linear operator from source to target :math:`M` .. math:: M(\mathbf{x})= \mathbf{A} \mathbf{x} + \mathbf{b} where : .. math:: \mathbf{A} &= \Sigma_s^{-1/2} \left(\Sigma_s^{1/2}\Sigma_t\Sigma_s^{1/2} \right)^{1/2} \Sigma_s^{-1/2} \mathbf{b} &= \mu_t - \mathbf{A} \mu_s Parameters ---------- xs : array-like (ns,d) samples in the source domain xt : array-like (nt,d) samples in the target domain reg : float,optional regularization added to the diagonals of covariances (>0) ws : array-like (ns,1), optional weights for the source samples wt : array-like (ns,1), optional weights for the target samples bias: boolean, optional estimate bias :math:`\mathbf{b}` else :math:`\mathbf{b} = 0` (default:True) log : bool, optional record log if True Returns ------- A : (d, d) array-like Linear operator b : (1, d) array-like bias log : dict log dictionary return only if log==True in parameters .. _references-OT-mapping-linear: References ---------- .. [1] Knott, M. and Smith, C. S. "On the optimal mapping of distributions", Journal of Optimization Theory and Applications Vol 43, 1984 .. [2] PeyrĂ©, G., & Cuturi, M. (2017). "Computational Optimal Transport", 2018. 
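Examples
--------
A minimal sketch with synthetic samples, assuming the enclosing module is importable as :mod:`ot.gaussian`: the estimated operator is applied as :math:`x \mapsto x\mathbf{A} + \mathbf{b}`, consistent with the bias definition above.

>>> import numpy as np
>>> import ot
>>> rng = np.random.RandomState(42)
>>> xs = rng.randn(50, 2)
>>> xt = 3 * rng.randn(50, 2) + 4
>>> A, b = ot.gaussian.empirical_bures_wasserstein_mapping(xs, xt)  # doctest: +SKIP
>>> xs_mapped = xs.dot(A) + b  # doctest: +SKIP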
""" xs, xt = list_to_array(xs, xt) nx = get_backend(xs, xt) is_input_finite = is_all_finite(xs, xt) d = xs.shape[1] if bias: mxs = nx.mean(xs, axis=0)[None, :] mxt = nx.mean(xt, axis=0)[None, :] xs = xs - mxs xt = xt - mxt else: mxs = nx.zeros((1, d), type_as=xs) mxt = nx.zeros((1, d), type_as=xs) if ws is None: ws = nx.ones((xs.shape[0], 1), type_as=xs) / xs.shape[0] if wt is None: wt = nx.ones((xt.shape[0], 1), type_as=xt) / xt.shape[0] Cs = nx.dot((xs * ws).T, xs) / nx.sum(ws) + reg * nx.eye(d, type_as=xs) Ct = nx.dot((xt * wt).T, xt) / nx.sum(wt) + reg * nx.eye(d, type_as=xt) if log: A, b, log = bures_wasserstein_mapping(mxs, mxt, Cs, Ct, log=log) else: A, b = bures_wasserstein_mapping(mxs, mxt, Cs, Ct) if is_input_finite and not is_all_finite(A, b): warnings.warn( "Numerical errors were encountered in ot.gaussian.empirical_bures_wasserstein_mapping. " "Consider increasing the regularization parameter `reg`.") if log: log['Cs'] = Cs log['Ct'] = Ct return A, b, log else: return A, b def bures_wasserstein_distance(ms, mt, Cs, Ct, log=False): r"""Return Bures Wasserstein distance between samples. The function estimates the Bures-Wasserstein distance between two empirical distributions source :math:`\mu_s` and target :math:`\mu_t`, discussed in remark 2.31 :ref:`[1] `. The Bures Wasserstein distance between source and target distribution :math:`\mathcal{W}` .. math:: \mathcal{W}(\mu_s, \mu_t)_2^2= \left\lVert \mathbf{m}_s - \mathbf{m}_t \right\rVert^2 + \mathcal{B}(\Sigma_s, \Sigma_t)^{2} where : .. math:: \mathbf{B}(\Sigma_s, \Sigma_t)^{2} = \text{Tr}\left(\Sigma_s + \Sigma_t - 2 \sqrt{\Sigma_s^{1/2}\Sigma_t\Sigma_s^{1/2}} \right) Parameters ---------- ms : array-like (d,) mean of the source distribution mt : array-like (d,) mean of the target distribution Cs : array-like (d,d) covariance of the source distribution Ct : array-like (d,d) covariance of the target distribution log : bool, optional record log if True Returns ------- W : float Bures Wasserstein distance log : dict log dictionary return only if log==True in parameters .. _references-bures-wasserstein-distance: References ---------- .. [1] PeyrĂ©, G., & Cuturi, M. (2017). "Computational Optimal Transport", 2018. """ ms, mt, Cs, Ct = list_to_array(ms, mt, Cs, Ct) nx = get_backend(ms, mt, Cs, Ct) Cs12 = nx.sqrtm(Cs) B = nx.trace(Cs + Ct - 2 * nx.sqrtm(dots(Cs12, Ct, Cs12))) W = nx.sqrt(nx.maximum(nx.norm(ms - mt)**2 + B, 0)) if log: log = {} log['Cs12'] = Cs12 return W, log else: return W def empirical_bures_wasserstein_distance(xs, xt, reg=1e-6, ws=None, wt=None, bias=True, log=False): r"""Return Bures Wasserstein distance from mean and covariance of distribution. The function estimates the Bures-Wasserstein distance between two empirical distributions source :math:`\mu_s` and target :math:`\mu_t`, discussed in remark 2.31 :ref:`[1] `. The Bures Wasserstein distance between source and target distribution :math:`\mathcal{W}` .. math:: \mathcal{W}(\mu_s, \mu_t)_2^2= \left\lVert \mathbf{m}_s - \mathbf{m}_t \right\rVert^2 + \mathcal{B}(\Sigma_s, \Sigma_t)^{2} where : .. 
math:: \mathbf{B}(\Sigma_s, \Sigma_t)^{2} = \text{Tr}\left(\Sigma_s + \Sigma_t - 2 \sqrt{\Sigma_s^{1/2}\Sigma_t\Sigma_s^{1/2}} \right) Parameters ---------- xs : array-like (ns,d) samples in the source domain xt : array-like (nt,d) samples in the target domain reg : float,optional regularization added to the diagonals of covariances (>0) ws : array-like (ns), optional weights for the source samples wt : array-like (nt), optional weights for the target samples bias: boolean, optional estimate bias :math:`\mathbf{b}` else :math:`\mathbf{b} = 0` (default:True) log : bool, optional record log if True Returns ------- W : float Bures Wasserstein distance log : dict log dictionary returned only if log==True in parameters .. _references-bures-wasserstein-distance: References ---------- .. [1] Peyré, G., & Cuturi, M. (2017). "Computational Optimal Transport", 2018. """ xs, xt = list_to_array(xs, xt) nx = get_backend(xs, xt) d = xs.shape[1] if bias: mxs = nx.mean(xs, axis=0)[None, :] mxt = nx.mean(xt, axis=0)[None, :] xs = xs - mxs xt = xt - mxt else: mxs = nx.zeros((1, d), type_as=xs) mxt = nx.zeros((1, d), type_as=xs) if ws is None: ws = nx.ones((xs.shape[0], 1), type_as=xs) / xs.shape[0] if wt is None: wt = nx.ones((xt.shape[0], 1), type_as=xt) / xt.shape[0] Cs = nx.dot((xs * ws).T, xs) / nx.sum(ws) + reg * nx.eye(d, type_as=xs) Ct = nx.dot((xt * wt).T, xt) / nx.sum(wt) + reg * nx.eye(d, type_as=xt) if log: W, log = bures_wasserstein_distance(mxs, mxt, Cs, Ct, log=log) log['Cs'] = Cs log['Ct'] = Ct return W, log else: W = bures_wasserstein_distance(mxs, mxt, Cs, Ct) return W def bures_wasserstein_barycenter(m, C, weights=None, num_iter=1000, eps=1e-7, log=False): r"""Return the Bures-Wasserstein barycenter of Gaussian distributions. The function estimates the optimal barycenter of the given Gaussian distributions :math:`\left\{\mathcal{N}(\mu_i,\Sigma_i)\right\}_{i=1}^n` by solving a fixed-point equation :ref:`[1] `. The barycenter is itself a Gaussian distribution :math:`\mathcal{N}(\mu_b,\Sigma_b)` where : .. math:: \mu_b = \sum_{i=1}^n w_i \mu_i And the barycentric covariance is the solution of the following fixed-point equation: .. math:: \Sigma_b = \sum_{i=1}^n w_i \left(\Sigma_b^{1/2}\Sigma_i\Sigma_b^{1/2}\right)^{1/2} Parameters ---------- m : array-like (k,d) mean of k distributions C : array-like (k,d,d) covariance of k distributions weights : array-like (k), optional weights for each distribution num_iter : int, optional number of iterations for the fixed-point algorithm eps : float, optional tolerance for the fixed-point algorithm log : bool, optional record log if True Returns ------- mb : (d,) array-like mean of the barycenter Cb : (d, d) array-like covariance of the barycenter log : dict log dictionary returned only if log==True in parameters .. _references-OT-mapping-linear-barycenter: References ---------- .. [1] M. Agueh and G. Carlier, "Barycenters in the Wasserstein space", SIAM Journal on Mathematical Analysis, vol. 43, no. 2, pp. 904-924, 2011.
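Examples
--------
A minimal sketch, assuming the enclosing module is importable as :mod:`ot.gaussian`: barycenter of two 2-D Gaussians with uniform weights.

>>> import numpy as np
>>> import ot
>>> m = np.array([[0., 0.], [1., 1.]])  # means, shape (k, d)
>>> C = np.stack([np.eye(2), 2 * np.eye(2)])  # covariances, shape (k, d, d)
>>> mb, Cb = ot.gaussian.bures_wasserstein_barycenter(m, C)  # doctest: +SKIP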
""" nx = get_backend(*C, *m,) if weights is None: weights = nx.ones(C.shape[0], type_as=C[0]) / C.shape[0] # Compute the mean barycenter mb = nx.sum(m * weights[:, None], axis=0) # Init the covariance barycenter Cb = nx.mean(C * weights[:, None, None], axis=0) for it in range(num_iter): # fixed point update Cb12 = nx.sqrtm(Cb) Cnew = Cb12 @ C @ Cb12 C_ = [] for i in range(len(C)): C_.append(nx.sqrtm(Cnew[i])) Cnew = nx.stack(C_, axis=0) Cnew *= weights[:, None, None] Cnew = nx.sum(Cnew, axis=0) # check convergence diff = nx.norm(Cb - Cnew) if diff <= eps: break Cb = Cnew else: print("Dit not converge.") if log: log = {} log['num_iter'] = it log['final_diff'] = diff return mb, Cb, log else: return mb, Cb def empirical_bures_wasserstein_barycenter( X, reg=1e-6, weights=None, num_iter=1000, eps=1e-7, w=None, bias=True, log=False ): r"""Return OT linear operator between samples. The function estimates the optimal barycenter of the empirical distributions. This is equivalent to resolving the fixed point algorithm for multiple Gaussian distributions :math:`\left{\mathcal{N}(\mu,\Sigma)\right}_{i=1}^n` :ref:`[1] `. The barycenter still following a Gaussian distribution :math:`\mathcal{N}(\mu_b,\Sigma_b)` where : .. math:: \mu_b = \sum_{i=1}^n w_i \mu_i And the barycentric covariance is the solution of the following fixed-point algorithm: .. math:: \Sigma_b = \sum_{i=1}^n w_i \left(\Sigma_b^{1/2}\Sigma_i^{1/2}\Sigma_b^{1/2}\right)^{1/2} Parameters ---------- X : list of array-like (n,d) samples in each distribution reg : float,optional regularization added to the diagonals of covariances (>0) weights : array-like (n,), optional weights for each distribution num_iter : int, optional number of iteration for the fixed point algorithm eps : float, optional tolerance for the fixed point algorithm w : list of array-like (n,), optional weights for each sample in each distribution bias: boolean, optional estimate bias :math:`\mathbf{b}` else :math:`\mathbf{b} = 0` (default:True) log : bool, optional record log if True Returns ------- mb : (d,) array-like mean of the barycenter Cb : (d, d) array-like covariance of the barycenter log : dict log dictionary return only if log==True in parameters .. _references-OT-mapping-linear-barycenter: References ---------- .. [1] M. Agueh and G. Carlier, "Barycenters in the Wasserstein space", SIAM Journal on Mathematical Analysis, vol. 43, no. 2, pp. 904-924, 2011. """ X = list_to_array(*X) nx = get_backend(*X) k = len(X) d = [X[i].shape[1] for i in range(k)] if bias: m = [nx.mean(X[i], axis=0)[None, :] for i in range(k)] X = [X[i] - m[i] for i in range(k)] else: m = [nx.zeros((1, d[i]), type_as=X[i]) for i in range(k)] if w is None: w = [nx.ones((X[i].shape[0], 1), type_as=X[i]) / X[i].shape[0] for i in range(k)] C = [ nx.dot((X[i] * w[i]).T, X[i]) / nx.sum(w[i]) + reg * nx.eye(d[i], type_as=X[i]) for i in range(k) ] m = nx.stack(m, axis=0) C = nx.stack(C, axis=0) if log: mb, Cb, log = bures_wasserstein_barycenter(m, C, weights=weights, num_iter=num_iter, eps=eps, log=log) return mb, Cb, log else: mb, Cb = bures_wasserstein_barycenter(m, C, weights=weights, num_iter=num_iter, eps=eps, log=log) return mb, Cb def gaussian_gromov_wasserstein_distance(Cov_s, Cov_t, log=False): r""" Return the Gaussian Gromov-Wasserstein value from [57]. 
This function returns the closed-form value of the Gaussian Gromov-Wasserstein distance between two Gaussian distributions :math:`\mathcal{N}(\mu_s,\Sigma_s)` and :math:`\mathcal{N}(\mu_t,\Sigma_t)` when the OT plan is assumed to also be Gaussian. See [57] Theorem 4.1 for more details. Parameters ---------- Cov_s : array-like (ds,ds) covariance of the source distribution Cov_t : array-like (dt,dt) covariance of the target distribution Returns ------- G : float Gaussian Gromov-Wasserstein distance .. _references-gaussien_gromov_wasserstein_distance: References ---------- .. [57] Delon, J., Desolneux, A., & Salmona, A. (2022). Gromov–Wasserstein distances between Gaussian distributions. Journal of Applied Probability, 59(4), 1178-1198. """ nx = get_backend(Cov_s, Cov_t) # ensure that Cov_s is the largest covariance matrix # that is m >= n if Cov_s.shape[0] < Cov_t.shape[0]: Cov_s, Cov_t = Cov_t, Cov_s n = Cov_t.shape[0] # compute and sort eigenvalues in decreasing order d_s = nx.flip(nx.sort(nx.eigh(Cov_s)[0])) d_t = nx.flip(nx.sort(nx.eigh(Cov_t)[0])) # compute the Gaussian Gromov-Wasserstein distance res = 4 * (nx.sum(d_s) - nx.sum(d_t))**2 + 8 * nx.sum((d_s[:n] - d_t)**2) + 8 * nx.sum((d_s[n:])**2) if log: log = {} log['d_s'] = d_s log['d_t'] = d_t return nx.sqrt(res), log else: return nx.sqrt(res) def empirical_gaussian_gromov_wasserstein_distance(xs, xt, ws=None, wt=None, log=False): r"""Return Gaussian Gromov-Wasserstein distance between samples. The function estimates the Gaussian Gromov-Wasserstein distance between two Gaussian distributions source :math:`\mu_s` and target :math:`\mu_t`, whose parameters are estimated from the provided samples :math:`\mathcal{X}_s` and :math:`\mathcal{X}_t`. See [57] Theorem 4.1 for more details. Parameters ---------- xs : array-like (ns,d) samples in the source domain xt : array-like (nt,d) samples in the target domain ws : array-like (ns,1), optional weights for the source samples wt : array-like (nt,1), optional weights for the target samples log : bool, optional record log if True Returns ------- G : float Gaussian Gromov-Wasserstein distance .. _references-gaussien_gromov_wasserstein: References ---------- .. [57] Delon, J., Desolneux, A., & Salmona, A. (2022). Gromov–Wasserstein distances between Gaussian distributions. Journal of Applied Probability, 59(4), 1178-1198. """ xs, xt = list_to_array(xs, xt) nx = get_backend(xs, xt) if ws is None: ws = nx.ones((xs.shape[0], 1), type_as=xs) / xs.shape[0] if wt is None: wt = nx.ones((xt.shape[0], 1), type_as=xt) / xt.shape[0] mxs = nx.dot(ws.T, xs) / nx.sum(ws) mxt = nx.dot(wt.T, xt) / nx.sum(wt) xs = xs - mxs xt = xt - mxt Cs = nx.dot((xs * ws).T, xs) / nx.sum(ws) Ct = nx.dot((xt * wt).T, xt) / nx.sum(wt) if log: G, log = gaussian_gromov_wasserstein_distance(Cs, Ct, log=log) log['Cov_s'] = Cs log['Cov_t'] = Ct return G, log else: G = gaussian_gromov_wasserstein_distance(Cs, Ct) return G def gaussian_gromov_wasserstein_mapping(mu_s, mu_t, Cov_s, Cov_t, sign_eigs=None, log=False): r""" Return the Gaussian Gromov-Wasserstein mapping from [57]. This function returns the closed-form expression of the Gaussian Gromov-Wasserstein mapping between two Gaussian distributions :math:`\mathcal{N}(\mu_s,\Sigma_s)` and :math:`\mathcal{N}(\mu_t,\Sigma_t)` when the OT plan is assumed to also be Gaussian. See [57] Theorem 4.1 for more details.
Parameters ---------- mu_s : array-like (ds,) mean of the source distribution mu_t : array-like (dt,) mean of the target distribution Cov_s : array-like (ds,ds) covariance of the source distribution Cov_t : array-like (dt,dt) covariance of the target distribution log : bool, optional record log if True Returns ------- A : (dt, ds) array-like Linear operator b : (1, dt) array-like bias .. _references-gaussien_gromov_wasserstein_mapping: References ---------- .. [57] Delon, J., Desolneux, A., & Salmona, A. (2022). Gromov–Wasserstein distances between Gaussian distributions. Journal of Applied Probability, 59(4), 1178-1198. """ nx = get_backend(mu_s, mu_t, Cov_s, Cov_t) n = Cov_t.shape[0] m = Cov_s.shape[0] # compte and sort eigenvalues/eigenvectors decreasingly d_s, U_s = nx.eigh(Cov_s) id_s = nx.flip(nx.argsort(d_s)) d_s, U_s = d_s[id_s], U_s[:, id_s] d_t, U_t = nx.eigh(Cov_t) id_t = nx.flip(nx.argsort(d_t)) d_t, U_t = d_t[id_t], U_t[:, id_t] if sign_eigs is None: sign_eigs = nx.ones(min(m, n), type_as=mu_s) if m >= n: A = nx.concatenate((nx.diag(sign_eigs * nx.sqrt(d_t) / nx.sqrt(d_s[:n])), nx.zeros((n, m - n), type_as=mu_s)), axis=1).T else: A = nx.concatenate((nx.diag(sign_eigs * nx.sqrt(d_t[:m]) / nx.sqrt(d_s)), nx.zeros((n - m, m), type_as=mu_s)), axis=0).T A = nx.dot(nx.dot(U_s, A), U_t.T) # compute the gaussien Gromov-Wasserstein dis b = mu_t - nx.dot(mu_s, A) if log: log = {} log['d_s'] = d_s log['d_t'] = d_t log['U_s'] = U_s log['U_t'] = U_t return A, b, log else: return A, b def empirical_gaussian_gromov_wasserstein_mapping(xs, xt, ws=None, wt=None, sign_eigs=None, log=False): r"""Return Gaussian Gromov-Wasserstein mapping between samples. The function estimates the Gaussian Gromov-Wasserstein mapping between two Gaussien distributions source :math:`\mu_s` and target :math:`\mu_t`, whose parameters are estimated from the provided samples :math:`\mathcal{X}_s` and :math:`\mathcal{X}_t`. See [57] Theorem 4.1 for more details. Parameters ---------- xs : array-like (ns,ds) samples in the source domain xt : array-like (nt,dt) samples in the target domain ws : array-like (ns,1), optional weights for the source samples wt : array-like (ns,1), optional weights for the target samples sign_eigs : array-like (min(ds,dt),) or string, optional sign of the eigenvalues of the mapping matrix, by default all signs will be positive. If 'skewness' is provided, the sign of the eigenvalues is selected as the product of the sign of the skewness of the projected data. log : bool, optional record log if True Returns ------- A : (dt, ds) array-like Linear operator b : (1, dt) array-like bias .. _references-empirical_gaussian_gromov_wasserstein_mapping: References ---------- .. [57] Delon, J., Desolneux, A., & Salmona, A. (2022). Gromov–Wasserstein distances between Gaussian distributions. Journal of Applied Probability, 59(4), 1178-1198. 
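Examples
--------
A minimal sketch, assuming the enclosing module is importable as :mod:`ot.gaussian`: map 3-D source samples into a 2-D target space. Following the function body, the estimated operator is applied as :math:`x \mapsto x\mathbf{A} + \mathbf{b}`.

>>> import numpy as np
>>> import ot
>>> rng = np.random.RandomState(0)
>>> xs = rng.randn(30, 3)
>>> xt = rng.randn(40, 2)
>>> A, b = ot.gaussian.empirical_gaussian_gromov_wasserstein_mapping(xs, xt)  # doctest: +SKIP
>>> xs_mapped = xs.dot(A) + b  # doctest: +SKIP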
""" xs, xt = list_to_array(xs, xt) nx = get_backend(xs, xt) m = xs.shape[1] n = xt.shape[1] if ws is None: ws = nx.ones((xs.shape[0], 1), type_as=xs) / xs.shape[0] if wt is None: wt = nx.ones((xt.shape[0], 1), type_as=xt) / xt.shape[0] # estimate mean and covariance mu_s = nx.dot(ws.T, xs) / nx.sum(ws) mu_t = nx.dot(wt.T, xt) / nx.sum(wt) xs = xs - mu_s xt = xt - mu_t Cov_s = nx.dot((xs * ws).T, xs) / nx.sum(ws) Cov_t = nx.dot((xt * wt).T, xt) / nx.sum(wt) # compte and sort eigenvalues/eigenvectors decreasingly d_s, U_s = nx.eigh(Cov_s) id_s = nx.flip(nx.argsort(d_s)) d_s, U_s = d_s[id_s], U_s[:, id_s] d_t, U_t = nx.eigh(Cov_t) id_t = nx.flip(nx.argsort(d_t)) d_t, U_t = d_t[id_t], U_t[:, id_t] # select the sign of the eigenvalues if sign_eigs is None: sign_eigs = nx.ones(min(m, n), type_as=mu_s) elif sign_eigs == 'skewness': size = min(m, n) skew_s = nx.sum((nx.dot(xs, U_s[:, :size]))**3 * ws, axis=0) skew_t = nx.sum((nx.dot(xt, U_t[:, :size]))**3 * wt, axis=0) sign_eigs = nx.sign(skew_t * skew_s) if m >= n: A = nx.concatenate((nx.diag(sign_eigs * nx.sqrt(d_t) / nx.sqrt(d_s[:n])), nx.zeros((n, m - n), type_as=mu_s)), axis=1).T else: A = nx.concatenate((nx.diag(sign_eigs * nx.sqrt(d_t[:m]) / nx.sqrt(d_s)), nx.zeros((n - m, m), type_as=mu_s)), axis=0).T A = nx.dot(nx.dot(U_s, A), U_t.T) # compute the gaussien Gromov-Wasserstein dis b = mu_t - nx.dot(mu_s, A) if log: log = {} log['d_s'] = d_s log['d_t'] = d_t log['U_s'] = U_s log['U_t'] = U_t log['Cov_s'] = Cov_s log['Cov_t'] = Cov_t return A, b, log else: return A, b python-pot-0.9.3+dfsg/ot/gnn/000077500000000000000000000000001455713015700157775ustar00rootroot00000000000000python-pot-0.9.3+dfsg/ot/gnn/__init__.py000066400000000000000000000013431455713015700201110ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Layers and functions for optimal transport in Graph Neural Networks. .. warning:: Note that by default the module is not imported in :mod:`ot`. In order to use it you need to explicitly import :mod:`ot.gnn`. This module is PyTorch Geometric dependent. The layers are compatible with their API. """ # Author: Sonia Mazelet # RĂ©mi Flamary # # License: MIT License # All submodules and packages from ._utils import (FGW_distance_to_templates,wasserstein_distance_to_templates) from ._layers import (TFGWPooling,TWPooling) __all__ = [ 'FGW_distance_to_templates', 'wasserstein_distance_to_templates','TFGWPooling','TWPooling']python-pot-0.9.3+dfsg/ot/gnn/_layers.py000066400000000000000000000246211455713015700200140ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Template Fused Gromov Wasserstein """ # Author: Sonia Mazelet # RĂ©mi Flamary # # License: MIT License import torch import torch.nn as nn from ._utils import TFGW_template_initialization, FGW_distance_to_templates, wasserstein_distance_to_templates class TFGWPooling(nn.Module): r""" Template Fused Gromov-Wasserstein (TFGW) layer. This layer is a pooling layer for graph neural networks. Computes the fused Gromov-Wasserstein distances between the graph and a set of templates. .. math:: TFGW_{ \overline{ \mathcal{G} }, \alpha }(C,F,h)=[ FGW_{\alpha}(C,F,h,\overline{C}_k,\overline{F}_k,\overline{h}_k)]_{k=1}^{K} where : - :math:`\mathcal{G}=\{(\overline{C}_k,\overline{F}_k,\overline{h}_k) \}_{k \in \{1,...,K \}} \}` is the set of :math:`K` templates characterized by their adjacency matrices :math:`\overline{C}_k`, their feature matrices :math:`\overline{F}_k` and their node weights :math:`\overline{h}_k`. 
- :math:`C`, :math:`F` and :math:`h` are respectively the adjacency matrix, the feature matrix and the node weights of the graph. - :math:`\alpha` is the trade-off parameter between features and structure for the Fused Gromov-Wasserstein distance. Parameters ---------- n_features : int Feature dimension of the nodes. n_tplt : int Number of graph templates. n_tplt_nodes : int Number of nodes in each template. alpha : float, optional FGW trade-off parameter (0 < alpha < 1). If None alpha is trained, else it is fixed at the given value. Weights features (alpha=0) and structure (alpha=1). train_node_weights : bool, optional If True, the templates node weights are learned. Else, they are uniform. multi_alpha: bool, optional If True, the alpha parameter is a vector of size n_tplt. feature_init_mean: float, optional Mean of the random normal law to initialize the template features. feature_init_std: float, optional Standard deviation of the random normal law to initialize the template features. References ---------- .. [53] CĂ©dric Vincent-Cuaz, RĂ©mi Flamary, Marco Corneli, Titouan Vayer, Nicolas Courty. "Template based graph neural network with optimal transport distances" """ def __init__(self, n_features, n_tplt=2, n_tplt_nodes=2, alpha=None, train_node_weights=True, multi_alpha=False, feature_init_mean=0., feature_init_std=1.): """ Template Fused Gromov-Wasserstein (TFGW) layer. This layer is a pooling layer for graph neural networks. Computes the fused Gromov-Wasserstein distances between the graph and a set of templates. .. math:: TFGW_{\overline{\mathcal{G}},\alpha}(C,F,h)=[FGW_{\alpha}(C,F,h,\overline{C}_k,\overline{F}_k,\overline{h}_k)]_{k=1}^{K} where : - :math:`\mathcal{G}=\{(\overline{C}_k,\overline{F}_k,\overline{h}_k) \}_{k \in \{1,...,K \}} }` is the set of :math:`K` templates charactersised by their adjacency matrices :math:`\overline{C}_k`, their feature matrices :math:`\overline{F}_k` and their node weights :math:`\overline{h}_k`. - :math:`C`, :math:`F` and :math:`h` are respectively the adjacency matrix, the feature matrix and the node weights of the graph. - :math:`\alpha` is the trade-off parameter between features and structure for the Fused Gromov-Wasserstein distance. Parameters ---------- n_features : int Feature dimension of the nodes. n_tplt : int Number of graph templates. n_tplt_nodes : int Number of nodes in each template. alpha : float, optional FGW trade-off parameter (0 < alpha < 1). If None alpha is trained, else it is fixed at the given value. Weights features (alpha=0) and structure (alpha=1). train_node_weights : bool, optional If True, the templates node weights are learned. Else, they are uniform. multi_alpha: bool, optional If True, the alpha parameter is a vector of size n_tplt. feature_init_mean: float, optional Mean of the random normal law to initialize the template features. feature_init_std: float, optional Standard deviation of the random normal law to initialize the template features. References ---------- .. [53] CĂ©dric Vincent-Cuaz, RĂ©mi Flamary, Marco Corneli, Titouan Vayer, Nicolas Courty. 
"Template based graph neural network with optimal transport distances" """ super().__init__() self.n_tplt = n_tplt self.n_tplt_nodes = n_tplt_nodes self.n_features = n_features self.multi_alpha = multi_alpha self.feature_init_mean = feature_init_mean self.feature_init_std = feature_init_std tplt_adjacencies, tplt_features, self.q0 = TFGW_template_initialization(self.n_tplt, self.n_tplt_nodes, self.n_features, self.feature_init_mean, self.feature_init_std) self.tplt_adjacencies = nn.Parameter(tplt_adjacencies) self.tplt_features = nn.Parameter(tplt_features) self.softmax = nn.Softmax(dim=1) if train_node_weights: self.q0 = nn.Parameter(self.q0) if alpha is None: if multi_alpha: self.alpha0 = torch.Tensor([0] * self.n_tplt) else: self.alpha0 = torch.Tensor([0]) self.alpha0 = nn.Parameter(self.alpha0) else: if multi_alpha: self.alpha0 = torch.Tensor([alpha] * self.n_tplt) else: self.alpha0 = torch.Tensor([alpha]) self.alpha0 = torch.logit(self.alpha0) def forward(self, x, edge_index, batch=None): """ Parameters ---------- x : torch.Tensor Node features. edge_index : torch.Tensor Edge indices. batch : torch.Tensor, optional Batch vector which assigns each node to its graph. """ alpha = torch.sigmoid(self.alpha0) q = self.softmax(self.q0) x = FGW_distance_to_templates(edge_index, self.tplt_adjacencies, x, self.tplt_features, q, alpha, self.multi_alpha, batch) return x class TWPooling(nn.Module): r""" Template Wasserstein (TW) layer, also kown as OT-GNN layer. This layer is a pooling layer for graph neural networks. Computes the Wasserstein distances between the features of the graph features and a set of templates. .. math:: TW_{\overline{\mathcal{G}}}(C,F,h)=[W(F,h,\overline{F}_k,\overline{h}_k)]_{k=1}^{K} where : - :math:`\mathcal{G}=\{(\overline{F}_k,\overline{h}_k) \}_{k \in \{1,...,K \}} \}` is the set of :math:`K` templates charactersised by their feature matrices :math:`\overline{F}_k` and their node weights :math:`\overline{h}_k`. - :math:`F` and :math:`h` are respectively the feature matrix and the node weights of the graph. Parameters ---------- n_features : int Feature dimension of the nodes. n_tplt : int Number of graph templates. n_tplt_nodes : int Number of nodes in each template. train_node_weights : bool, optional If True, the templates node weights are learned. Else, they are uniform. feature_init_mean: float, optional Mean of the random normal law to initialize the template features. feature_init_std: float, optional Standard deviation of the random normal law to initialize the template features. References ---------- .. [54] BĂ©cigneul, G., Ganea, O. E., Chen, B., Barzilay, R., & Jaakkola, T. S. (2020). [Optimal transport graph neural networks] """ def __init__(self, n_features, n_tplt=2, n_tplt_nodes=2, train_node_weights=True, feature_init_mean=0., feature_init_std=1.): """ Template Wasserstein (TW) layer, also kown as OT-GNN layer. This layer is a pooling layer for graph neural networks. Computes the Wasserstein distances between the features of the graph features and a set of templates. .. math:: TW_{\overline{\mathcal{G}}}(C,F,h)=[W(F,h,\overline{F}_k,\overline{h}_k)]_{k=1}^{K} where : - :math:`\mathcal{G}=\{(\overline{F}_k,\overline{h}_k) \}_{k \in \llbracket 1;K \rrbracket }` is the set of :math:`K` templates charactersised by their feature matrices :math:`\overline{F}_k` and their node weights :math:`\overline{h}_k`. - :math:`F` and :math:`h` are respectively the feature matrix and the node weights of the graph. 
Parameters ---------- n_features : int Feature dimension of the nodes. n_tplt : int Number of graph templates. n_tplt_nodes : int Number of nodes in each template. train_node_weights : bool, optional If True, the templates node weights are learned. Else, they are uniform. feature_init_mean: float, optional Mean of the random normal law to initialize the template features. feature_init_std: float, optional Standard deviation of the random normal law to initialize the template features. References ---------- .. [54] Bécigneul, G., Ganea, O. E., Chen, B., Barzilay, R., & Jaakkola, T. S. (2020). [Optimal transport graph neural networks] """ super().__init__() self.n_tplt = n_tplt self.n_tplt_nodes = n_tplt_nodes self.n_features = n_features self.feature_init_mean = feature_init_mean self.feature_init_std = feature_init_std _, tplt_features, self.q0 = TFGW_template_initialization(self.n_tplt, self.n_tplt_nodes, self.n_features, self.feature_init_mean, self.feature_init_std) self.tplt_features = nn.Parameter(tplt_features) self.softmax = nn.Softmax(dim=1) if train_node_weights: self.q0 = nn.Parameter(self.q0) def forward(self, x, edge_index=None, batch=None): """ Parameters ---------- x : torch.Tensor Node features. edge_index : torch.Tensor Edge indices. batch : torch.Tensor, optional Batch vector which assigns each node to its graph. """ q = self.softmax(self.q0) x = wasserstein_distance_to_templates(x, self.tplt_features, q, batch) return x python-pot-0.9.3+dfsg/ot/gnn/_utils.py000066400000000000000000000214051455713015700176520ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ GNN layers utils """ # Author: Sonia Mazelet # Rémi Flamary # # License: MIT License import torch from ..utils import dist from ..gromov import fused_gromov_wasserstein2 from ..lp import emd2 from torch_geometric.utils import subgraph def TFGW_template_initialization(n_tplt, n_tplt_nodes, n_features, feature_init_mean=0., feature_init_std=1.): """ Initializes templates for the Template Fused Gromov Wasserstein layer. Returns the adjacency matrices and the features of the nodes of the templates. Adjacency matrices are initialized uniformly with values in :math:`[0,1]`. Node features are initialized following a normal distribution. Parameters ---------- n_tplt: int Number of templates. n_tplt_nodes: int Number of nodes per template. n_features: int Number of features for the nodes. feature_init_mean: float, optional Mean of the random normal law to initialize the template features. feature_init_std: float, optional Standard deviation of the random normal law to initialize the template features. Returns ---------- tplt_adjacencies: torch.Tensor, shape (n_templates, n_template_nodes, n_template_nodes) Adjacency matrices for the templates. tplt_features: torch.Tensor, shape (n_templates, n_template_nodes, n_features) Node features for each template. q: torch.Tensor, shape (n_templates, n_template_nodes) Weights on the template nodes. """ tplt_adjacencies = torch.rand((n_tplt, n_tplt_nodes, n_tplt_nodes)) tplt_features = torch.Tensor(n_tplt, n_tplt_nodes, n_features) torch.nn.init.normal_(tplt_features, mean=feature_init_mean, std=feature_init_std) q = torch.zeros(n_tplt, n_tplt_nodes) tplt_adjacencies = 0.5 * (tplt_adjacencies + torch.transpose(tplt_adjacencies, 1, 2)) return tplt_adjacencies, tplt_features, q def FGW_distance_to_templates(G_edges, tplt_adjacencies, G_features, tplt_features, tplt_weights, alpha=0.5, multi_alpha=False, batch=None): """ Computes the FGW distances between a graph and templates.
Parameters ---------- G_edges : torch.Tensor, shape (n_edges, 2) Edge indices of the graph in the Pytorch Geometric format. tplt_adjacencies : list of torch.Tensor, shape (n_templates, n_template_nodes, n_templates_nodes) List of the adjacency matrices of the templates. G_features : torch.Tensor, shape (n_nodes, n_features) Graph node features. tplt_features : list of torch.Tensor, shape (n_templates, n_template_nodes, n_features) List of the node features of the templates. weights : torch.Tensor, shape (n_templates, n_template_nodes) Weights on the nodes of the templates. alpha : float, optional Trade-off parameter (0 < alpha < 1). Weights features (alpha=0) and structure (alpha=1). multi_alpha: bool, optional If True, the alpha parameter is a vector of size n_templates. batch: torch.Tensor, optional Batch vector which assigns each node to its graph. Returns ------- distances : torch.Tensor, shape (n_templates) if batch=None, else shape (n_graphs, n_templates). Vector of fused Gromov-Wasserstein distances between the graph and the templates. """ if batch is None: n, n_feat = G_features.shape n_T, _, n_feat_T = tplt_features.shape weights_G = torch.ones(n) / n C = torch.sparse_coo_tensor(G_edges, torch.ones(len(G_edges[0])), size=(n, n)).type(torch.float) C = C.to_dense() if not n_feat == n_feat_T: raise ValueError('The templates and the graphs must have the same feature dimension.') distances = torch.zeros(n_T) for j in range(n_T): template_features = tplt_features[j].reshape(len(tplt_features[j]), n_feat_T) M = dist(G_features, template_features).type(torch.float) #if alpha is zero the emd distance is used if multi_alpha and torch.any(alpha > 0): embedding = fused_gromov_wasserstein2(M, C, tplt_adjacencies[j], weights_G, tplt_weights[j], alpha=alpha[j], symmetric=True, max_iter=50) elif not multi_alpha and torch.all(alpha == 0): embedding = emd2(weights_G, tplt_weights[j], M, numItermax=50) elif not multi_alpha and alpha > 0: embedding = fused_gromov_wasserstein2(M, C, tplt_adjacencies[j], weights_G, tplt_weights[j], alpha=alpha, symmetric=True, max_iter=50) else: embedding = emd2(weights_G, tplt_weights[j], M, numItermax=50) distances[j] = embedding else: n_T, _, n_feat_T = tplt_features.shape num_graphs = torch.max(batch) + 1 distances = torch.zeros(num_graphs, n_T) #iterate over the graphs in the batch for i in range(num_graphs): nodes = torch.where(batch == i)[0] G_edges_i, _ = subgraph(nodes, edge_index=G_edges, relabel_nodes=True) G_features_i = G_features[nodes] n, n_feat = G_features_i.shape weights_G = torch.ones(n) / n n_edges = len(G_edges_i[0]) C = torch.sparse_coo_tensor(G_edges_i, torch.ones(n_edges), size=(n, n)).type(torch.float) C = C.to_dense() if not n_feat == n_feat_T: raise ValueError('The templates and the graphs must have the same feature dimension.') for j in range(n_T): template_features = tplt_features[j].reshape(len(tplt_features[j]), n_feat_T) M = dist(G_features_i, template_features).type(torch.float) #if alpha is zero the emd distance is used if multi_alpha and torch.any(alpha > 0): embedding = fused_gromov_wasserstein2(M, C, tplt_adjacencies[j], weights_G, tplt_weights[j], alpha=alpha[j], symmetric=True, max_iter=50) elif not multi_alpha and torch.all(alpha == 0): embedding = emd2(weights_G, tplt_weights[j], M, numItermax=50) elif not multi_alpha and alpha > 0: embedding = fused_gromov_wasserstein2(M, C, tplt_adjacencies[j], weights_G, tplt_weights[j], alpha=alpha, symmetric=True, max_iter=50) else: embedding = emd2(weights_G, tplt_weights[j], M, 
numItermax=50) distances[i, j] = embedding return distances def wasserstein_distance_to_templates(G_features, tplt_features, tplt_weights, batch=None): """ Computes the Wasserstein distances between a graph and graph templates. Parameters ---------- G_features : torch.Tensor, shape (n_nodes, n_features) Node features of the graph. tplt_features : list of torch.Tensor, shape (n_templates, n_template_nodes, n_features) List of the node features of the templates. weights : torch.Tensor, shape (n_templates, n_template_nodes) Weights on the nodes of the templates. batch: torch.Tensor, optional Batch vector which assigns each node to its graph. Returns ------- distances : torch.Tensor, shape (n_templates) if batch=None, else shape (n_graphs, n_templates) Vector of Wasserstein distances between the graph and the templates. """ if batch is None: n, n_feat = G_features.shape n_T, _, n_feat_T = tplt_features.shape weights_G = torch.ones(n) / n if not n_feat == n_feat_T: raise ValueError('The templates and the graphs must have the same feature dimension.') distances = torch.zeros(n_T) for j in range(n_T): template_features = tplt_features[j].reshape(len(tplt_features[j]), n_feat_T) M = dist(G_features, template_features).type(torch.float) distances[j] = emd2(weights_G, tplt_weights[j], M, numItermax=50) else: n_T, _, n_feat_T = tplt_features.shape num_graphs = torch.max(batch) + 1 distances = torch.zeros(num_graphs, n_T) #iterate over the graphs in the batch for i in range(num_graphs): nodes = torch.where(batch == i)[0] G_features_i = G_features[nodes] n, n_feat = G_features_i.shape weights_G = torch.ones(n) / n if not n_feat == n_feat_T: raise ValueError('The templates and the graphs must have the same feature dimension.') for j in range(n_T): template_features = tplt_features[j].reshape(len(tplt_features[j]), n_feat_T) M = dist(G_features_i, template_features).type(torch.float) distances[i, j] = emd2(weights_G, tplt_weights[j], M, numItermax=50) return distances python-pot-0.9.3+dfsg/ot/gromov/000077500000000000000000000000001455713015700165265ustar00rootroot00000000000000python-pot-0.9.3+dfsg/ot/gromov/__init__.py000066400000000000000000000071121455713015700206400ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Solvers related to Gromov-Wasserstein problems. 
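The solvers listed in ``__all__`` below are importable directly from this module, e.g. ``from ot.gromov import gromov_wasserstein``.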
""" # Author: Remi Flamary # Cedric Vincent-Cuaz # # License: MIT License # All submodules and packages from ._utils import (init_matrix, tensor_product, gwloss, gwggrad, update_square_loss, update_kl_loss, update_feature_matrix, init_matrix_semirelaxed) from ._gw import (gromov_wasserstein, gromov_wasserstein2, fused_gromov_wasserstein, fused_gromov_wasserstein2, solve_gromov_linesearch, gromov_barycenters, fgw_barycenters) from ._bregman import (entropic_gromov_wasserstein, entropic_gromov_wasserstein2, BAPG_gromov_wasserstein, BAPG_gromov_wasserstein2, entropic_gromov_barycenters, entropic_fused_gromov_wasserstein, entropic_fused_gromov_wasserstein2, BAPG_fused_gromov_wasserstein, BAPG_fused_gromov_wasserstein2, entropic_fused_gromov_barycenters) from ._estimators import (GW_distance_estimation, pointwise_gromov_wasserstein, sampled_gromov_wasserstein) from ._semirelaxed import (semirelaxed_gromov_wasserstein, semirelaxed_gromov_wasserstein2, semirelaxed_fused_gromov_wasserstein, semirelaxed_fused_gromov_wasserstein2, solve_semirelaxed_gromov_linesearch, entropic_semirelaxed_gromov_wasserstein, entropic_semirelaxed_gromov_wasserstein2, entropic_semirelaxed_fused_gromov_wasserstein, entropic_semirelaxed_fused_gromov_wasserstein2) from ._dictionary import (gromov_wasserstein_dictionary_learning, gromov_wasserstein_linear_unmixing, fused_gromov_wasserstein_dictionary_learning, fused_gromov_wasserstein_linear_unmixing) __all__ = ['init_matrix', 'tensor_product', 'gwloss', 'gwggrad', 'update_square_loss', 'update_kl_loss', 'update_feature_matrix', 'init_matrix_semirelaxed', 'gromov_wasserstein', 'gromov_wasserstein2', 'fused_gromov_wasserstein', 'fused_gromov_wasserstein2', 'solve_gromov_linesearch', 'gromov_barycenters', 'fgw_barycenters', 'entropic_gromov_wasserstein', 'entropic_gromov_wasserstein2', 'BAPG_gromov_wasserstein', 'BAPG_gromov_wasserstein2', 'entropic_gromov_barycenters', 'entropic_fused_gromov_wasserstein', 'entropic_fused_gromov_wasserstein2', 'BAPG_fused_gromov_wasserstein', 'BAPG_fused_gromov_wasserstein2', 'entropic_fused_gromov_barycenters', 'GW_distance_estimation', 'pointwise_gromov_wasserstein', 'sampled_gromov_wasserstein', 'semirelaxed_gromov_wasserstein', 'semirelaxed_gromov_wasserstein2', 'semirelaxed_fused_gromov_wasserstein', 'semirelaxed_fused_gromov_wasserstein2', 'solve_semirelaxed_gromov_linesearch', 'entropic_semirelaxed_gromov_wasserstein', 'entropic_semirelaxed_gromov_wasserstein2', 'entropic_semirelaxed_fused_gromov_wasserstein', 'entropic_semirelaxed_fused_gromov_wasserstein2', 'gromov_wasserstein_dictionary_learning', 'gromov_wasserstein_linear_unmixing', 'fused_gromov_wasserstein_dictionary_learning', 'fused_gromov_wasserstein_linear_unmixing'] python-pot-0.9.3+dfsg/ot/gromov/_bregman.py000066400000000000000000002077471455713015700206730ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Bregman projections solvers for entropic Gromov-Wasserstein """ # Author: Erwan Vautier # Nicolas Courty # RĂ©mi Flamary # Titouan Vayer # CĂ©dric Vincent-Cuaz # # License: MIT License import numpy as np import warnings from ..bregman import sinkhorn from ..utils import dist, UndefinedParameter, list_to_array, check_random_state, unif from ..backend import get_backend from ._utils import init_matrix, gwloss, gwggrad from ._utils import update_square_loss, update_kl_loss, update_feature_matrix def entropic_gromov_wasserstein( C1, C2, p=None, q=None, loss_fun='square_loss', epsilon=0.1, symmetric=None, G0=None, max_iter=1000, tol=1e-9, solver='PGD', 
warmstart=False, verbose=False, log=False, **kwargs): r""" Returns the Gromov-Wasserstein transport between :math:`(\mathbf{C_1}, \mathbf{p})` and :math:`(\mathbf{C_2}, \mathbf{q})` estimated using Sinkhorn projections. If `solver="PGD"`, the function solves the following entropic-regularized Gromov-Wasserstein optimization problem using Projected Gradient Descent [12]: .. math:: \mathbf{T}^* \in \mathop{\arg\min}_\mathbf{T} \quad \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j} \mathbf{T}_{k,l} - \epsilon H(\mathbf{T}) s.t. \ \mathbf{T} \mathbf{1} &= \mathbf{p} \mathbf{T}^T \mathbf{1} &= \mathbf{q} \mathbf{T} &\geq 0 Else if `solver="PPA"`, the function solves the following Gromov-Wasserstein optimization problem using Proximal Point Algorithm [51]: .. math:: \mathbf{T}^* \in \mathop{\arg\min}_\mathbf{T} \quad \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j} \mathbf{T}_{k,l} s.t. \ \mathbf{T} \mathbf{1} &= \mathbf{p} \mathbf{T}^T \mathbf{1} &= \mathbf{q} \mathbf{T} &\geq 0 Where : - :math:`\mathbf{C_1}`: Metric cost matrix in the source space - :math:`\mathbf{C_2}`: Metric cost matrix in the target space - :math:`\mathbf{p}`: distribution in the source space - :math:`\mathbf{q}`: distribution in the target space - `L`: loss function to account for the misfit between the similarity matrices - `H`: entropy .. note:: If the inner solver `ot.sinkhorn` did not convergence, the optimal coupling :math:`\mathbf{T}` returned by this function does not necessarily satisfy the marginal constraints :math:`\mathbf{T}\mathbf{1}=\mathbf{p}` and :math:`\mathbf{T}^T\mathbf{1}=\mathbf{q}`. So the returned Gromov-Wasserstein loss does not necessarily satisfy distance properties and may be negative. Parameters ---------- C1 : array-like, shape (ns, ns) Metric cost matrix in the source space C2 : array-like, shape (nt, nt) Metric cost matrix in the target space p : array-like, shape (ns,), optional Distribution in the source space. If let to its default value None, uniform distribution is taken. q : array-like, shape (nt,), optional Distribution in the target space. If let to its default value None, uniform distribution is taken. loss_fun : string, optional (default='square_loss') Loss function used for the solver either 'square_loss' or 'kl_loss' epsilon : float, optional Regularization term >0 symmetric : bool, optional Either C1 and C2 are to be assumed symmetric or not. If let to its default None value, a symmetry test will be conducted. Else if set to True (resp. False), C1 and C2 will be assumed symmetric (resp. asymmetric). G0: array-like, shape (ns,nt), optional If None the initial transport plan of the solver is pq^T. Otherwise G0 will be used as initial transport of the solver. G0 is not required to satisfy marginal constraints but we strongly recommend it to correctly estimate the GW distance. max_iter : int, optional Max number of iterations tol : float, optional Stop threshold on error (>0) solver: string, optional Solver to use either 'PGD' for Projected Gradient Descent or 'PPA' for Proximal Point Algorithm. Default value is 'PGD'. warmstart: bool, optional Either to perform warmstart of dual potentials in the successive Sinkhorn projections. verbose : bool, optional Print information along iterations log : bool, optional Record log if True. **kwargs: dict parameters can be directly passed to the ot.sinkhorn solver. Such as `numItermax` and `stopThr` to control its estimation precision, e.g [51] suggests to use `numItermax=1`. 
Returns ------- T : array-like, shape (`ns`, `nt`) Optimal coupling between the two spaces References ---------- .. [12] Gabriel PeyrĂ©, Marco Cuturi, and Justin Solomon, "Gromov-Wasserstein averaging of kernel and distance matrices." International Conference on Machine Learning (ICML). 2016. .. [47] Chowdhury, S., & MĂ©moli, F. (2019). The gromov–wasserstein distance between networks and stable network invariants. Information and Inference: A Journal of the IMA, 8(4), 757-787. .. [51] Xu, H., Luo, D., Zha, H., & Duke, L. C. (2019). Gromov-wasserstein learning for graph matching and node embedding. In International Conference on Machine Learning (ICML), 2019. """ if solver not in ['PGD', 'PPA']: raise ValueError("Unknown solver '%s'. Pick one in ['PGD', 'PPA']." % solver) if loss_fun not in ('square_loss', 'kl_loss'): raise ValueError(f"Unknown `loss_fun='{loss_fun}'`. Use one of: {'square_loss', 'kl_loss'}.") C1, C2 = list_to_array(C1, C2) arr = [C1, C2] if p is not None: arr.append(list_to_array(p)) else: p = unif(C1.shape[0], type_as=C1) if q is not None: arr.append(list_to_array(q)) else: q = unif(C2.shape[0], type_as=C2) if G0 is not None: arr.append(G0) nx = get_backend(*arr) if G0 is None: G0 = nx.outer(p, q) T = G0 constC, hC1, hC2 = init_matrix(C1, C2, p, q, loss_fun, nx) if symmetric is None: symmetric = nx.allclose(C1, C1.T, atol=1e-10) and nx.allclose(C2, C2.T, atol=1e-10) if not symmetric: constCt, hC1t, hC2t = init_matrix(C1.T, C2.T, p, q, loss_fun, nx) cpt = 0 err = 1 if warmstart: # initialize potentials to cope with ot.sinkhorn initialization N1, N2 = C1.shape[0], C2.shape[0] mu = nx.zeros(N1, type_as=C1) - np.log(N1) nu = nx.zeros(N2, type_as=C2) - np.log(N2) if log: log = {'err': []} while (err > tol and cpt < max_iter): Tprev = T # compute the gradient if symmetric: tens = gwggrad(constC, hC1, hC2, T, nx) else: tens = 0.5 * (gwggrad(constC, hC1, hC2, T, nx) + gwggrad(constCt, hC1t, hC2t, T, nx)) if solver == 'PPA': tens = tens - epsilon * nx.log(T) if warmstart: T, loginn = sinkhorn(p, q, tens, epsilon, method='sinkhorn', log=True, warmstart=(mu, nu), **kwargs) mu = epsilon * nx.log(loginn['u']) nu = epsilon * nx.log(loginn['v']) else: T = sinkhorn(p, q, tens, epsilon, method='sinkhorn', **kwargs) if cpt % 10 == 0: # we can speed up the process by checking for the error only all # the 10th iterations err = nx.norm(T - Tprev) if log: log['err'].append(err) if verbose: if cpt % 200 == 0: print('{:5s}|{:12s}'.format( 'It.', 'Err') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(cpt, err)) cpt += 1 if abs(nx.sum(T) - 1) > 1e-5: warnings.warn("Solver failed to produce a transport plan. You might " "want to increase the regularization parameter `epsilon`.") if log: log['gw_dist'] = gwloss(constC, hC1, hC2, T, nx) return T, log else: return T def entropic_gromov_wasserstein2( C1, C2, p=None, q=None, loss_fun='square_loss', epsilon=0.1, symmetric=None, G0=None, max_iter=1000, tol=1e-9, solver='PGD', warmstart=False, verbose=False, log=False, **kwargs): r""" Returns the Gromov-Wasserstein loss :math:`\mathbf{GW}` between :math:`(\mathbf{C_1}, \mathbf{p})` and :math:`(\mathbf{C_2}, \mathbf{q})` estimated using Sinkhorn projections. To recover the Gromov-Wasserstein distance as defined in [13] compute :math:`d_{GW} = \frac{1}{2} \sqrt{\mathbf{GW}}`. If `solver="PGD"`, the function solves the following entropic-regularized Gromov-Wasserstein optimization problem using Projected Gradient Descent [12]: .. 
math:: \mathbf{GW} = \mathop{\min}_\mathbf{T} \quad \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j} \mathbf{T}_{k,l} - \epsilon H(\mathbf{T}) s.t. \ \mathbf{T} \mathbf{1} &= \mathbf{p} \mathbf{T}^T \mathbf{1} &= \mathbf{q} \mathbf{T} &\geq 0 Else if `solver="PPA"`, the function solves the following Gromov-Wasserstein optimization problem using Proximal Point Algorithm [51]: .. math:: \mathbf{GW} = \mathop{\min}_\mathbf{T} \quad \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j} \mathbf{T}_{k,l} s.t. \ \mathbf{T} \mathbf{1} &= \mathbf{p} \mathbf{T}^T \mathbf{1} &= \mathbf{q} \mathbf{T} &\geq 0 Where : - :math:`\mathbf{C_1}`: Metric cost matrix in the source space - :math:`\mathbf{C_2}`: Metric cost matrix in the target space - :math:`\mathbf{p}`: distribution in the source space - :math:`\mathbf{q}`: distribution in the target space - `L`: loss function to account for the misfit between the similarity matrices - `H`: entropy .. note:: If the inner solver `ot.sinkhorn` did not convergence, the optimal coupling :math:`\mathbf{T}` returned by this function does not necessarily satisfy the marginal constraints :math:`\mathbf{T}\mathbf{1}=\mathbf{p}` and :math:`\mathbf{T}^T\mathbf{1}=\mathbf{q}`. So the returned Gromov-Wasserstein loss does not necessarily satisfy distance properties and may be negative. Parameters ---------- C1 : array-like, shape (ns, ns) Metric cost matrix in the source space C2 : array-like, shape (nt, nt) Metric cost matrix in the target space p : array-like, shape (ns,), optional Distribution in the source space. If let to its default value None, uniform distribution is taken. q : array-like, shape (nt,), optional Distribution in the target space. If let to its default value None, uniform distribution is taken. loss_fun : string, optional (default='square_loss') Loss function used for the solver either 'square_loss' or 'kl_loss' epsilon : float, optional Regularization term >0 symmetric : bool, optional Either C1 and C2 are to be assumed symmetric or not. If let to its default None value, a symmetry test will be conducted. Else if set to True (resp. False), C1 and C2 will be assumed symmetric (resp. asymmetric). G0: array-like, shape (ns,nt), optional If None the initial transport plan of the solver is pq^T. Otherwise G0 will be used as initial transport of the solver. G0 is not required to satisfy marginal constraints but we strongly recommand it to correcly estimate the GW distance. max_iter : int, optional Max number of iterations tol : float, optional Stop threshold on error (>0) solver: string, optional Solver to use either 'PGD' for Projected Gradient Descent or 'PPA' for Proximal Point Algorithm. Default value is 'PGD'. warmstart: bool, optional Either to perform warmstart of dual potentials in the successive Sinkhorn projections. verbose : bool, optional Print information along iterations log : bool, optional Record log if True. **kwargs: dict parameters can be directly passed to the ot.sinkhorn solver. Such as `numItermax` and `stopThr` to control its estimation precision, e.g [51] suggests to use `numItermax=1`. Returns ------- gw_dist : float Gromov-Wasserstein distance References ---------- .. [12] Gabriel PeyrĂ©, Marco Cuturi, and Justin Solomon, "Gromov-Wasserstein averaging of kernel and distance matrices." International Conference on Machine Learning (ICML). 2016. .. [51] Xu, H., Luo, D., Zha, H., & Duke, L. C. (2019). Gromov-wasserstein learning for graph matching and node embedding. 
In International Conference on Machine Learning (ICML), 2019. """ T, logv = entropic_gromov_wasserstein( C1, C2, p, q, loss_fun, epsilon, symmetric, G0, max_iter, tol, solver, warmstart, verbose, log=True, **kwargs) logv['T'] = T if log: return logv['gw_dist'], logv else: return logv['gw_dist'] def BAPG_gromov_wasserstein( C1, C2, p=None, q=None, loss_fun='square_loss', epsilon=0.1, symmetric=None, G0=None, max_iter=1000, tol=1e-9, marginal_loss=False, verbose=False, log=False): r""" Returns the Gromov-Wasserstein transport between :math:`(\mathbf{C_1}, \mathbf{p})` and :math:`(\mathbf{C_2}, \mathbf{q})` estimated using Bregman Alternated Projected Gradient method. If `marginal_loss=True`, the function solves the following Gromov-Wasserstein optimization problem : .. math:: \mathbf{T}^* \in \mathop{\arg\min}_\mathbf{T} \quad \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j} \mathbf{T}_{k,l} s.t. \ \mathbf{T} \mathbf{1} &= \mathbf{p} \mathbf{T}^T \mathbf{1} &= \mathbf{q} \mathbf{T} &\geq 0 Else, the function solves an equivalent problem [63], where constant terms only depending on the marginals :math:`\mathbf{p}`: and :math:`\mathbf{q}`: are discarded while assuming that L decomposes as in Proposition 1 in [12]: .. math:: \mathbf{T}^* \in\mathop{\arg\min}_\mathbf{T} \quad - \langle h_1(\mathbf{C}_1) \mathbf{T} h_2(\mathbf{C_2})^\top , \mathbf{T} \rangle_F s.t. \ \mathbf{T} \mathbf{1} &= \mathbf{p} \mathbf{T}^T \mathbf{1} &= \mathbf{q} \mathbf{T} &\geq 0 Where : - :math:`\mathbf{C_1}`: Metric cost matrix in the source space - :math:`\mathbf{C_2}`: Metric cost matrix in the target space - :math:`\mathbf{p}`: distribution in the source space - :math:`\mathbf{q}`: distribution in the target space - `L`: loss function to account for the misfit between the similarity matrices satisfying :math:`L(a, b) = f_1(a) + f_2(b) - h_1(a) h_2(b)` .. note:: By algorithmic design the optimal coupling :math:`\mathbf{T}` returned by this function does not necessarily satisfy the marginal constraints :math:`\mathbf{T}\mathbf{1}=\mathbf{p}` and :math:`\mathbf{T}^T\mathbf{1}=\mathbf{q}`. So the returned Gromov-Wasserstein loss does not necessarily satisfy distance properties and may be negative. Parameters ---------- C1 : array-like, shape (ns, ns) Metric cost matrix in the source space C2 : array-like, shape (nt, nt) Metric cost matrix in the target space p : array-like, shape (ns,), optional Distribution in the source space. If let to its default value None, uniform distribution is taken. q : array-like, shape (nt,), optional Distribution in the target space. If let to its default value None, uniform distribution is taken. loss_fun : string, optional (default='square_loss') Loss function used for the solver either 'square_loss' or 'kl_loss' epsilon : float, optional Regularization term >0 symmetric : bool, optional Either C1 and C2 are to be assumed symmetric or not. If let to its default None value, a symmetry test will be conducted. Else if set to True (resp. False), C1 and C2 will be assumed symmetric (resp. asymmetric). G0: array-like, shape (ns,nt), optional If None the initial transport plan of the solver is pq^T. Otherwise G0 will be used as initial transport of the solver. G0 is not required to satisfy marginal constraints but we strongly recommend it to correctly estimate the GW distance. max_iter : int, optional Max number of iterations tol : float, optional Stop threshold on error (>0) marginal_loss: bool, optional. Default is False. 
Include constant marginal terms or not in the objective function. verbose : bool, optional Print information along iterations log : bool, optional Record log if True. Returns ------- T : array-like, shape (`ns`, `nt`) Optimal coupling between the two spaces References ---------- .. [63] Li, J., Tang, J., Kong, L., Liu, H., Li, J., So, A. M. C., & Blanchet, J. "A Convergent Single-Loop Algorithm for Relaxation of Gromov-Wasserstein in Graph Data". International Conference on Learning Representations (ICLR), 2022. """ if loss_fun not in ('square_loss', 'kl_loss'): raise ValueError(f"Unknown `loss_fun='{loss_fun}'`. Use one of: {'square_loss', 'kl_loss'}.") C1, C2 = list_to_array(C1, C2) arr = [C1, C2] if p is not None: arr.append(list_to_array(p)) else: p = unif(C1.shape[0], type_as=C1) if q is not None: arr.append(list_to_array(q)) else: q = unif(C2.shape[0], type_as=C2) if G0 is not None: arr.append(G0) nx = get_backend(*arr) if G0 is None: G0 = nx.outer(p, q) T = G0 constC, hC1, hC2 = init_matrix(C1, C2, p, q, loss_fun, nx) if symmetric is None: symmetric = nx.allclose(C1, C1.T, atol=1e-10) and nx.allclose(C2, C2.T, atol=1e-10) if not symmetric: constCt, hC1t, hC2t = init_matrix(C1.T, C2.T, p, q, loss_fun, nx) if marginal_loss: if symmetric: def df(T): return gwggrad(constC, hC1, hC2, T, nx) else: def df(T): return 0.5 * (gwggrad(constC, hC1, hC2, T, nx) + gwggrad(constCt, hC1t, hC2t, T, nx)) else: if symmetric: def df(T): A = - nx.dot(nx.dot(hC1, T), hC2.T) return 2 * A else: def df(T): A = - nx.dot(nx.dot(hC1, T), hC2t) At = - nx.dot(nx.dot(hC1t, T), hC2) return A + At cpt = 0 err = 1e15 if log: log = {'err': []} while (err > tol and cpt < max_iter): Tprev = T # rows update T = T * nx.exp(- df(T) / epsilon) row_scaling = p / nx.sum(T, 1) T = nx.reshape(row_scaling, (-1, 1)) * T # columns update T = T * nx.exp(- df(T) / epsilon) column_scaling = q / nx.sum(T, 0) T = nx.reshape(column_scaling, (1, -1)) * T if cpt % 10 == 0: # we can speed up the process by checking for the error only all # the 10th iterations err = nx.norm(T - Tprev) if log: log['err'].append(err) if verbose: if cpt % 200 == 0: print('{:5s}|{:12s}'.format( 'It.', 'Err') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(cpt, err)) cpt += 1 if nx.any(nx.isnan(T)): warnings.warn("Solver failed to produce a transport plan. You might " "want to increase the regularization parameter `epsilon`.", UserWarning) if log: log['gw_dist'] = gwloss(constC, hC1, hC2, T, nx) if not marginal_loss: log['loss'] = log['gw_dist'] - nx.sum(constC * T) return T, log else: return T def BAPG_gromov_wasserstein2( C1, C2, p=None, q=None, loss_fun='square_loss', epsilon=0.1, symmetric=None, G0=None, max_iter=1000, tol=1e-9, marginal_loss=False, verbose=False, log=False): r""" Returns the Gromov-Wasserstein loss :math:`\mathbf{GW}` between :math:`(\mathbf{C_1}, \mathbf{p})` and :math:`(\mathbf{C_2}, \mathbf{q})` estimated using Bregman Alternated Projected Gradient method. If `marginal_loss=True`, the function solves the following Gromov-Wasserstein optimization problem : .. math:: \mathbf{GW} = \mathop{\min}_\mathbf{T} \quad \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j} \mathbf{T}_{k,l} s.t. \ \mathbf{T} \mathbf{1} &= \mathbf{p} \mathbf{T}^T \mathbf{1} &= \mathbf{q} \mathbf{T} &\geq 0 Else, the function solves an equivalent problem [63, 64], where constant terms only depending on the marginals :math:`\mathbf{p}`: and :math:`\mathbf{q}`: are discarded while assuming that L decomposes as in Proposition 1 in [12]: .. 
math:: \mathop{\min}_\mathbf{T} \quad - \langle h_1(\mathbf{C}_1) \mathbf{T} h_2(\mathbf{C_2})^\top , \mathbf{T} \rangle_F s.t. \ \mathbf{T} \mathbf{1} &= \mathbf{p} \mathbf{T}^T \mathbf{1} &= \mathbf{q} \mathbf{T} &\geq 0 Where : - :math:`\mathbf{C_1}`: Metric cost matrix in the source space - :math:`\mathbf{C_2}`: Metric cost matrix in the target space - :math:`\mathbf{p}`: distribution in the source space - :math:`\mathbf{q}`: distribution in the target space - `L`: loss function to account for the misfit between the similarity matrices satisfying :math:`L(a, b) = f_1(a) + f_2(b) - h_1(a) h_2(b)` .. note:: By algorithmic design the optimal coupling :math:`\mathbf{T}` returned by this function does not necessarily satisfy the marginal constraints :math:`\mathbf{T}\mathbf{1}=\mathbf{p}` and :math:`\mathbf{T}^T\mathbf{1}=\mathbf{q}`. So the returned Gromov-Wasserstein loss does not necessarily satisfy distance properties and may be negative. Parameters ---------- C1 : array-like, shape (ns, ns) Metric cost matrix in the source space C2 : array-like, shape (nt, nt) Metric cost matrix in the target space p : array-like, shape (ns,), optional Distribution in the source space. If let to its default value None, uniform distribution is taken. q : array-like, shape (nt,), optional Distribution in the target space. If let to its default value None, uniform distribution is taken. loss_fun : string, optional (default='square_loss') Loss function used for the solver either 'square_loss' or 'kl_loss' epsilon : float, optional Regularization term >0 symmetric : bool, optional Either C1 and C2 are to be assumed symmetric or not. If let to its default None value, a symmetry test will be conducted. Else if set to True (resp. False), C1 and C2 will be assumed symmetric (resp. asymmetric). G0: array-like, shape (ns,nt), optional If None the initial transport plan of the solver is pq^T. Otherwise G0 will be used as initial transport of the solver. G0 is not required to satisfy marginal constraints but we strongly recommand it to correcly estimate the GW distance. max_iter : int, optional Max number of iterations tol : float, optional Stop threshold on error (>0) marginal_loss: bool, optional. Default is False. Include constant marginal terms or not in the objective function. verbose : bool, optional Print information along iterations log : bool, optional Record log if True. Returns ------- gw_dist : float Gromov-Wasserstein distance References ---------- .. [63] Li, J., Tang, J., Kong, L., Liu, H., Li, J., So, A. M. C., & Blanchet, J. "A Convergent Single-Loop Algorithm for Relaxation of Gromov-Wasserstein in Graph Data". International Conference on Learning Representations (ICLR), 2023. """ T, logv = BAPG_gromov_wasserstein( C1, C2, p, q, loss_fun, epsilon, symmetric, G0, max_iter, tol, marginal_loss, verbose, log=True) logv['T'] = T if log: return logv['gw_dist'], logv else: return logv['gw_dist'] def entropic_gromov_barycenters( N, Cs, ps=None, p=None, lambdas=None, loss_fun='square_loss', epsilon=0.1, symmetric=True, max_iter=1000, tol=1e-9, stop_criterion='barycenter', warmstartT=False, verbose=False, log=False, init_C=None, random_state=None, **kwargs): r""" Returns the Gromov-Wasserstein barycenters of `S` measured similarity matrices :math:`(\mathbf{C}_s)_{1 \leq s \leq S}` estimated using Gromov-Wasserstein transports from Sinkhorn projections. The function solves the following optimization problem: .. 
math:: \mathbf{C}^* = \mathop{\arg \min}_{\mathbf{C}\in \mathbb{R}^{N \times N}} \quad \sum_s \lambda_s \mathrm{GW}(\mathbf{C}, \mathbf{C}_s, \mathbf{p}, \mathbf{p}_s) Where : - :math:`\mathbf{C}_s`: metric cost matrix - :math:`\mathbf{p}_s`: distribution Parameters ---------- N : int Size of the targeted barycenter Cs : list of S array-like of shape (ns,ns) Metric cost matrices ps : list of S array-like of shape (ns,), optional Sample weights in the `S` spaces. If let to its default value None, uniform distributions are taken. p : array-like, shape (N,), optional Weights in the targeted barycenter. If let to its default value None, uniform distribution is taken. lambdas : list of float, optional List of the `S` spaces' weights. If let to its default value None, uniform weights are taken. loss_fun : string, optional (default='square_loss') Loss function used for the solver either 'square_loss' or 'kl_loss' epsilon : float, optional Regularization term >0 symmetric : bool, optional. Either structures are to be assumed symmetric or not. Default value is True. Else if set to True (resp. False), C1 and C2 will be assumed symmetric (resp. asymmetric). max_iter : int, optional Max number of iterations tol : float, optional Stop threshold on error (>0) stop_criterion : str, optional. Default is 'barycenter'. Convergence criterion taking values in ['barycenter', 'loss']. If set to 'barycenter' uses absolute norm variations of estimated barycenters. Else if set to 'loss' uses the relative variations of the loss. warmstartT: bool, optional Either to perform warmstart of transport plans in the successive gromov-wasserstein transport problems. verbose : bool, optional Print information along iterations. log : bool, optional Record log if True. init_C : bool | array-like, shape (N, N) Random initial value for the :math:`\mathbf{C}` matrix provided by user. random_state : int or RandomState instance, optional Fix the seed for reproducibility **kwargs: dict parameters can be directly passed to the `ot.entropic_gromov_wasserstein` solver. Returns ------- C : array-like, shape (`N`, `N`) Similarity matrix in the barycenter space (permutated arbitrarily) log : dict Only returned when log=True. It contains the keys: - :math:`\mathbf{T}`: list of (`N`, `ns`) transport matrices - :math:`\mathbf{p}`: (`N`,) barycenter weights - values used in convergence evaluation. References ---------- .. [12] Gabriel PeyrĂ©, Marco Cuturi, and Justin Solomon, "Gromov-Wasserstein averaging of kernel and distance matrices." International Conference on Machine Learning (ICML). 2016. """ if loss_fun not in ('square_loss', 'kl_loss'): raise ValueError(f"Unknown `loss_fun='{loss_fun}'`. Use one of: {'square_loss', 'kl_loss'}.") if stop_criterion not in ['barycenter', 'loss']: raise ValueError(f"Unknown `stop_criterion='{stop_criterion}'`. Use one of: {'barycenter', 'loss'}.") Cs = list_to_array(*Cs) arr = [*Cs] if ps is not None: arr += list_to_array(*ps) else: ps = [unif(C.shape[0], type_as=C) for C in Cs] if p is not None: arr.append(list_to_array(p)) else: p = unif(N, type_as=Cs[0]) nx = get_backend(*arr) S = len(Cs) if lambdas is None: lambdas = [1. 
/ S] * S # Initialization of C : random SPD matrix (if not provided by user) if init_C is None: generator = check_random_state(random_state) xalea = generator.randn(N, 2) C = dist(xalea, xalea) C /= C.max() C = nx.from_numpy(C, type_as=p) else: C = init_C cpt = 0 err = 1e15 # either the error on 'barycenter' or 'loss' if warmstartT: T = [None] * S if stop_criterion == 'barycenter': inner_log = False else: inner_log = True curr_loss = 1e15 if log: log_ = {} log_['err'] = [] if stop_criterion == 'loss': log_['loss'] = [] while (err > tol) and (cpt < max_iter): if stop_criterion == 'barycenter': Cprev = C else: prev_loss = curr_loss # get transport plans if warmstartT: res = [entropic_gromov_wasserstein( C, Cs[s], p, ps[s], loss_fun, epsilon, symmetric, T[s], max_iter, 1e-4, verbose=verbose, log=inner_log, **kwargs) for s in range(S)] else: res = [entropic_gromov_wasserstein( C, Cs[s], p, ps[s], loss_fun, epsilon, symmetric, None, max_iter, 1e-4, verbose=verbose, log=inner_log, **kwargs) for s in range(S)] if stop_criterion == 'barycenter': T = res else: T = [output[0] for output in res] curr_loss = np.sum([output[1]['gw_dist'] for output in res]) # update barycenters if loss_fun == 'square_loss': C = update_square_loss(p, lambdas, T, Cs, nx) elif loss_fun == 'kl_loss': C = update_kl_loss(p, lambdas, T, Cs, nx) # update convergence criterion if stop_criterion == 'barycenter': err = nx.norm(C - Cprev) if log: log_['err'].append(err) else: err = abs(curr_loss - prev_loss) / prev_loss if prev_loss != 0. else np.nan if log: log_['loss'].append(curr_loss) log_['err'].append(err) if verbose: if cpt % 200 == 0: print('{:5s}|{:12s}'.format( 'It.', 'Err') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(cpt, err)) cpt += 1 if log: log_['T'] = T log_['p'] = p return C, log_ else: return C def entropic_fused_gromov_wasserstein( M, C1, C2, p=None, q=None, loss_fun='square_loss', epsilon=0.1, symmetric=None, alpha=0.5, G0=None, max_iter=1000, tol=1e-9, solver='PGD', warmstart=False, verbose=False, log=False, **kwargs): r""" Returns the Fused Gromov-Wasserstein transport between :math:`(\mathbf{C_1}, \mathbf{Y_1}, \mathbf{p})` and :math:`(\mathbf{C_2}, \mathbf{Y_2}, \mathbf{q})` with pairwise distance matrix :math:`\mathbf{M}` between node feature matrices :math:`\mathbf{Y_1}` and :math:`\mathbf{Y_2}`, estimated using Sinkhorn projections. If `solver="PGD"`, the function solves the following entropic-regularized Fused Gromov-Wasserstein optimization problem using Projected Gradient Descent [12]: .. math:: \mathbf{T}^* \in \mathop{\arg\min}_\mathbf{T} \quad (1 - \alpha) \langle \mathbf{T}, \mathbf{M} \rangle_F + \alpha \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j} \mathbf{T}_{k,l} - \epsilon H(\mathbf{T}) s.t. \ \mathbf{T} \mathbf{1} &= \mathbf{p} \mathbf{T}^T \mathbf{1} &= \mathbf{q} \mathbf{T} &\geq 0 Else if `solver="PPA"`, the function solves the following Fused Gromov-Wasserstein optimization problem using Proximal Point Algorithm [51]: .. math:: \mathbf{T}^* \in\mathop{\arg\min}_\mathbf{T} \quad (1 - \alpha) \langle \mathbf{T}, \mathbf{M} \rangle_F + \alpha \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j} \mathbf{T}_{k,l} s.t. 
\ \mathbf{T} \mathbf{1} &= \mathbf{p} \mathbf{T}^T \mathbf{1} &= \mathbf{q} \mathbf{T} &\geq 0 Where : - :math:`\mathbf{M}`: metric cost matrix between features across domains - :math:`\mathbf{C_1}`: Metric cost matrix in the source space - :math:`\mathbf{C_2}`: Metric cost matrix in the target space - :math:`\mathbf{p}`: distribution in the source space - :math:`\mathbf{q}`: distribution in the target space - `L`: loss function to account for the misfit between the similarity and feature matrices - `H`: entropy - :math:`\alpha`: trade-off parameter .. note:: If the inner solver `ot.sinkhorn` did not convergence, the optimal coupling :math:`\mathbf{T}` returned by this function does not necessarily satisfy the marginal constraints :math:`\mathbf{T}\mathbf{1}=\mathbf{p}` and :math:`\mathbf{T}^T\mathbf{1}=\mathbf{q}`. So the returned Fused Gromov-Wasserstein loss does not necessarily satisfy distance properties and may be negative. Parameters ---------- M : array-like, shape (ns, nt) Metric cost matrix between features across domains C1 : array-like, shape (ns, ns) Metric cost matrix in the source space C2 : array-like, shape (nt, nt) Metric cost matrix in the target space p : array-like, shape (ns,), optional Distribution in the source space. If let to its default value None, uniform distribution is taken. q : array-like, shape (nt,), optional Distribution in the target space. If let to its default value None, uniform distribution is taken. loss_fun : string, optional (default='square_loss') Loss function used for the solver either 'square_loss' or 'kl_loss' epsilon : float, optional Regularization term >0 symmetric : bool, optional Either C1 and C2 are to be assumed symmetric or not. If let to its default None value, a symmetry test will be conducted. Else if set to True (resp. False), C1 and C2 will be assumed symmetric (resp. asymmetric). alpha : float, optional Trade-off parameter (0 < alpha < 1) G0: array-like, shape (ns,nt), optional If None the initial transport plan of the solver is pq^T. Otherwise G0 will be used as initial transport of the solver. G0 is not required to satisfy marginal constraints but we strongly recommend it to correctly estimate the GW distance. max_iter : int, optional Max number of iterations tol : float, optional Stop threshold on error (>0) solver: string, optional Solver to use either 'PGD' for Projected Gradient Descent or 'PPA' for Proximal Point Algorithm. Default value is 'PGD'. warmstart: bool, optional Either to perform warmstart of dual potentials in the successive Sinkhorn projections. verbose : bool, optional Print information along iterations log : bool, optional Record log if True. **kwargs: dict parameters can be directly passed to the ot.sinkhorn solver. Such as `numItermax` and `stopThr` to control its estimation precision, e.g [51] suggests to use `numItermax=1`. Returns ------- T : array-like, shape (`ns`, `nt`) Optimal coupling between the two joint spaces References ---------- .. [12] Gabriel PeyrĂ©, Marco Cuturi, and Justin Solomon, "Gromov-Wasserstein averaging of kernel and distance matrices." International Conference on Machine Learning (ICML). 2016. .. [47] Chowdhury, S., & MĂ©moli, F. (2019). The gromov–wasserstein distance between networks and stable network invariants. Information and Inference: A Journal of the IMA, 8(4), 757-787. .. [51] Xu, H., Luo, D., Zha, H., & Duke, L. C. (2019). Gromov-wasserstein learning for graph matching and node embedding. In International Conference on Machine Learning (ICML), 2019. .. 
[24] Vayer Titouan, Chapel Laetitia, Flamary RĂ©mi, Tavenard Romain and Courty Nicolas "Optimal Transport for structured data with application on graphs", International Conference on Machine Learning (ICML). 2019. """ if solver not in ['PGD', 'PPA']: raise ValueError("Unknown solver '%s'. Pick one in ['PGD', 'PPA']." % solver) if loss_fun not in ('square_loss', 'kl_loss'): raise ValueError(f"Unknown `loss_fun='{loss_fun}'`. Use one of: {'square_loss', 'kl_loss'}.") M, C1, C2 = list_to_array(M, C1, C2) arr = [M, C1, C2] if p is not None: arr.append(list_to_array(p)) else: p = unif(C1.shape[0], type_as=C1) if q is not None: arr.append(list_to_array(q)) else: q = unif(C2.shape[0], type_as=C2) if G0 is not None: arr.append(G0) nx = get_backend(*arr) if G0 is None: G0 = nx.outer(p, q) T = G0 constC, hC1, hC2 = init_matrix(C1, C2, p, q, loss_fun, nx) if symmetric is None: symmetric = nx.allclose(C1, C1.T, atol=1e-10) and nx.allclose(C2, C2.T, atol=1e-10) if not symmetric: constCt, hC1t, hC2t = init_matrix(C1.T, C2.T, p, q, loss_fun, nx) cpt = 0 err = 1 if warmstart: # initialize potentials to cope with ot.sinkhorn initialization N1, N2 = C1.shape[0], C2.shape[0] mu = nx.zeros(N1, type_as=C1) - np.log(N1) nu = nx.zeros(N2, type_as=C2) - np.log(N2) if log: log = {'err': []} while (err > tol and cpt < max_iter): Tprev = T # compute the gradient if symmetric: tens = alpha * gwggrad(constC, hC1, hC2, T, nx) + (1 - alpha) * M else: tens = (alpha * 0.5) * (gwggrad(constC, hC1, hC2, T, nx) + gwggrad(constCt, hC1t, hC2t, T, nx)) + (1 - alpha) * M if solver == 'PPA': tens = tens - epsilon * nx.log(T) if warmstart: T, loginn = sinkhorn(p, q, tens, epsilon, method='sinkhorn', log=True, warmstart=(mu, nu), **kwargs) mu = epsilon * nx.log(loginn['u']) nu = epsilon * nx.log(loginn['v']) else: T = sinkhorn(p, q, tens, epsilon, method='sinkhorn', **kwargs) if cpt % 10 == 0: # we can speed up the process by checking for the error only all # the 10th iterations err = nx.norm(T - Tprev) if log: log['err'].append(err) if verbose: if cpt % 200 == 0: print('{:5s}|{:12s}'.format( 'It.', 'Err') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(cpt, err)) cpt += 1 if abs(nx.sum(T) - 1) > 1e-5: warnings.warn("Solver failed to produce a transport plan. You might " "want to increase the regularization parameter `epsilon`.") if log: log['fgw_dist'] = (1 - alpha) * nx.sum(M * T) + alpha * gwloss(constC, hC1, hC2, T, nx) return T, log else: return T def entropic_fused_gromov_wasserstein2( M, C1, C2, p=None, q=None, loss_fun='square_loss', epsilon=0.1, symmetric=None, alpha=0.5, G0=None, max_iter=1000, tol=1e-9, solver='PGD', warmstart=False, verbose=False, log=False, **kwargs): r""" Returns the Fused Gromov-Wasserstein distance between :math:`(\mathbf{C_1}, \mathbf{Y_1}, \mathbf{p})` and :math:`(\mathbf{C_2}, \mathbf{Y_2}, \mathbf{q})` with pairwise distance matrix :math:`\mathbf{M}` between node feature matrices :math:`\mathbf{Y_1}` and :math:`\mathbf{Y_2}`, estimated using Sinkhorn projections. If `solver="PGD"`, the function solves the following entropic-regularized Fused Gromov-Wasserstein optimization problem using Projected Gradient Descent [12]: .. math:: \mathbf{FGW} = \mathop{\min}_\mathbf{T} \quad (1 - \alpha) \langle \mathbf{T}, \mathbf{M} \rangle_F + \alpha \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j} \mathbf{T}_{k,l} - \epsilon H(\mathbf{T}) s.t. 
\ \mathbf{T} \mathbf{1} &= \mathbf{p}

     \mathbf{T}^T \mathbf{1} &= \mathbf{q}

     \mathbf{T} &\geq 0

Else if `solver="PPA"`, the function solves the following Fused
Gromov-Wasserstein optimization problem using Proximal Point Algorithm [51]:

.. math::
    \mathbf{FGW} = \mathop{\min}_\mathbf{T} \quad (1 - \alpha) \langle \mathbf{T}, \mathbf{M} \rangle_F + \alpha \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j} \mathbf{T}_{k,l}

    s.t. \ \mathbf{T} \mathbf{1} &= \mathbf{p}

         \mathbf{T}^T \mathbf{1} &= \mathbf{q}

         \mathbf{T} &\geq 0

Where :

- :math:`\mathbf{M}`: metric cost matrix between features across domains
- :math:`\mathbf{C_1}`: Metric cost matrix in the source space
- :math:`\mathbf{C_2}`: Metric cost matrix in the target space
- :math:`\mathbf{p}`: distribution in the source space
- :math:`\mathbf{q}`: distribution in the target space
- `L`: loss function to account for the misfit between the similarity and feature matrices
- `H`: entropy
- :math:`\alpha`: trade-off parameter

.. note:: If the inner solver `ot.sinkhorn` did not converge, the
    optimal coupling :math:`\mathbf{T}` returned by this function does not
    necessarily satisfy the marginal constraints
    :math:`\mathbf{T}\mathbf{1}=\mathbf{p}` and
    :math:`\mathbf{T}^T\mathbf{1}=\mathbf{q}`. So the returned
    Fused Gromov-Wasserstein loss does not necessarily satisfy distance
    properties and may be negative.

Parameters
----------
M : array-like, shape (ns, nt)
    Metric cost matrix between features across domains
C1 : array-like, shape (ns, ns)
    Metric cost matrix in the source space
C2 : array-like, shape (nt, nt)
    Metric cost matrix in the target space
p : array-like, shape (ns,), optional
    Distribution in the source space.
    If left to its default value None, uniform distribution is taken.
q : array-like, shape (nt,), optional
    Distribution in the target space.
    If left to its default value None, uniform distribution is taken.
loss_fun : string, optional (default='square_loss')
    Loss function used for the solver, either 'square_loss' or 'kl_loss'
epsilon : float, optional
    Regularization term >0
symmetric : bool, optional
    Whether C1 and C2 are to be assumed symmetric or not.
    If left to its default None value, a symmetry test will be conducted.
    Else if set to True (resp. False), C1 and C2 will be assumed symmetric (resp. asymmetric).
alpha : float, optional
    Trade-off parameter (0 < alpha < 1)
G0 : array-like, shape (ns,nt), optional
    If None the initial transport plan of the solver is pq^T.
    Otherwise G0 will be used as initial transport of the solver. G0 is not
    required to satisfy marginal constraints but we strongly recommend it
    to correctly estimate the GW distance.
max_iter : int, optional
    Max number of iterations
tol : float, optional
    Stop threshold on error (>0)
solver : string, optional
    Solver to use, either 'PGD' for Projected Gradient Descent or 'PPA'
    for Proximal Point Algorithm. Default value is 'PGD'.
warmstart : bool, optional
    Whether to perform warmstart of dual potentials in the successive
    Sinkhorn projections.
verbose : bool, optional
    Print information along iterations
log : bool, optional
    Record log if True.
**kwargs: dict
    Parameters can be directly passed to the ot.sinkhorn solver, such as
    `numItermax` and `stopThr` to control its estimation precision.

Returns
-------
fgw_dist : float
    Fused Gromov-Wasserstein distance

References
----------
.. [12] Gabriel Peyré, Marco Cuturi, and Justin Solomon,
    "Gromov-Wasserstein averaging of kernel and distance matrices."
    International Conference on Machine Learning (ICML). 2016.

.. [51] Xu, H., Luo, D., Zha, H., & Duke, L. C. (2019).
    Gromov-wasserstein learning for graph matching and node embedding.
    In International Conference on Machine Learning (ICML), 2019.

.. [24] Vayer Titouan, Chapel Laetitia, Flamary Rémi, Tavenard Romain
    and Courty Nicolas
    "Optimal Transport for structured data with application on graphs",
    International Conference on Machine Learning (ICML). 2019.
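Examples
--------
A minimal usage sketch (the random symmetric structures, the feature
distance matrix and the value of `epsilon` below are purely illustrative):

>>> import numpy as np
>>> import ot
>>> rng = np.random.RandomState(42)
>>> C1 = rng.rand(5, 5)
>>> C1 = 0.5 * (C1 + C1.T)  # make the source structure symmetric
>>> C2 = rng.rand(6, 6)
>>> C2 = 0.5 * (C2 + C2.T)  # make the target structure symmetric
>>> M = rng.rand(5, 6)  # pairwise feature distance matrix
>>> fgw_dist = ot.gromov.entropic_fused_gromov_wasserstein2(M, C1, C2, epsilon=0.5)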
""" nx = get_backend(M, C1, C2) T, logv = entropic_fused_gromov_wasserstein( M, C1, C2, p, q, loss_fun, epsilon, symmetric, alpha, G0, max_iter, tol, solver, warmstart, verbose, log=True, **kwargs) logv['T'] = T lin_term = nx.sum(T * M) logv['quad_loss'] = (logv['fgw_dist'] - (1 - alpha) * lin_term) logv['lin_loss'] = lin_term * (1 - alpha) if log: return logv['fgw_dist'], logv else: return logv['fgw_dist'] def BAPG_fused_gromov_wasserstein( M, C1, C2, p=None, q=None, loss_fun='square_loss', epsilon=0.1, symmetric=None, alpha=0.5, G0=None, max_iter=1000, tol=1e-9, marginal_loss=False, verbose=False, log=False): r""" Returns the Fused Gromov-Wasserstein transport between :math:`(\mathbf{C_1}, \mathbf{Y_1}, \mathbf{p})` and :math:`(\mathbf{C_2}, \mathbf{Y_2}, \mathbf{q})` with pairwise distance matrix :math:`\mathbf{M}` between node feature matrices :math:`\mathbf{Y_1}` and :math:`\mathbf{Y_2}`, estimated using Bregman Alternated Projected Gradient method. If `marginal_loss=True`, the function solves the following Fused Gromov-Wasserstein optimization problem : .. math:: \mathbf{T}^* \in\mathop{\arg\min}_\mathbf{T} \quad (1 - \alpha) \langle \mathbf{T}, \mathbf{M} \rangle_F + \alpha \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j} \mathbf{T}_{k,l} s.t. \ \mathbf{T} \mathbf{1} &= \mathbf{p} \mathbf{T}^T \mathbf{1} &= \mathbf{q} \mathbf{T} &\geq 0 Else, the function solves an equivalent problem [63, 64], where constant terms only depending on the marginals :math:`\mathbf{p}`: and :math:`\mathbf{q}`: are discarded while assuming that L decomposes as in Proposition 1 in [12]: .. math:: \mathbf{T}^* \in\mathop{\arg\min}_\mathbf{T} \quad (1 - \alpha) \langle \mathbf{T}, \mathbf{M} \rangle_F - \alpha \langle h_1(\mathbf{C}_1) \mathbf{T} h_2(\mathbf{C_2})^\top , \mathbf{T} \rangle_F s.t. \ \mathbf{T} \mathbf{1} &= \mathbf{p} \mathbf{T}^T \mathbf{1} &= \mathbf{q} \mathbf{T} &\geq 0 Where : - :math:`\mathbf{M}`: pairwise relation matrix between features across domains - :math:`\mathbf{C_1}`: Metric cost matrix in the source space - :math:`\mathbf{C_2}`: Metric cost matrix in the target space - :math:`\mathbf{p}`: distribution in the source space - :math:`\mathbf{q}`: distribution in the target space - `L`: loss function to account for the misfit between the similarity and feature matrices satisfying :math:`L(a, b) = f_1(a) + f_2(b) - h_1(a) h_2(b)` - :math:`\alpha`: trade-off parameter .. note:: By algorithmic design the optimal coupling :math:`\mathbf{T}` returned by this function does not necessarily satisfy the marginal constraints :math:`\mathbf{T}\mathbf{1}=\mathbf{p}` and :math:`\mathbf{T}^T\mathbf{1}=\mathbf{q}`. So the returned Fused Gromov-Wasserstein loss does not necessarily satisfy distance properties and may be negative. Parameters ---------- M : array-like, shape (ns, nt) Pairwise relation matrix between features across domains C1 : array-like, shape (ns, ns) Metric cost matrix in the source space C2 : array-like, shape (nt, nt) Metric cost matrix in the target space p : array-like, shape (ns,), optional Distribution in the source space. If let to its default value None, uniform distribution is taken. q : array-like, shape (nt,), optional Distribution in the target space. If let to its default value None, uniform distribution is taken. 
loss_fun : string, optional (default='square_loss') Loss function used for the solver either 'square_loss' or 'kl_loss' epsilon : float, optional Regularization term >0 symmetric : bool, optional Either C1 and C2 are to be assumed symmetric or not. If let to its default None value, a symmetry test will be conducted. Else if set to True (resp. False), C1 and C2 will be assumed symmetric (resp. asymmetric). alpha : float, optional Trade-off parameter (0 < alpha < 1) G0: array-like, shape (ns,nt), optional If None the initial transport plan of the solver is pq^T. Otherwise G0 will be used as initial transport of the solver. G0 is not required to satisfy marginal constraints but we strongly recommend it to correctly estimate the GW distance. max_iter : int, optional Max number of iterations tol : float, optional Stop threshold on error (>0) marginal_loss: bool, optional. Default is False. Include constant marginal terms or not in the objective function. verbose : bool, optional Print information along iterations log : bool, optional Record log if True. Returns ------- T : array-like, shape (`ns`, `nt`) Optimal coupling between the two joint spaces References ---------- .. [63] Li, J., Tang, J., Kong, L., Liu, H., Li, J., So, A. M. C., & Blanchet, J. "A Convergent Single-Loop Algorithm for Relaxation of Gromov-Wasserstein in Graph Data". International Conference on Learning Representations (ICLR), 2023. .. [64] Ma, X., Chu, X., Wang, Y., Lin, Y., Zhao, J., Ma, L., & Zhu, W. "Fused Gromov-Wasserstein Graph Mixup for Graph-level Classifications". In Thirty-seventh Conference on Neural Information Processing Systems. """ if loss_fun not in ('square_loss', 'kl_loss'): raise ValueError(f"Unknown `loss_fun='{loss_fun}'`. Use one of: {'square_loss', 'kl_loss'}.") M, C1, C2 = list_to_array(M, C1, C2) arr = [M, C1, C2] if p is not None: arr.append(list_to_array(p)) else: p = unif(C1.shape[0], type_as=C1) if q is not None: arr.append(list_to_array(q)) else: q = unif(C2.shape[0], type_as=C2) if G0 is not None: arr.append(G0) nx = get_backend(*arr) if G0 is None: G0 = nx.outer(p, q) T = G0 constC, hC1, hC2 = init_matrix(C1, C2, p, q, loss_fun, nx) if symmetric is None: symmetric = nx.allclose(C1, C1.T, atol=1e-10) and nx.allclose(C2, C2.T, atol=1e-10) if not symmetric: constCt, hC1t, hC2t = init_matrix(C1.T, C2.T, p, q, loss_fun, nx) # Define gradients if marginal_loss: if symmetric: def df(T): return alpha * gwggrad(constC, hC1, hC2, T, nx) + (1 - alpha) * M else: def df(T): return (alpha * 0.5) * (gwggrad(constC, hC1, hC2, T, nx) + gwggrad(constCt, hC1t, hC2t, T, nx)) + (1 - alpha) * M else: if symmetric: def df(T): A = - nx.dot(nx.dot(hC1, T), hC2.T) return 2 * alpha * A + (1 - alpha) * M else: def df(T): A = - nx.dot(nx.dot(hC1, T), hC2t) At = - nx.dot(nx.dot(hC1t, T), hC2) return alpha * (A + At) + (1 - alpha) * M cpt = 0 err = 1e15 if log: log = {'err': []} while (err > tol and cpt < max_iter): Tprev = T # rows update T = T * nx.exp(- df(T) / epsilon) row_scaling = p / nx.sum(T, 1) T = nx.reshape(row_scaling, (-1, 1)) * T # columns update T = T * nx.exp(- df(T) / epsilon) column_scaling = q / nx.sum(T, 0) T = nx.reshape(column_scaling, (1, -1)) * T if cpt % 10 == 0: # we can speed up the process by checking for the error only all # the 10th iterations err = nx.norm(T - Tprev) if log: log['err'].append(err) if verbose: if cpt % 200 == 0: print('{:5s}|{:12s}'.format( 'It.', 'Err') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(cpt, err)) cpt += 1 if nx.any(nx.isnan(T)): warnings.warn("Solver failed to 
produce a transport plan. You might " "want to increase the regularization parameter `epsilon`.", UserWarning) if log: log['fgw_dist'] = (1 - alpha) * nx.sum(M * T) + alpha * gwloss(constC, hC1, hC2, T, nx) if not marginal_loss: log['loss'] = log['fgw_dist'] - alpha * nx.sum(constC * T) return T, log else: return T def BAPG_fused_gromov_wasserstein2( M, C1, C2, p=None, q=None, loss_fun='square_loss', epsilon=0.1, symmetric=None, alpha=0.5, G0=None, max_iter=1000, tol=1e-9, marginal_loss=False, verbose=False, log=False): r""" Returns the Fused Gromov-Wasserstein loss between :math:`(\mathbf{C_1}, \mathbf{Y_1}, \mathbf{p})` and :math:`(\mathbf{C_2}, \mathbf{Y_2}, \mathbf{q})` with pairwise distance matrix :math:`\mathbf{M}` between node feature matrices :math:`\mathbf{Y_1}` and :math:`\mathbf{Y_2}`, estimated using Bregman Alternated Projected Gradient method. If `marginal_loss=True`, the function solves the following Fused Gromov-Wasserstein optimization problem : .. math:: \mathbf{FGW} = \mathop{\min}_\mathbf{T} \quad (1 - \alpha) \langle \mathbf{T}, \mathbf{M} \rangle_F + \alpha \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j} \mathbf{T}_{k,l} s.t. \ \mathbf{T} \mathbf{1} &= \mathbf{p} \mathbf{T}^T \mathbf{1} &= \mathbf{q} \mathbf{T} &\geq 0 Else, the function solves an equivalent problem [63, 64], where constant terms only depending on the marginals :math:`\mathbf{p}`: and :math:`\mathbf{q}`: are discarded while assuming that L decomposes as in Proposition 1 in [12]: .. math:: \mathop{\min}_\mathbf{T} \quad (1 - \alpha) \langle \mathbf{T}, \mathbf{M} \rangle_F - \alpha \langle h_1(\mathbf{C}_1) \mathbf{T} h_2(\mathbf{C_2})^\top , \mathbf{T} \rangle_F s.t. \ \mathbf{T} \mathbf{1} &= \mathbf{p} \mathbf{T}^T \mathbf{1} &= \mathbf{q} \mathbf{T} &\geq 0 Where : - :math:`\mathbf{M}`: metric cost matrix between features across domains - :math:`\mathbf{C_1}`: Metric cost matrix in the source space - :math:`\mathbf{C_2}`: Metric cost matrix in the target space - :math:`\mathbf{p}`: distribution in the source space - :math:`\mathbf{q}`: distribution in the target space - `L`: loss function to account for the misfit between the similarity and feature matrices satisfying :math:`L(a, b) = f_1(a) + f_2(b) - h_1(a) h_2(b)` - :math:`\alpha`: trade-off parameter .. note:: By algorithmic design the optimal coupling :math:`\mathbf{T}` returned by this function does not necessarily satisfy the marginal constraints :math:`\mathbf{T}\mathbf{1}=\mathbf{p}` and :math:`\mathbf{T}^T\mathbf{1}=\mathbf{q}`. So the returned Fused Gromov-Wasserstein loss does not necessarily satisfy distance properties and may be negative. Parameters ---------- M : array-like, shape (ns, nt) Metric cost matrix between features across domains C1 : array-like, shape (ns, ns) Metric cost matrix in the source space C2 : array-like, shape (nt, nt) Metric cost matrix in the target space p : array-like, shape (ns,), optional Distribution in the source space. If let to its default value None, uniform distribution is taken. q : array-like, shape (nt,), optional Distribution in the target space. If let to its default value None, uniform distribution is taken. loss_fun : string, optional (default='square_loss') Loss function used for the solver either 'square_loss' or 'kl_loss' epsilon : float, optional Regularization term >0 symmetric : bool, optional Either C1 and C2 are to be assumed symmetric or not. If let to its default None value, a symmetry test will be conducted. Else if set to True (resp. 
False), C1 and C2 will be assumed symmetric (resp. asymmetric). alpha : float, optional Trade-off parameter (0 < alpha < 1) G0: array-like, shape (ns,nt), optional If None the initial transport plan of the solver is pq^T. Otherwise G0 will be used as initial transport of the solver. G0 is not required to satisfy marginal constraints but we strongly recommend it to correctly estimate the GW distance. max_iter : int, optional Max number of iterations tol : float, optional Stop threshold on error (>0) marginal_loss: bool, optional. Default is False. Include constant marginal terms or not in the objective function. verbose : bool, optional Print information along iterations log : bool, optional Record log if True. Returns ------- T : array-like, shape (`ns`, `nt`) Optimal coupling between the two joint spaces References ---------- .. [63] Li, J., Tang, J., Kong, L., Liu, H., Li, J., So, A. M. C., & Blanchet, J. "A Convergent Single-Loop Algorithm for Relaxation of Gromov-Wasserstein in Graph Data". International Conference on Learning Representations (ICLR), 2023. .. [64] Ma, X., Chu, X., Wang, Y., Lin, Y., Zhao, J., Ma, L., & Zhu, W. "Fused Gromov-Wasserstein Graph Mixup for Graph-level Classifications". In Thirty-seventh Conference on Neural Information Processing Systems. """ nx = get_backend(M, C1, C2) T, logv = BAPG_fused_gromov_wasserstein( M, C1, C2, p, q, loss_fun, epsilon, symmetric, alpha, G0, max_iter, tol, marginal_loss, verbose, log=True) logv['T'] = T lin_term = nx.sum(T * M) logv['quad_loss'] = (logv['fgw_dist'] - (1 - alpha) * lin_term) logv['lin_loss'] = lin_term * (1 - alpha) if log: return logv['fgw_dist'], logv else: return logv['fgw_dist'] def entropic_fused_gromov_barycenters( N, Ys, Cs, ps=None, p=None, lambdas=None, loss_fun='square_loss', epsilon=0.1, symmetric=True, alpha=0.5, max_iter=1000, tol=1e-9, stop_criterion='barycenter', warmstartT=False, verbose=False, log=False, init_C=None, init_Y=None, fixed_structure=False, fixed_features=False, random_state=None, **kwargs): r""" Returns the Fused Gromov-Wasserstein barycenters of `S` measurable networks with node features :math:`(\mathbf{C}_s, \mathbf{Y}_s, \mathbf{p}_s)_{1 \leq s \leq S}` estimated using Fused Gromov-Wasserstein transports from Sinkhorn projections. The function solves the following optimization problem: .. math:: \mathbf{C}^*, \mathbf{Y}^* = \mathop{\arg \min}_{\mathbf{C}\in \mathbb{R}^{N \times N}, \mathbf{Y}\in \mathbb{Y}^{N \times d}} \quad \sum_s \lambda_s \mathrm{FGW}_{\alpha}(\mathbf{C}, \mathbf{C}_s, \mathbf{Y}, \mathbf{Y}_s, \mathbf{p}, \mathbf{p}_s) Where : - :math:`\mathbf{Y}_s`: feature matrix - :math:`\mathbf{C}_s`: metric cost matrix - :math:`\mathbf{p}_s`: distribution Parameters ---------- N : int Size of the targeted barycenter Ys: list of array-like, each element has shape (ns,d) Features of all samples Cs : list of S array-like of shape (ns,ns) Metric cost matrices ps : list of S array-like of shape (ns,), optional Sample weights in the `S` spaces. If let to its default value None, uniform distributions are taken. p : array-like, shape (N,), optional Weights in the targeted barycenter. If let to its default value None, uniform distribution is taken. lambdas : list of float, optional List of the `S` spaces' weights. If let to its default value None, uniform weights are taken. loss_fun : string, optional (default='square_loss') Loss function used for the solver either 'square_loss' or 'kl_loss' epsilon : float, optional Regularization term >0 symmetric : bool, optional. 
Either structures are to be assumed symmetric or not. Default value is True. Else if set to True (resp. False), C1 and C2 will be assumed symmetric (resp. asymmetric). alpha : float, optional Trade-off parameter (0 < alpha < 1) max_iter : int, optional Max number of iterations tol : float, optional Stop threshold on error (>0) stop_criterion : str, optional. Default is 'barycenter'. Stop criterion taking values in ['barycenter', 'loss']. If set to 'barycenter' uses absolute norm variations of estimated barycenters. Else if set to 'loss' uses the relative variations of the loss. warmstartT: bool, optional Either to perform warmstart of transport plans in the successive fused gromov-wasserstein transport problems. verbose : bool, optional Print information along iterations. log : bool, optional Record log if True. init_C : bool | array-like, shape (N, N) Random initial value for the :math:`\mathbf{C}` matrix provided by user. init_Y : array-like, shape (N,d), optional Initialization for the barycenters' features. If not set a random init is used. fixed_structure : bool, optional Whether to fix the structure of the barycenter during the updates. fixed_features : bool, optional Whether to fix the feature of the barycenter during the updates random_state : int or RandomState instance, optional Fix the seed for reproducibility **kwargs: dict parameters can be directly passed to the `ot.entropic_fused_gromov_wasserstein` solver. Returns ------- Y : array-like, shape (`N`, `d`) Feature matrix in the barycenter space (permutated arbitrarily) C : array-like, shape (`N`, `N`) Similarity matrix in the barycenter space (permutated as Y's rows) log : dict Only returned when log=True. It contains the keys: - :math:`\mathbf{T}`: list of (`N`, `ns`) transport matrices - :math:`\mathbf{p}`: (`N`,) barycenter weights - :math:`(\mathbf{M}_s)_s`: all distance matrices between the feature of the barycenter and the other features :math:`(dist(\mathbf{X}, \mathbf{Y}_s))_s` shape (`N`, `ns`) - values used in convergence evaluation. References ---------- .. [12] Gabriel PeyrĂ©, Marco Cuturi, and Justin Solomon, "Gromov-Wasserstein averaging of kernel and distance matrices." International Conference on Machine Learning (ICML). 2016. .. [24] Vayer Titouan, Chapel Laetitia, Flamary RĂ©mi, Tavenard Romain and Courty Nicolas "Optimal Transport for structured data with application on graphs" International Conference on Machine Learning (ICML). 2019. """ if loss_fun not in ('square_loss', 'kl_loss'): raise ValueError(f"Unknown `loss_fun='{loss_fun}'`. Use one of: {'square_loss', 'kl_loss'}.") if stop_criterion not in ['barycenter', 'loss']: raise ValueError(f"Unknown `stop_criterion='{stop_criterion}'`. Use one of: {'barycenter', 'loss'}.") Cs = list_to_array(*Cs) Ys = list_to_array(*Ys) arr = [*Cs, *Ys] if ps is not None: arr += list_to_array(*ps) else: ps = [unif(C.shape[0], type_as=C) for C in Cs] if p is not None: arr.append(list_to_array(p)) else: p = unif(N, type_as=Cs[0]) nx = get_backend(*arr) S = len(Cs) if lambdas is None: lambdas = [1. 
/ S] * S d = Ys[0].shape[1] # dimension on the node features # Initialization of C : random euclidean distance matrix (if not provided by user) if fixed_structure: if init_C is None: raise UndefinedParameter('If C is fixed it must be initialized') else: C = init_C else: if init_C is None: generator = check_random_state(random_state) xalea = generator.randn(N, 2) C = dist(xalea, xalea) C = nx.from_numpy(C, type_as=ps[0]) else: C = init_C # Initialization of Y if fixed_features: if init_Y is None: raise UndefinedParameter('If Y is fixed it must be initialized') else: Y = init_Y else: if init_Y is None: Y = nx.zeros((N, d), type_as=ps[0]) else: Y = init_Y Ms = [dist(Y, Ys[s]) for s in range(len(Ys))] if warmstartT: T = [None] * S cpt = 0 if stop_criterion == 'barycenter': inner_log = False err_feature = 1e15 err_structure = 1e15 err_rel_loss = 0. else: inner_log = True err_feature = 0. err_structure = 0. curr_loss = 1e15 err_rel_loss = 1e15 if log: log_ = {} if stop_criterion == 'barycenter': log_['err_feature'] = [] log_['err_structure'] = [] log_['Ts_iter'] = [] else: log_['loss'] = [] log_['err_rel_loss'] = [] while ((err_feature > tol or err_structure > tol or err_rel_loss > tol) and cpt < max_iter): if stop_criterion == 'barycenter': Cprev = C Yprev = Y else: prev_loss = curr_loss # get transport plans if warmstartT: res = [entropic_fused_gromov_wasserstein( Ms[s], C, Cs[s], p, ps[s], loss_fun, epsilon, symmetric, alpha, T[s], max_iter, 1e-4, verbose=verbose, log=inner_log, **kwargs) for s in range(S)] else: res = [entropic_fused_gromov_wasserstein( Ms[s], C, Cs[s], p, ps[s], loss_fun, epsilon, symmetric, alpha, None, max_iter, 1e-4, verbose=verbose, log=inner_log, **kwargs) for s in range(S)] if stop_criterion == 'barycenter': T = res else: T = [output[0] for output in res] curr_loss = np.sum([output[1]['fgw_dist'] for output in res]) # update barycenters if not fixed_features: Ys_temp = [y.T for y in Ys] X = update_feature_matrix(lambdas, Ys_temp, T, p, nx).T Ms = [dist(X, Ys[s]) for s in range(len(Ys))] if not fixed_structure: if loss_fun == 'square_loss': C = update_square_loss(p, lambdas, T, Cs, nx) elif loss_fun == 'kl_loss': C = update_kl_loss(p, lambdas, T, Cs, nx) # update convergence criterion if stop_criterion == 'barycenter': err_feature, err_structure = 0., 0. if not fixed_features: err_feature = nx.norm(Y - Yprev) if not fixed_structure: err_structure = nx.norm(C - Cprev) if log: log_['err_feature'].append(err_feature) log_['err_structure'].append(err_structure) log_['Ts_iter'].append(T) if verbose: if cpt % 200 == 0: print('{:5s}|{:12s}'.format( 'It.', 'Err') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(cpt, err_structure)) print('{:5d}|{:8e}|'.format(cpt, err_feature)) else: err_rel_loss = abs(curr_loss - prev_loss) / prev_loss if prev_loss != 0. else np.nan if log: log_['loss'].append(curr_loss) log_['err_rel_loss'].append(err_rel_loss) if verbose: if cpt % 200 == 0: print('{:5s}|{:12s}'.format( 'It.', 'Err') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(cpt, err_rel_loss)) cpt += 1 if log: log_['T'] = T log_['p'] = p log_['Ms'] = Ms return Y, C, log_ else: return Y, C python-pot-0.9.3+dfsg/ot/gromov/_dictionary.py000066400000000000000000001403721455713015700214130ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ (Fused) Gromov-Wasserstein dictionary learning. 
""" # Author: RĂ©mi Flamary # CĂ©dric Vincent-Cuaz # # License: MIT License import numpy as np from ..utils import unif, check_random_state from ..backend import get_backend from ._gw import gromov_wasserstein, fused_gromov_wasserstein def gromov_wasserstein_dictionary_learning(Cs, D, nt, reg=0., ps=None, q=None, epochs=20, batch_size=32, learning_rate=1., Cdict_init=None, projection='nonnegative_symmetric', use_log=True, tol_outer=10**(-5), tol_inner=10**(-5), max_iter_outer=20, max_iter_inner=200, use_adam_optimizer=True, verbose=False, random_state=None, **kwargs): r""" Infer Gromov-Wasserstein linear dictionary :math:`\{ (\mathbf{C_{dict}[d]}, q) \}_{d \in [D]}` from the list of structures :math:`\{ (\mathbf{C_s},\mathbf{p_s}) \}_s` .. math:: \min_{\mathbf{C_{dict}}, \{\mathbf{w_s} \}_{s \leq S}} \sum_{s=1}^S GW_2(\mathbf{C_s}, \sum_{d=1}^D w_{s,d}\mathbf{C_{dict}[d]}, \mathbf{p_s}, \mathbf{q}) - reg\| \mathbf{w_s} \|_2^2 such that, :math:`\forall s \leq S` : - :math:`\mathbf{w_s}^\top \mathbf{1}_D = 1` - :math:`\mathbf{w_s} \geq \mathbf{0}_D` Where : - :math:`\forall s \leq S, \mathbf{C_s}` is a (ns,ns) pairwise similarity matrix of variable size ns. - :math:`\mathbf{C_{dict}}` is a (D, nt, nt) tensor of D pairwise similarity matrix of fixed size nt. - :math:`\forall s \leq S, \mathbf{p_s}` is the source distribution corresponding to :math:`\mathbf{C_s}` - :math:`\mathbf{q}` is the target distribution assigned to every structures in the embedding space. - reg is the regularization coefficient. The stochastic algorithm used for estimating the graph dictionary atoms as proposed in [38]_ Parameters ---------- Cs : list of S symmetric array-like, shape (ns, ns) List of Metric/Graph cost matrices of variable size (ns, ns). D: int Number of dictionary atoms to learn nt: int Number of samples within each dictionary atoms reg : float, optional Coefficient of the negative quadratic regularization used to promote sparsity of w. The default is 0. ps : list of S array-like, shape (ns,), optional Distribution in each source space C of Cs. Default is None and corresponds to uniform distibutions. q : array-like, shape (nt,), optional Distribution in the embedding space whose structure will be learned. Default is None and corresponds to uniform distributions. epochs: int, optional Number of epochs used to learn the dictionary. Default is 32. batch_size: int, optional Batch size for each stochastic gradient update of the dictionary. Set to the dataset size if the provided batch_size is higher than the dataset size. Default is 32. learning_rate: float, optional Learning rate used for the stochastic gradient descent. Default is 1. Cdict_init: list of D array-like with shape (nt, nt), optional Used to initialize the dictionary. If set to None (Default), the dictionary will be initialized randomly. Else Cdict must have shape (D, nt, nt) i.e match provided shape features. projection: str , optional If 'nonnegative' and/or 'symmetric' is in projection, the corresponding projection will be performed at each stochastic update of the dictionary Else the set of atoms is :math:`R^{nt * nt}`. Default is 'nonnegative_symmetric' log: bool, optional If set to True, losses evolution by batches and epochs are tracked. Default is False. use_adam_optimizer: bool, optional If set to True, adam optimizer with default settings is used as adaptative learning rate strategy. Else perform SGD with fixed learning rate. Default is True. 
tol_outer : float, optional Solver precision for the BCD algorithm, measured by absolute relative error on consecutive losses. Default is :math:`10^{-5}`. tol_inner : float, optional Solver precision for the Conjugate Gradient algorithm used to get optimal w at a fixed transport, measured by absolute relative error on consecutive losses. Default is :math:`10^{-5}`. max_iter_outer : int, optional Maximum number of iterations for the BCD. Default is 20. max_iter_inner : int, optional Maximum number of iterations for the Conjugate Gradient. Default is 200. verbose : bool, optional Print the reconstruction loss every epoch. Default is False. random_state : int, RandomState instance or None, default=None Determines random number generation. Pass an int for reproducible output across multiple function calls. Returns ------- Cdict_best_state : D array-like, shape (D,nt,nt) Metric/Graph cost matrices composing the dictionary. The dictionary leading to the best loss over an epoch is saved and returned. log: dict If use_log is True, contains loss evolutions by batches and epochs. References ------- .. [38] C. Vincent-Cuaz, T. Vayer, R. Flamary, M. Corneli, N. Courty, Online Graph Dictionary Learning, International Conference on Machine Learning (ICML), 2021. """ # Handle backend of non-optional arguments Cs0 = Cs nx = get_backend(*Cs0) Cs = [nx.to_numpy(C) for C in Cs0] dataset_size = len(Cs) # Handle backend of optional arguments if ps is None: ps = [unif(C.shape[0]) for C in Cs] else: ps = [nx.to_numpy(p) for p in ps] if q is None: q = unif(nt) else: q = nx.to_numpy(q) rng = check_random_state(random_state) if Cdict_init is None: # Initialize randomly structures of dictionary atoms based on samples dataset_means = [C.mean() for C in Cs] Cdict = rng.normal(loc=np.mean(dataset_means), scale=np.std(dataset_means), size=(D, nt, nt)) else: Cdict = nx.to_numpy(Cdict_init).copy() assert Cdict.shape == (D, nt, nt) if 'symmetric' in projection: Cdict = 0.5 * (Cdict + Cdict.transpose((0, 2, 1))) symmetric = True else: symmetric = False if 'nonnegative' in projection: Cdict[Cdict < 0.] = 0 if use_adam_optimizer: adam_moments = _initialize_adam_optimizer(Cdict) log = {'loss_batches': [], 'loss_epochs': []} const_q = q[:, None] * q[None, :] Cdict_best_state = Cdict.copy() loss_best_state = np.inf if batch_size > dataset_size: batch_size = dataset_size iter_by_epoch = dataset_size // batch_size + int((dataset_size % batch_size) > 0) for epoch in range(epochs): cumulated_loss_over_epoch = 0. for _ in range(iter_by_epoch): # batch sampling batch = rng.choice(range(dataset_size), size=batch_size, replace=False) cumulated_loss_over_batch = 0. 
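# The buffers allocated below store, for every structure in the sampled batch,
# its unmixing weights, its embedded structure and its transport plan: all
# three are reused to assemble the stochastic gradient over the dictionary
# atoms once the batch loop is done.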
unmixings = np.zeros((batch_size, D)) Cs_embedded = np.zeros((batch_size, nt, nt)) Ts = [None] * batch_size for batch_idx, C_idx in enumerate(batch): # BCD solver for Gromov-Wasserstein linear unmixing used independently on each structure of the sampled batch unmixings[batch_idx], Cs_embedded[batch_idx], Ts[batch_idx], current_loss = gromov_wasserstein_linear_unmixing( Cs[C_idx], Cdict, reg=reg, p=ps[C_idx], q=q, tol_outer=tol_outer, tol_inner=tol_inner, max_iter_outer=max_iter_outer, max_iter_inner=max_iter_inner, symmetric=symmetric, **kwargs ) cumulated_loss_over_batch += current_loss cumulated_loss_over_epoch += cumulated_loss_over_batch if use_log: log['loss_batches'].append(cumulated_loss_over_batch) # Stochastic projected gradient step over dictionary atoms grad_Cdict = np.zeros_like(Cdict) for batch_idx, C_idx in enumerate(batch): shared_term_structures = Cs_embedded[batch_idx] * const_q - (Cs[C_idx].dot(Ts[batch_idx])).T.dot(Ts[batch_idx]) grad_Cdict += unmixings[batch_idx][:, None, None] * shared_term_structures[None, :, :] grad_Cdict *= 2 / batch_size if use_adam_optimizer: Cdict, adam_moments = _adam_stochastic_updates(Cdict, grad_Cdict, learning_rate, adam_moments) else: Cdict -= learning_rate * grad_Cdict if 'symmetric' in projection: Cdict = 0.5 * (Cdict + Cdict.transpose((0, 2, 1))) if 'nonnegative' in projection: Cdict[Cdict < 0.] = 0. if use_log: log['loss_epochs'].append(cumulated_loss_over_epoch) if loss_best_state > cumulated_loss_over_epoch: loss_best_state = cumulated_loss_over_epoch Cdict_best_state = Cdict.copy() if verbose: print('--- epoch =', epoch, ' cumulated reconstruction error: ', cumulated_loss_over_epoch) return nx.from_numpy(Cdict_best_state), log def _initialize_adam_optimizer(variable): # Initialization for our numpy implementation of adam optimizer atoms_adam_m = np.zeros_like(variable) # Initialize first moment tensor atoms_adam_v = np.zeros_like(variable) # Initialize second moment tensor atoms_adam_count = 1 return {'mean': atoms_adam_m, 'var': atoms_adam_v, 'count': atoms_adam_count} def _adam_stochastic_updates(variable, grad, learning_rate, adam_moments, beta_1=0.9, beta_2=0.99, eps=1e-09): adam_moments['mean'] = beta_1 * adam_moments['mean'] + (1 - beta_1) * grad adam_moments['var'] = beta_2 * adam_moments['var'] + (1 - beta_2) * (grad**2) unbiased_m = adam_moments['mean'] / (1 - beta_1**adam_moments['count']) unbiased_v = adam_moments['var'] / (1 - beta_2**adam_moments['count']) variable -= learning_rate * unbiased_m / (np.sqrt(unbiased_v) + eps) adam_moments['count'] += 1 return variable, adam_moments def gromov_wasserstein_linear_unmixing(C, Cdict, reg=0., p=None, q=None, tol_outer=10**(-5), tol_inner=10**(-5), max_iter_outer=20, max_iter_inner=200, symmetric=None, **kwargs): r""" Returns the Gromov-Wasserstein linear unmixing of :math:`(\mathbf{C},\mathbf{p})` onto the dictionary :math:`\{ (\mathbf{C_{dict}[d]}, \mathbf{q}) \}_{d \in [D]}`. .. math:: \min_{ \mathbf{w}} GW_2(\mathbf{C}, \sum_{d=1}^D w_d\mathbf{C_{dict}[d]}, \mathbf{p}, \mathbf{q}) - reg \| \mathbf{w} \|_2^2 such that: - :math:`\mathbf{w}^\top \mathbf{1}_D = 1` - :math:`\mathbf{w} \geq \mathbf{0}_D` Where : - :math:`\mathbf{C}` is the (ns,ns) pairwise similarity matrix. - :math:`\mathbf{C_{dict}}` is a (D, nt, nt) tensor of D pairwise similarity matrices of size nt. - :math:`\mathbf{p}` and :math:`\mathbf{q}` are source and target weights. - reg is the regularization coefficient. 
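For intuition, an illustrative special case of the constraint set: with :math:`D = 2` atoms, the simplex constraints reduce :math:`\mathbf{w}` to :math:`(\lambda, 1 - \lambda)` with :math:`\lambda \in [0, 1]`, so the unmixing interpolates between the two dictionary structures; since the quadratic term enters the objective with a negative sign, reg > 0 favors values of :math:`\lambda` close to 0 or 1, i.e. sparse mixtures.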
The algorithm used for solving the problem is a Block Coordinate Descent as discussed in [38]_ , algorithm 1. Parameters ---------- C : array-like, shape (ns, ns) Metric/Graph cost matrix. Cdict : D array-like, shape (D,nt,nt) Metric/Graph cost matrices composing the dictionary on which to embed C. reg : float, optional. Coefficient of the negative quadratic regularization used to promote sparsity of w. Default is 0. p : array-like, shape (ns,), optional Distribution in the source space C. Default is None and corresponds to uniform distribution. q : array-like, shape (nt,), optional Distribution in the space depicted by the dictionary. Default is None and corresponds to uniform distribution. tol_outer : float, optional Solver precision for the BCD algorithm. tol_inner : float, optional Solver precision for the Conjugate Gradient algorithm used to get optimal w at a fixed transport. Default is :math:`10^{-5}`. max_iter_outer : int, optional Maximum number of iterations for the BCD. Default is 20. max_iter_inner : int, optional Maximum number of iterations for the Conjugate Gradient. Default is 200. Returns ------- w: array-like, shape (D,) Gromov-Wasserstein linear unmixing of :math:`(\mathbf{C},\mathbf{p})` onto the span of the dictionary. Cembedded: array-like, shape (nt,nt) embedded structure of :math:`(\mathbf{C},\mathbf{p})` onto the dictionary, :math:`\sum_d w_d\mathbf{C_{dict}[d]}`. T: array-like (ns, nt) Gromov-Wasserstein transport plan between :math:`(\mathbf{C},\mathbf{p})` and :math:`(\sum_d w_d\mathbf{C_{dict}[d]}, \mathbf{q})` current_loss: float reconstruction error References ------- .. [38] C. Vincent-Cuaz, T. Vayer, R. Flamary, M. Corneli, N. Courty, Online Graph Dictionary Learning, International Conference on Machine Learning (ICML), 2021. """ C0, Cdict0 = C, Cdict nx = get_backend(C0, Cdict0) C = nx.to_numpy(C0) Cdict = nx.to_numpy(Cdict0) if p is None: p = unif(C.shape[0]) else: p = nx.to_numpy(p) if q is None: q = unif(Cdict.shape[-1]) else: q = nx.to_numpy(q) T = p[:, None] * q[None, :] D = len(Cdict) w = unif(D) # Initialize uniformly the unmixing w Cembedded = np.sum(w[:, None, None] * Cdict, axis=0) const_q = q[:, None] * q[None, :] # Trackers for BCD convergence convergence_criterion = np.inf current_loss = 10**15 outer_count = 0 while (convergence_criterion > tol_outer) and (outer_count < max_iter_outer): previous_loss = current_loss # 1. Solve GW transport between (C,p) and (\sum_d Cdictionary[d],q) fixing the unmixing w T, log = gromov_wasserstein( C1=C, C2=Cembedded, p=p, q=q, loss_fun='square_loss', G0=T, max_iter=max_iter_inner, tol_rel=tol_inner, tol_abs=0., log=True, armijo=False, symmetric=symmetric, **kwargs) current_loss = log['gw_dist'] if reg != 0: current_loss -= reg * np.sum(w**2) # 2. 
Solve linear unmixing problem over w with a fixed transport plan T w, Cembedded, current_loss = _cg_gromov_wasserstein_unmixing( C=C, Cdict=Cdict, Cembedded=Cembedded, w=w, const_q=const_q, T=T, starting_loss=current_loss, reg=reg, tol=tol_inner, max_iter=max_iter_inner, **kwargs ) if previous_loss != 0: convergence_criterion = abs(previous_loss - current_loss) / abs(previous_loss) else: # handle numerical issues around 0 convergence_criterion = abs(previous_loss - current_loss) / 10**(-15) outer_count += 1 return nx.from_numpy(w), nx.from_numpy(Cembedded), nx.from_numpy(T), nx.from_numpy(current_loss) def _cg_gromov_wasserstein_unmixing(C, Cdict, Cembedded, w, const_q, T, starting_loss, reg=0., tol=10**(-5), max_iter=200, **kwargs): r""" Returns, for a fixed admissible transport plan, the linear unmixing :math:`\mathbf{w}` minimizing the Gromov-Wasserstein cost between :math:`(\mathbf{C},\mathbf{p})` and :math:`(\sum_d w[d]*\mathbf{C_{dict}[d]}, \mathbf{q})` .. math:: \min_{\mathbf{w}} \sum_{ijkl} (C_{i,j} - \sum_{d=1}^D w_d*C_{dict}[d]_{k,l} )^2 T_{i,k}T_{j,l} - reg* \| \mathbf{w} \|_2^2 Such that: - :math:`\mathbf{w}^\top \mathbf{1}_D = 1` - :math:`\mathbf{w} \geq \mathbf{0}_D` Where : - :math:`\mathbf{C}` is the (ns,ns) pairwise similarity matrix. - :math:`\mathbf{C_{dict}}` is a (D, nt, nt) tensor of D pairwise similarity matrices of nt points. - :math:`\mathbf{p}` and :math:`\mathbf{q}` are source and target weights. - :math:`\mathbf{w}` is the linear unmixing of :math:`(\mathbf{C}, \mathbf{p})` onto :math:`(\sum_d w_d \mathbf{Cdict[d]}, \mathbf{q})`. - :math:`\mathbf{T}` is the optimal transport plan conditioned by the current state of :math:`\mathbf{w}`. - reg is the regularization coefficient. The algorithm used for solving the problem is a Conditional Gradient Descent as discussed in [38]_ Parameters ---------- C : array-like, shape (ns, ns) Metric/Graph cost matrix. Cdict : list of D array-like, shape (nt,nt) Metric/Graph cost matrices composing the dictionary on which to embed C. Each matrix in the dictionary must have the same size (nt,nt). Cembedded: array-like, shape (nt,nt) Embedded structure :math:`(\sum_d w[d]*Cdict[d],q)` of :math:`(\mathbf{C},\mathbf{p})` onto the dictionary. Used to avoid redundant computations. w: array-like, shape (D,) Linear unmixing of the input structure onto the dictionary const_q: array-like, shape (nt,nt) product matrix :math:`\mathbf{q}\mathbf{q}^\top` where q is the target space distribution. Used to avoid redundant computations. T: array-like, shape (ns,nt) fixed transport plan between the input structure and its representation in the dictionary. reg : float, optional. Coefficient of the negative quadratic regularization used to promote sparsity of w. Default is 0. Returns ------- w: ndarray (D,) optimal unmixing of :math:`(\mathbf{C},\mathbf{p})` onto the dictionary span given OT starting from previously optimal unmixing.
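Cembedded: ndarray (nt,nt) embedded structure :math:`\sum_d w_d\mathbf{C_{dict}[d]}` updated consistently with the returned :math:`\mathbf{w}`. current_loss: float reconstruction error after the conditional gradient iterations.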
""" convergence_criterion = np.inf current_loss = starting_loss count = 0 const_TCT = np.transpose(C.dot(T)).dot(T) while (convergence_criterion > tol) and (count < max_iter): previous_loss = current_loss # 1) Compute gradient at current point w grad_w = 2 * np.sum(Cdict * (Cembedded[None, :, :] * const_q[None, :, :] - const_TCT[None, :, :]), axis=(1, 2)) grad_w -= 2 * reg * w # 2) Conditional gradient direction finding: x= \argmin_x x^T.grad_w min_ = np.min(grad_w) x = (grad_w == min_).astype(np.float64) x /= np.sum(x) # 3) Line-search step: solve \argmin_{\gamma \in [0,1]} a*gamma^2 + b*gamma + c gamma, a, b, Cembedded_diff = _linesearch_gromov_wasserstein_unmixing(w, grad_w, x, Cdict, Cembedded, const_q, const_TCT, reg) # 4) Updates: w <-- (1-gamma)*w + gamma*x w += gamma * (x - w) Cembedded += gamma * Cembedded_diff current_loss += a * (gamma**2) + b * gamma if previous_loss != 0: # not that the loss can be negative if reg >0 convergence_criterion = abs(previous_loss - current_loss) / abs(previous_loss) else: # handle numerical issues around 0 convergence_criterion = abs(previous_loss - current_loss) / 10**(-15) count += 1 return w, Cembedded, current_loss def _linesearch_gromov_wasserstein_unmixing(w, grad_w, x, Cdict, Cembedded, const_q, const_TCT, reg, **kwargs): r""" Compute optimal steps for the line search problem of Gromov-Wasserstein linear unmixing .. math:: \min_{\gamma \in [0,1]} \sum_{ijkl} (C_{i,j} - \sum_{d=1}^D z_d(\gamma)C_{dict}[d]_{k,l} )^2 T_{i,k}T_{j,l} - reg\| \mathbf{z}(\gamma) \|_2^2 Such that: - :math:`\mathbf{z}(\gamma) = (1- \gamma)\mathbf{w} + \gamma \mathbf{x}` Parameters ---------- w : array-like, shape (D,) Unmixing. grad_w : array-like, shape (D, D) Gradient of the reconstruction loss with respect to w. x: array-like, shape (D,) Conditional gradient direction. Cdict : list of D array-like, shape (nt,nt) Metric/Graph cost matrices composing the dictionary on which to embed C. Each matrix in the dictionary must have the same size (nt,nt). Cembedded: array-like, shape (nt,nt) Embedded structure :math:`(\sum_d w_dCdict[d],q)` of :math:`(\mathbf{C},\mathbf{p})` onto the dictionary. Used to avoid redundant computations. const_q: array-like, shape (nt,nt) product matrix :math:`\mathbf{q}\mathbf{q}^\top` where q is the target space distribution. Used to avoid redundant computations. const_TCT: array-like, shape (nt, nt) :math:`\mathbf{T}^\top \mathbf{C}^\top \mathbf{T}`. Used to avoid redundant computations. Returns ------- gamma: float Optimal value for the line-search step a: float Constant factor appearing in the factorization :math:`a \gamma^2 + b \gamma +c` of the reconstruction loss b: float Constant factor appearing in the factorization :math:`a \gamma^2 + b \gamma +c` of the reconstruction loss Cembedded_diff: numpy array, shape (nt, nt) Difference between models evaluated in :math:`\mathbf{w}` and in :math:`\mathbf{w}`. reg : float, optional. Coefficient of the negative quadratic regularization used to promote sparsity of :math:`\mathbf{w}`. 
""" # 3) Line-search step: solve \argmin_{\gamma \in [0,1]} a*gamma^2 + b*gamma + c Cembedded_x = np.sum(x[:, None, None] * Cdict, axis=0) Cembedded_diff = Cembedded_x - Cembedded trace_diffx = np.sum(Cembedded_diff * Cembedded_x * const_q) trace_diffw = np.sum(Cembedded_diff * Cembedded * const_q) a = trace_diffx - trace_diffw b = 2 * (trace_diffw - np.sum(Cembedded_diff * const_TCT)) if reg != 0: a -= reg * np.sum((x - w)**2) b -= 2 * reg * np.sum(w * (x - w)) if a > 0: gamma = min(1, max(0, - b / (2 * a))) elif a + b < 0: gamma = 1 else: gamma = 0 return gamma, a, b, Cembedded_diff def fused_gromov_wasserstein_dictionary_learning(Cs, Ys, D, nt, alpha, reg=0., ps=None, q=None, epochs=20, batch_size=32, learning_rate_C=1., learning_rate_Y=1., Cdict_init=None, Ydict_init=None, projection='nonnegative_symmetric', use_log=False, tol_outer=10**(-5), tol_inner=10**(-5), max_iter_outer=20, max_iter_inner=200, use_adam_optimizer=True, verbose=False, random_state=None, **kwargs): r""" Infer Fused Gromov-Wasserstein linear dictionary :math:`\{ (\mathbf{C_{dict}[d]}, \mathbf{Y_{dict}[d]}, \mathbf{q}) \}_{d \in [D]}` from the list of S attributed structures :math:`\{ (\mathbf{C_s}, \mathbf{Y_s},\mathbf{p_s}) \}_s` .. math:: \min_{\mathbf{C_{dict}},\mathbf{Y_{dict}}, \{\mathbf{w_s}\}_{s}} \sum_{s=1}^S FGW_{2,\alpha}(\mathbf{C_s}, \mathbf{Y_s}, \sum_{d=1}^D w_{s,d}\mathbf{C_{dict}[d]},\sum_{d=1}^D w_{s,d}\mathbf{Y_{dict}[d]}, \mathbf{p_s}, \mathbf{q}) \\ - reg\| \mathbf{w_s} \|_2^2 Such that :math:`\forall s \leq S` : - :math:`\mathbf{w_s}^\top \mathbf{1}_D = 1` - :math:`\mathbf{w_s} \geq \mathbf{0}_D` Where : - :math:`\forall s \leq S, \mathbf{C_s}` is a (ns,ns) pairwise similarity matrix of variable size ns. - :math:`\forall s \leq S, \mathbf{Y_s}` is a (ns,d) features matrix of variable size ns and fixed dimension d. - :math:`\mathbf{C_{dict}}` is a (D, nt, nt) tensor of D pairwise similarity matrix of fixed size nt. - :math:`\mathbf{Y_{dict}}` is a (D, nt, d) tensor of D features matrix of fixed size nt and fixed dimension d. - :math:`\forall s \leq S, \mathbf{p_s}` is the source distribution corresponding to :math:`\mathbf{C_s}` - :math:`\mathbf{q}` is the target distribution assigned to every structures in the embedding space. - :math:`\alpha` is the trade-off parameter of Fused Gromov-Wasserstein - reg is the regularization coefficient. The stochastic algorithm used for estimating the attributed graph dictionary atoms as proposed in [38]_ Parameters ---------- Cs : list of S symmetric array-like, shape (ns, ns) List of Metric/Graph cost matrices of variable size (ns,ns). Ys : list of S array-like, shape (ns, d) List of feature matrix of variable size (ns,d) with d fixed. D: int Number of dictionary atoms to learn nt: int Number of samples within each dictionary atoms alpha : float Trade-off parameter of Fused Gromov-Wasserstein reg : float, optional Coefficient of the negative quadratic regularization used to promote sparsity of w. The default is 0. ps : list of S array-like, shape (ns,), optional Distribution in each source space C of Cs. Default is None and corresponds to uniform distibutions. q : array-like, shape (nt,), optional Distribution in the embedding space whose structure will be learned. Default is None and corresponds to uniform distributions. epochs: int, optional Number of epochs used to learn the dictionary. Default is 32. batch_size: int, optional Batch size for each stochastic gradient update of the dictionary. 
Set to the dataset size if the provided batch_size is higher than the dataset size. Default is 32. learning_rate_C: float, optional Learning rate used for the stochastic gradient descent on Cdict. Default is 1. learning_rate_Y: float, optional Learning rate used for the stochastic gradient descent on Ydict. Default is 1. Cdict_init: list of D array-like with shape (nt, nt), optional Used to initialize the dictionary structures Cdict. If set to None (Default), the dictionary will be initialized randomly. Else Cdict must have shape (D, nt, nt) i.e match provided shape features. Ydict_init: list of D array-like with shape (nt, d), optional Used to initialize the dictionary features Ydict. If set to None, the dictionary features will be initialized randomly. Else Ydict must have shape (D, nt, d) where d is the features dimension of inputs Ys and also match provided shape features. projection: str, optional If 'nonnegative' and/or 'symmetric' is in projection, the corresponding projection will be performed at each stochastic update of the dictionary Else the set of atoms is :math:`R^{nt * nt}`. Default is 'nonnegative_symmetric' log: bool, optional If set to True, losses evolution by batches and epochs are tracked. Default is False. use_adam_optimizer: bool, optional If set to True, adam optimizer with default settings is used as adaptative learning rate strategy. Else perform SGD with fixed learning rate. Default is True. tol_outer : float, optional Solver precision for the BCD algorithm, measured by absolute relative error on consecutive losses. Default is :math:`10^{-5}`. tol_inner : float, optional Solver precision for the Conjugate Gradient algorithm used to get optimal w at a fixed transport, measured by absolute relative error on consecutive losses. Default is :math:`10^{-5}`. max_iter_outer : int, optional Maximum number of iterations for the BCD. Default is 20. max_iter_inner : int, optional Maximum number of iterations for the Conjugate Gradient. Default is 200. verbose : bool, optional Print the reconstruction loss every epoch. Default is False. random_state : int, RandomState instance or None, default=None Determines random number generation. Pass an int for reproducible output across multiple function calls. Returns ------- Cdict_best_state : D array-like, shape (D,nt,nt) Metric/Graph cost matrices composing the dictionary. The dictionary leading to the best loss over an epoch is saved and returned. Ydict_best_state : D array-like, shape (D,nt,d) Feature matrices composing the dictionary. The dictionary leading to the best loss over an epoch is saved and returned. log: dict If use_log is True, contains loss evolutions by batches and epochs. References ------- .. [38] C. Vincent-Cuaz, T. Vayer, R. Flamary, M. Corneli, N. Courty, Online Graph Dictionary Learning, International Conference on Machine Learning (ICML), 2021. 
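Examples -------- A minimal usage sketch on two small random attributed graphs; all sizes and hyperparameters below are arbitrary, chosen for illustration only: >>> import numpy as np  # doctest: +SKIP >>> import ot  # doctest: +SKIP >>> rng = np.random.RandomState(42)  # doctest: +SKIP >>> Cs = [rng.rand(n, n) for n in (10, 12)]  # doctest: +SKIP >>> Cs = [0.5 * (C + C.T) for C in Cs]  # symmetrize structures  # doctest: +SKIP >>> Ys = [rng.rand(n, 3) for n in (10, 12)]  # 3-dimensional node features  # doctest: +SKIP >>> Cdict, Ydict, log = ot.gromov.fused_gromov_wasserstein_dictionary_learning( ...     Cs, Ys, D=2, nt=6, alpha=0.5, epochs=2, batch_size=2, random_state=0)  # doctest: +SKIP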
""" Cs0, Ys0 = Cs, Ys nx = get_backend(*Cs0, *Ys0) Cs = [nx.to_numpy(C) for C in Cs0] Ys = [nx.to_numpy(Y) for Y in Ys0] d = Ys[0].shape[-1] dataset_size = len(Cs) if ps is None: ps = [unif(C.shape[0]) for C in Cs] else: ps = [nx.to_numpy(p) for p in ps] if q is None: q = unif(nt) else: q = nx.to_numpy(q) rng = check_random_state(random_state) if Cdict_init is None: # Initialize randomly structures of dictionary atoms based on samples dataset_means = [C.mean() for C in Cs] Cdict = rng.normal(loc=np.mean(dataset_means), scale=np.std(dataset_means), size=(D, nt, nt)) else: Cdict = nx.to_numpy(Cdict_init).copy() assert Cdict.shape == (D, nt, nt) if Ydict_init is None: # Initialize randomly features of dictionary atoms based on samples distribution by feature component dataset_feature_means = np.stack([F.mean(axis=0) for F in Ys]) Ydict = rng.normal(loc=dataset_feature_means.mean(axis=0), scale=dataset_feature_means.std(axis=0), size=(D, nt, d)) else: Ydict = nx.to_numpy(Ydict_init).copy() assert Ydict.shape == (D, nt, d) if 'symmetric' in projection: Cdict = 0.5 * (Cdict + Cdict.transpose((0, 2, 1))) symmetric = True else: symmetric = False if 'nonnegative' in projection: Cdict[Cdict < 0.] = 0. if use_adam_optimizer: adam_moments_C = _initialize_adam_optimizer(Cdict) adam_moments_Y = _initialize_adam_optimizer(Ydict) log = {'loss_batches': [], 'loss_epochs': []} const_q = q[:, None] * q[None, :] diag_q = np.diag(q) Cdict_best_state = Cdict.copy() Ydict_best_state = Ydict.copy() loss_best_state = np.inf if batch_size > dataset_size: batch_size = dataset_size iter_by_epoch = dataset_size // batch_size + int((dataset_size % batch_size) > 0) for epoch in range(epochs): cumulated_loss_over_epoch = 0. for _ in range(iter_by_epoch): # Batch iterations batch = rng.choice(range(dataset_size), size=batch_size, replace=False) cumulated_loss_over_batch = 0. 
unmixings = np.zeros((batch_size, D)) Cs_embedded = np.zeros((batch_size, nt, nt)) Ys_embedded = np.zeros((batch_size, nt, d)) Ts = [None] * batch_size for batch_idx, C_idx in enumerate(batch): # BCD solver for Fused Gromov-Wasserstein linear unmixing used independently on each structure of the sampled batch unmixings[batch_idx], Cs_embedded[batch_idx], Ys_embedded[batch_idx], Ts[batch_idx], current_loss = fused_gromov_wasserstein_linear_unmixing( Cs[C_idx], Ys[C_idx], Cdict, Ydict, alpha, reg=reg, p=ps[C_idx], q=q, tol_outer=tol_outer, tol_inner=tol_inner, max_iter_outer=max_iter_outer, max_iter_inner=max_iter_inner, symmetric=symmetric, **kwargs ) cumulated_loss_over_batch += current_loss cumulated_loss_over_epoch += cumulated_loss_over_batch if use_log: log['loss_batches'].append(cumulated_loss_over_batch) # Stochastic projected gradient step over dictionary atoms grad_Cdict = np.zeros_like(Cdict) grad_Ydict = np.zeros_like(Ydict) for batch_idx, C_idx in enumerate(batch): shared_term_structures = Cs_embedded[batch_idx] * const_q - (Cs[C_idx].dot(Ts[batch_idx])).T.dot(Ts[batch_idx]) shared_term_features = diag_q.dot(Ys_embedded[batch_idx]) - Ts[batch_idx].T.dot(Ys[C_idx]) grad_Cdict += alpha * unmixings[batch_idx][:, None, None] * shared_term_structures[None, :, :] grad_Ydict += (1 - alpha) * unmixings[batch_idx][:, None, None] * shared_term_features[None, :, :] grad_Cdict *= 2 / batch_size grad_Ydict *= 2 / batch_size if use_adam_optimizer: Cdict, adam_moments_C = _adam_stochastic_updates(Cdict, grad_Cdict, learning_rate_C, adam_moments_C) Ydict, adam_moments_Y = _adam_stochastic_updates(Ydict, grad_Ydict, learning_rate_Y, adam_moments_Y) else: Cdict -= learning_rate_C * grad_Cdict Ydict -= learning_rate_Y * grad_Ydict if 'symmetric' in projection: Cdict = 0.5 * (Cdict + Cdict.transpose((0, 2, 1))) if 'nonnegative' in projection: Cdict[Cdict < 0.] = 0. if use_log: log['loss_epochs'].append(cumulated_loss_over_epoch) if loss_best_state > cumulated_loss_over_epoch: loss_best_state = cumulated_loss_over_epoch Cdict_best_state = Cdict.copy() Ydict_best_state = Ydict.copy() if verbose: print('--- epoch: ', epoch, ' cumulated reconstruction error: ', cumulated_loss_over_epoch) return nx.from_numpy(Cdict_best_state), nx.from_numpy(Ydict_best_state), log def fused_gromov_wasserstein_linear_unmixing(C, Y, Cdict, Ydict, alpha, reg=0., p=None, q=None, tol_outer=10**(-5), tol_inner=10**(-5), max_iter_outer=20, max_iter_inner=200, symmetric=True, **kwargs): r""" Returns the Fused Gromov-Wasserstein linear unmixing of :math:`(\mathbf{C},\mathbf{Y},\mathbf{p})` onto the attributed dictionary atoms :math:`\{ (\mathbf{C_{dict}[d]},\mathbf{Y_{dict}[d]}, \mathbf{q}) \}_{d \in [D]}` .. math:: \min_{\mathbf{w}} FGW_{2,\alpha}(\mathbf{C},\mathbf{Y}, \sum_{d=1}^D w_d\mathbf{C_{dict}[d]},\sum_{d=1}^D w_d\mathbf{Y_{dict}[d]}, \mathbf{p}, \mathbf{q}) - reg \| \mathbf{w} \|_2^2 such that: - :math:`\mathbf{w}^\top \mathbf{1}_D = 1` - :math:`\mathbf{w} \geq \mathbf{0}_D` Where : - :math:`\mathbf{C}` is a (ns,ns) pairwise similarity matrix of variable size ns. - :math:`\mathbf{Y}` is a (ns,d) features matrix of variable size ns and fixed dimension d. - :math:`\mathbf{C_{dict}}` is a (D, nt, nt) tensor of D pairwise similarity matrices of fixed size nt. - :math:`\mathbf{Y_{dict}}` is a (D, nt, d) tensor of D feature matrices of fixed size nt and fixed dimension d. - :math:`\mathbf{p}` is the source distribution corresponding to :math:`\mathbf{C}` - :math:`\mathbf{q}` is the target distribution assigned to every structure in the embedding space. - :math:`\alpha` is the trade-off parameter of Fused Gromov-Wasserstein - reg is the regularization coefficient. The algorithm used for solving the problem is a Block Coordinate Descent as discussed in [38]_, algorithm 6. Parameters ---------- C : array-like, shape (ns, ns) Metric/Graph cost matrix. Y : array-like, shape (ns, d) Feature matrix. Cdict : D array-like, shape (D,nt,nt) Metric/Graph cost matrices composing the dictionary on which to embed (C,Y). Ydict : D array-like, shape (D,nt,d) Feature matrices composing the dictionary on which to embed (C,Y). alpha: float, Trade-off parameter of Fused Gromov-Wasserstein. reg : float, optional Coefficient of the negative quadratic regularization used to promote sparsity of w. The default is 0. p : array-like, shape (ns,), optional Distribution in the source space C. Default is None and corresponds to uniform distribution. q : array-like, shape (nt,), optional Distribution in the space depicted by the dictionary. Default is None and corresponds to uniform distribution. tol_outer : float, optional Solver precision for the BCD algorithm. Default is :math:`10^{-5}`. tol_inner : float, optional Solver precision for the Conjugate Gradient algorithm used to get optimal w at a fixed transport. Default is :math:`10^{-5}`. max_iter_outer : int, optional Maximum number of iterations for the BCD. Default is 20. max_iter_inner : int, optional Maximum number of iterations for the Conjugate Gradient. Default is 200. Returns ------- w: array-like, shape (D,) Fused Gromov-Wasserstein linear unmixing of (C,Y,p) onto the span of the dictionary. Cembedded: array-like, shape (nt,nt) embedded structure of :math:`(\mathbf{C},\mathbf{Y}, \mathbf{p})` onto the dictionary, :math:`\sum_d w_d\mathbf{C_{dict}[d]}`. Yembedded: array-like, shape (nt,d) embedded features of :math:`(\mathbf{C},\mathbf{Y}, \mathbf{p})` onto the dictionary, :math:`\sum_d w_d\mathbf{Y_{dict}[d]}`. T: array-like (ns,nt) Fused Gromov-Wasserstein transport plan between :math:`(\mathbf{C},\mathbf{p})` and :math:`(\sum_d w_d\mathbf{C_{dict}[d]}, \sum_d w_d\mathbf{Y_{dict}[d]},\mathbf{q})`. current_loss: float reconstruction error References ---------- .. [38] C. Vincent-Cuaz, T. Vayer, R. Flamary, M. Corneli, N. Courty, Online Graph Dictionary Learning, International Conference on Machine Learning (ICML), 2021. """ C0, Y0, Cdict0, Ydict0 = C, Y, Cdict, Ydict nx = get_backend(C0, Y0, Cdict0, Ydict0) C = nx.to_numpy(C0) Y = nx.to_numpy(Y0) Cdict = nx.to_numpy(Cdict0) Ydict = nx.to_numpy(Ydict0) if p is None: p = unif(C.shape[0]) else: p = nx.to_numpy(p) if q is None: q = unif(Cdict.shape[-1]) else: q = nx.to_numpy(q) T = p[:, None] * q[None, :] D = len(Cdict) d = Y.shape[-1] w = unif(D) # Initialize with uniform weights ns = C.shape[-1] nt = Cdict.shape[-1] # modeling (C,Y) Cembedded = np.sum(w[:, None, None] * Cdict, axis=0) Yembedded = np.sum(w[:, None, None] * Ydict, axis=0) # constants depending on q const_q = q[:, None] * q[None, :] diag_q = np.diag(q) # Trackers for BCD convergence convergence_criterion = np.inf current_loss = 10**15 outer_count = 0 Ys_constM = (Y**2).dot(np.ones((d, nt))) # constant in computing Euclidean pairwise feature matrix while (convergence_criterion > tol_outer) and (outer_count < max_iter_outer): previous_loss = current_loss # 1. Solve FGW transport between (C,Y,p) and (\sum_d w_d Cdict[d], \sum_d w_d Ydict[d], q) fixing the unmixing w Yt_varM = (np.ones((ns, d))).dot((Yembedded**2).T) M = Ys_constM + Yt_varM - 2 * Y.dot(Yembedded.T) # Euclidean distance matrix between features T, log = fused_gromov_wasserstein( M, C, Cembedded, p, q, loss_fun='square_loss', alpha=alpha, max_iter=max_iter_inner, tol_rel=tol_inner, tol_abs=0., armijo=False, G0=T, log=True, symmetric=symmetric, **kwargs) current_loss = log['fgw_dist'] if reg != 0: current_loss -= reg * np.sum(w**2) # 2. Solve linear unmixing problem over w with a fixed transport plan T w, Cembedded, Yembedded, current_loss = _cg_fused_gromov_wasserstein_unmixing(C, Y, Cdict, Ydict, Cembedded, Yembedded, w, T, p, q, const_q, diag_q, current_loss, alpha, reg, tol=tol_inner, max_iter=max_iter_inner, **kwargs) if previous_loss != 0: convergence_criterion = abs(previous_loss - current_loss) / abs(previous_loss) else: convergence_criterion = abs(previous_loss - current_loss) / 10**(-12) outer_count += 1 return nx.from_numpy(w), nx.from_numpy(Cembedded), nx.from_numpy(Yembedded), nx.from_numpy(T), nx.from_numpy(current_loss) def _cg_fused_gromov_wasserstein_unmixing(C, Y, Cdict, Ydict, Cembedded, Yembedded, w, T, p, q, const_q, diag_q, starting_loss, alpha, reg, tol=10**(-6), max_iter=200, **kwargs): r""" Returns, for a fixed admissible transport plan, the optimal linear unmixing :math:`\mathbf{w}` minimizing the Fused Gromov-Wasserstein cost between :math:`(\mathbf{C},\mathbf{Y},\mathbf{p})` and :math:`(\sum_d w_d \mathbf{C_{dict}[d]},\sum_d w_d\mathbf{Y_{dict}[d]}, \mathbf{q})` .. math:: \min_{\mathbf{w}} \alpha \sum_{ijkl} (C_{i,j} - \sum_{d=1}^D w_d C_{dict}[d]_{k,l} )^2 T_{i,k}T_{j,l} \\+ (1-\alpha) \sum_{ij} \| \mathbf{Y_i} - \sum_d w_d \mathbf{Y_{dict}[d]_j} \|_2^2 T_{ij} - reg \| \mathbf{w} \|_2^2 Such that : - :math:`\mathbf{w}^\top \mathbf{1}_D = 1` - :math:`\mathbf{w} \geq \mathbf{0}_D` Where : - :math:`\mathbf{C}` is a (ns,ns) pairwise similarity matrix of variable size ns. - :math:`\mathbf{Y}` is a (ns,d) features matrix of variable size ns and fixed dimension d. - :math:`\mathbf{C_{dict}}` is a (D, nt, nt) tensor of D pairwise similarity matrices of fixed size nt. - :math:`\mathbf{Y_{dict}}` is a (D, nt, d) tensor of D feature matrices of fixed size nt and fixed dimension d. - :math:`\mathbf{p}` is the source distribution corresponding to :math:`\mathbf{C}` - :math:`\mathbf{q}` is the target distribution assigned to every structure in the embedding space. - :math:`\mathbf{T}` is the optimal transport plan conditioned by the previous state of :math:`\mathbf{w}` - :math:`\alpha` is the trade-off parameter of Fused Gromov-Wasserstein - reg is the regularization coefficient. The algorithm used for solving the problem is a Conditional Gradient Descent as discussed in [38]_, algorithm 7. Parameters ---------- C : array-like, shape (ns, ns) Metric/Graph cost matrix. Y : array-like, shape (ns, d) Feature matrix. Cdict : list of D array-like, shape (nt,nt) Metric/Graph cost matrices composing the dictionary on which to embed (C,Y). Each matrix in the dictionary must have the same size (nt,nt). Ydict : list of D array-like, shape (nt,d) Feature matrices composing the dictionary on which to embed (C,Y). Each matrix in the dictionary must have the same size (nt,d). Cembedded: array-like, shape (nt,nt) Embedded structure of (C,Y) onto the dictionary Yembedded: array-like, shape (nt,d) Embedded features of (C,Y) onto the dictionary w: array-like, shape (D,) Linear unmixing of (C,Y) onto (Cdict,Ydict) const_q: array-like, shape (nt,nt) product matrix :math:`\mathbf{qq}^\top` where :math:`\mathbf{q}` is the target space distribution. diag_q: array-like, shape (nt,nt) diagonal matrix with values of q on the diagonal. T: array-like, shape (ns,nt) fixed transport plan between (C,Y) and its model p : array-like, shape (ns,) Distribution in the source space (C,Y). q : array-like, shape (nt,) Distribution in the embedding space depicted by the dictionary. alpha: float, Trade-off parameter of Fused Gromov-Wasserstein. reg : float, optional Coefficient of the negative quadratic regularization used to promote sparsity of w. Returns ------- w: ndarray (D,) linear unmixing of :math:`(\mathbf{C},\mathbf{Y},\mathbf{p})` onto the span of :math:`(C_{dict},Y_{dict})` given OT corresponding to previous unmixing. Cembedded: ndarray (nt,nt) embedded structure :math:`\sum_d w_d\mathbf{C_{dict}[d]}` updated consistently with the returned :math:`\mathbf{w}`. Yembedded: ndarray (nt,d) embedded features :math:`\sum_d w_d\mathbf{Y_{dict}[d]}` updated consistently with the returned :math:`\mathbf{w}`. current_loss: float reconstruction error after the conditional gradient iterations. """ convergence_criterion = np.inf current_loss = starting_loss count = 0 const_TCT = np.transpose(C.dot(T)).dot(T) ones_ns_d = np.ones(Y.shape) while (convergence_criterion > tol) and (count < max_iter): previous_loss = current_loss # 1) Compute gradient at current point w # structure grad_w = alpha * np.sum(Cdict * (Cembedded[None, :, :] * const_q[None, :, :] - const_TCT[None, :, :]), axis=(1, 2)) # feature grad_w += (1 - alpha) * np.sum(Ydict * (diag_q.dot(Yembedded)[None, :, :] - T.T.dot(Y)[None, :, :]), axis=(1, 2)) grad_w -= reg * w grad_w *= 2 # 2) Conditional gradient direction finding: x= \argmin_x x^T.grad_w min_ = np.min(grad_w) x = (grad_w == min_).astype(np.float64) x /= np.sum(x) # 3) Line-search step: solve \argmin_{\gamma \in [0,1]} a*gamma^2 + b*gamma + c gamma, a, b, Cembedded_diff, Yembedded_diff = _linesearch_fused_gromov_wasserstein_unmixing(w, grad_w, x, Y, Cdict, Ydict, Cembedded, Yembedded, T, const_q, const_TCT, ones_ns_d, alpha, reg) # 4) Updates: w <-- (1-gamma)*w + gamma*x w += gamma * (x - w) Cembedded += gamma * Cembedded_diff Yembedded += gamma * Yembedded_diff current_loss += a * (gamma**2) + b * gamma if previous_loss != 0: convergence_criterion = abs(previous_loss - current_loss) / abs(previous_loss) else: convergence_criterion = abs(previous_loss - current_loss) / 10**(-12) count += 1 return w, Cembedded, Yembedded, current_loss def _linesearch_fused_gromov_wasserstein_unmixing(w, grad_w, x, Y, Cdict, Ydict, Cembedded, Yembedded, T, const_q, const_TCT, ones_ns_d, alpha, reg, **kwargs): r""" Compute optimal steps for the line search problem of Fused Gromov-Wasserstein linear unmixing .. math:: \min_{\gamma \in [0,1]} \alpha \sum_{ijkl} (C_{i,j} - \sum_{d=1}^D z_d(\gamma)C_{dict}[d]_{k,l} )^2 T_{i,k}T_{j,l} \\ + (1-\alpha) \sum_{ij} \| \mathbf{Y_i} - \sum_d z_d(\gamma) \mathbf{Y_{dict}[d]_j} \|_2^2 T_{i,j} - reg\| \mathbf{z}(\gamma) \|_2^2 Such that : - :math:`\mathbf{z}(\gamma) = (1- \gamma)\mathbf{w} + \gamma \mathbf{x}` Parameters ---------- w : array-like, shape (D,) Unmixing. grad_w : array-like, shape (D,) Gradient of the reconstruction loss with respect to w. x: array-like, shape (D,) Conditional gradient direction. Y: array-like, shape (ns,d) Feature matrix of the input space Cdict : list of D array-like, shape (nt, nt) Metric/Graph cost matrices composing the dictionary on which to embed (C,Y). Each matrix in the dictionary must have the same size (nt,nt). Ydict : list of D array-like, shape (nt, d) Feature matrices composing the dictionary on which to embed (C,Y). Each matrix in the dictionary must have the same size (nt,d). Cembedded: array-like, shape (nt, nt) Embedded structure of (C,Y) onto the dictionary Yembedded: array-like, shape (nt, d) Embedded features of (C,Y) onto the dictionary T: array-like, shape (ns, nt) Fixed transport plan between (C,Y) and its current model. const_q: array-like, shape (nt,nt) product matrix :math:`\mathbf{q}\mathbf{q}^\top` where q is the target space distribution. Used to avoid redundant computations. const_TCT: array-like, shape (nt, nt) :math:`\mathbf{T}^\top \mathbf{C}^\top \mathbf{T}`. Used to avoid redundant computations. ones_ns_d: array-like, shape (ns, d) :math:`\mathbf{1}_{ ns \times d}`. Used to avoid redundant computations. alpha: float, Trade-off parameter of Fused Gromov-Wasserstein. reg : float, optional Coefficient of the negative quadratic regularization used to promote sparsity of w. Returns ------- gamma: float Optimal value for the line-search step a: float Constant factor appearing in the factorization :math:`a \gamma^2 + b \gamma +c` of the reconstruction loss b: float Constant factor appearing in the factorization :math:`a \gamma^2 + b \gamma +c` of the reconstruction loss Cembedded_diff: numpy array, shape (nt, nt) Difference between structure matrices of models evaluated in :math:`\mathbf{x}` and in :math:`\mathbf{w}`. Yembedded_diff: numpy array, shape (nt, d) Difference between feature matrices of models evaluated in :math:`\mathbf{x}` and in :math:`\mathbf{w}`. """ # polynomial coefficients from the quadratic objective (with respect to w) on structures Cembedded_x = np.sum(x[:, None, None] * Cdict, axis=0) Cembedded_diff = Cembedded_x - Cembedded trace_diffx = np.sum(Cembedded_diff * Cembedded_x * const_q) trace_diffw = np.sum(Cembedded_diff * Cembedded * const_q) # Constant factors appearing in the factorization a*gamma^2 + b*gamma + c of the Gromov-Wasserstein reconstruction loss a_gw = trace_diffx - trace_diffw b_gw = 2 * (trace_diffw - np.sum(Cembedded_diff * const_TCT)) # polynomial coefficients from the quadratic objective (with respect to w) on features Yembedded_x = np.sum(x[:, None, None] * Ydict, axis=0) Yembedded_diff = Yembedded_x - Yembedded # Constant factors appearing in the factorization a*gamma^2 + b*gamma + c of the Wasserstein reconstruction loss on features a_w = np.sum(ones_ns_d.dot((Yembedded_diff**2).T) * T) b_w = 2 * np.sum(T * (ones_ns_d.dot((Yembedded * Yembedded_diff).T) - Y.dot(Yembedded_diff.T))) a = alpha * a_gw + (1 - alpha) * a_w b = alpha * b_gw + (1 - alpha) * b_w if reg != 0: a -= reg * np.sum((x - w)**2) b -= 2 * reg * np.sum(w * (x - w)) if a > 0: gamma = min(1, max(0, -b / (2 * a))) elif a + b < 0: gamma = 1 else: gamma = 0 return gamma, a, b, Cembedded_diff, Yembedded_diff python-pot-0.9.3+dfsg/ot/gromov/_estimators.py000066400000000000000000000372721455713015700214420ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Gromov-Wasserstein and Fused-Gromov-Wasserstein stochastic estimators.
""" # Author: RĂ©mi Flamary # Tanguy Kerdoncuff # # License: MIT License import numpy as np from ..bregman import sinkhorn from ..utils import list_to_array, check_random_state from ..lp import emd_1d, emd from ..backend import get_backend def GW_distance_estimation(C1, C2, p, q, loss_fun, T, nb_samples_p=None, nb_samples_q=None, std=True, random_state=None): r""" Returns an approximation of the Gromov-Wasserstein loss between :math:`(\mathbf{C_1}, \mathbf{p})` and :math:`(\mathbf{C_2}, \mathbf{q})` with a fixed transport plan :math:`\mathbf{T}`. To recover an approximation of the Gromov-Wasserstein distance as defined in [13] compute :math:`d_{GW} = \frac{1}{2} \sqrt{\mathbf{GW}}`. The function gives an unbiased approximation of the following equation: .. math:: \mathbf{GW} = \sum_{i,j,k,l} L(\mathbf{C_{1}}_{i,k}, \mathbf{C_{2}}_{j,l}) \mathbf{T}_{i,j} \mathbf{T}_{k,l} Where : - :math:`\mathbf{C_1}`: Metric cost matrix in the source space - :math:`\mathbf{C_2}`: Metric cost matrix in the target space - `L` : Loss function to account for the misfit between the similarity matrices - :math:`\mathbf{T}`: Matrix with marginal :math:`\mathbf{p}` and :math:`\mathbf{q}` Parameters ---------- C1 : array-like, shape (ns, ns) Metric cost matrix in the source space C2 : array-like, shape (nt, nt) Metric cost matrix in the target space p : array-like, shape (ns,) Distribution in the source space q : array-like, shape (nt,) Distribution in the target space loss_fun : function: :math:`\mathbb{R} \times \mathbb{R} \mapsto \mathbb{R}` Loss function used for the distance, the transport plan does not depend on the loss function T : csr or array-like, shape (ns, nt) Transport plan matrix, either a sparse csr or a dense matrix nb_samples_p : int, optional `nb_samples_p` is the number of samples (without replacement) along the first dimension of :math:`\mathbf{T}` nb_samples_q : int, optional `nb_samples_q` is the number of samples along the second dimension of :math:`\mathbf{T}`, for each sample along the first std : bool, optional Standard deviation associated with the prediction of the gromov-wasserstein cost random_state : int or RandomState instance, optional Fix the seed for reproducibility Returns ------- : float Gromov-wasserstein cost References ---------- .. [14] Kerdoncuff, Tanguy, Emonet, RĂ©mi, Sebban, Marc "Sampled Gromov Wasserstein." Machine Learning Journal (MLJ). 2021. """ C1, C2, p, q = list_to_array(C1, C2, p, q) nx = get_backend(C1, C2, p, q) generator = check_random_state(random_state) len_p = p.shape[0] len_q = q.shape[0] # It is always better to sample from the biggest distribution first. if len_p < len_q: p, q = q, p len_p, len_q = len_q, len_p C1, C2 = C2, C1 T = T.T if nb_samples_p is None: if nx.issparse(T): # If T is sparse, it probably mean that PoGroW was used, thus the number of sample is reduced nb_samples_p = min(int(5 * (len_p * np.log(len_p)) ** 0.5), len_p) else: nb_samples_p = len_p else: # The number of sample along the first dimension is without replacement. 
nb_samples_p = min(nb_samples_p, len_p) if nb_samples_q is None: nb_samples_q = 1 if std: nb_samples_q = max(2, nb_samples_q) index_k = np.zeros((nb_samples_p, nb_samples_q), dtype=int) index_l = np.zeros((nb_samples_p, nb_samples_q), dtype=int) index_i = generator.choice( len_p, size=nb_samples_p, p=nx.to_numpy(p), replace=False ) index_j = generator.choice( len_p, size=nb_samples_p, p=nx.to_numpy(p), replace=False ) for i in range(nb_samples_p): if nx.issparse(T): T_indexi = nx.reshape(nx.todense(T[index_i[i], :]), (-1,)) T_indexj = nx.reshape(nx.todense(T[index_j[i], :]), (-1,)) else: T_indexi = T[index_i[i], :] T_indexj = T[index_j[i], :] # For each of the rows sampled, the columns are sampled. index_k[i] = generator.choice( len_q, size=nb_samples_q, p=nx.to_numpy(T_indexi / nx.sum(T_indexi)), replace=True ) index_l[i] = generator.choice( len_q, size=nb_samples_q, p=nx.to_numpy(T_indexj / nx.sum(T_indexj)), replace=True ) list_value_sample = nx.stack([ loss_fun( C1[np.ix_(index_i, index_j)], C2[np.ix_(index_k[:, n], index_l[:, n])] ) for n in range(nb_samples_q) ], axis=2) if std: std_value = nx.sum(nx.std(list_value_sample, axis=2) ** 2) ** 0.5 return nx.mean(list_value_sample), std_value / (nb_samples_p * nb_samples_p) else: return nx.mean(list_value_sample) def pointwise_gromov_wasserstein(C1, C2, p, q, loss_fun, alpha=1, max_iter=100, threshold_plan=0, log=False, verbose=False, random_state=None): r""" Returns the Gromov-Wasserstein transport between :math:`(\mathbf{C_1}, \mathbf{p})` and :math:`(\mathbf{C_2}, \mathbf{q})` using a stochastic Frank-Wolfe. This method has a :math:`\mathcal{O}(\mathrm{max\_iter} \times PN^2)` time complexity with `P` the number of Sinkhorn iterations. The function solves the following optimization problem: .. math:: \mathbf{GW} = \mathop{\arg \min}_\mathbf{T} \quad \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j} \mathbf{T}_{k,l} s.t. \ \mathbf{T} \mathbf{1} &= \mathbf{p} \mathbf{T}^T \mathbf{1} &= \mathbf{q} \mathbf{T} &\geq 0 Where : - :math:`\mathbf{C_1}`: Metric cost matrix in the source space - :math:`\mathbf{C_2}`: Metric cost matrix in the target space - :math:`\mathbf{p}`: distribution in the source space - :math:`\mathbf{q}`: distribution in the target space - `L`: loss function to account for the misfit between the similarity matrices Parameters ---------- C1 : array-like, shape (ns, ns) Metric cost matrix in the source space C2 : array-like, shape (nt, nt) Metric cost matrix in the target space p : array-like, shape (ns,) Distribution in the source space q : array-like, shape (nt,) Distribution in the target space loss_fun : function: :math:`\mathbb{R} \times \mathbb{R} \mapsto \mathbb{R}` Loss function used for the distance, the transport plan does not depend on the loss function alpha : float Step of the Frank-Wolfe algorithm, should be between 0 and 1 max_iter : int, optional Max number of iterations threshold_plan : float, optional Threshold under which values of the transport plan are set to zero. If above zero, the marginal constraints are only approximately satisfied. verbose : bool, optional Print information along iterations log : bool, optional If True, also returns the estimated distance and its standard deviation random_state : int or RandomState instance, optional Fix the seed for reproducibility Returns ------- T : array-like, shape (`ns`, `nt`) Optimal coupling between the two spaces References ---------- .. [14] Kerdoncuff, Tanguy, Emonet, Rémi, Sebban, Marc "Sampled Gromov Wasserstein." Machine Learning Journal (MLJ). 2021.
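Examples -------- A minimal usage sketch on small random symmetric cost matrices with uniform weights; the sizes, the loss and the iteration count are arbitrary, chosen for illustration only: >>> import numpy as np  # doctest: +SKIP >>> import ot  # doctest: +SKIP >>> rng = np.random.RandomState(0)  # doctest: +SKIP >>> C1 = rng.rand(10, 10); C1 = 0.5 * (C1 + C1.T)  # doctest: +SKIP >>> C2 = rng.rand(12, 12); C2 = 0.5 * (C2 + C2.T)  # doctest: +SKIP >>> p, q = ot.unif(10), ot.unif(12)  # doctest: +SKIP >>> T = ot.gromov.pointwise_gromov_wasserstein( ...     C1, C2, p, q, loss_fun=lambda a, b: np.abs(a - b), ...     max_iter=50, random_state=0)  # doctest: +SKIP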
""" C1, C2, p, q = list_to_array(C1, C2, p, q) nx = get_backend(C1, C2, p, q) len_p = p.shape[0] len_q = q.shape[0] generator = check_random_state(random_state) index = np.zeros(2, dtype=int) # Initialize with default marginal index[0] = generator.choice(len_p, size=1, p=nx.to_numpy(p)) index[1] = generator.choice(len_q, size=1, p=nx.to_numpy(q)) T = nx.tocsr(emd_1d(C1[index[0]], C2[index[1]], a=p, b=q, dense=False)) best_gw_dist_estimated = np.inf for cpt in range(max_iter): index[0] = generator.choice(len_p, size=1, p=nx.to_numpy(p)) T_index0 = nx.reshape(nx.todense(T[index[0], :]), (-1,)) index[1] = generator.choice( len_q, size=1, p=nx.to_numpy(T_index0 / nx.sum(T_index0)) ) if alpha == 1: T = nx.tocsr( emd_1d(C1[index[0]], C2[index[1]], a=p, b=q, dense=False) ) else: new_T = nx.tocsr( emd_1d(C1[index[0]], C2[index[1]], a=p, b=q, dense=False) ) T = (1 - alpha) * T + alpha * new_T # To limit the number of non 0, the values below the threshold are set to 0. T = nx.eliminate_zeros(T, threshold=threshold_plan) if cpt % 10 == 0 or cpt == (max_iter - 1): gw_dist_estimated = GW_distance_estimation( C1=C1, C2=C2, loss_fun=loss_fun, p=p, q=q, T=T, std=False, random_state=generator ) if gw_dist_estimated < best_gw_dist_estimated: best_gw_dist_estimated = gw_dist_estimated best_T = nx.copy(T) if verbose: if cpt % 200 == 0: print('{:5s}|{:12s}'.format('It.', 'Best gw estimated') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(cpt, best_gw_dist_estimated)) if log: log = {} log["gw_dist_estimated"], log["gw_dist_std"] = GW_distance_estimation( C1=C1, C2=C2, loss_fun=loss_fun, p=p, q=q, T=best_T, random_state=generator ) return best_T, log return best_T def sampled_gromov_wasserstein(C1, C2, p, q, loss_fun, nb_samples_grad=100, epsilon=1, max_iter=500, log=False, verbose=False, random_state=None): r""" Returns the gromov-wasserstein transport between :math:`(\mathbf{C_1}, \mathbf{p})` and :math:`(\mathbf{C_2}, \mathbf{q})` using a 1-stochastic Frank-Wolfe. This method has a :math:`\mathcal{O}(\mathrm{max\_iter} \times N \log(N))` time complexity by relying on the 1D Optimal Transport solver. The function solves the following optimization problem: .. math:: \mathbf{GW} = \mathop{\arg \min}_\mathbf{T} \quad \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j} \mathbf{T}_{k,l} s.t. 
\ \mathbf{T} \mathbf{1} &= \mathbf{p} \mathbf{T}^T \mathbf{1} &= \mathbf{q} \mathbf{T} &\geq 0 Where : - :math:`\mathbf{C_1}`: Metric cost matrix in the source space - :math:`\mathbf{C_2}`: Metric cost matrix in the target space - :math:`\mathbf{p}`: distribution in the source space - :math:`\mathbf{q}`: distribution in the target space - `L`: loss function to account for the misfit between the similarity matrices Parameters ---------- C1 : array-like, shape (ns, ns) Metric cost matrix in the source space C2 : array-like, shape (nt, nt) Metric cost matrix in the target space p : array-like, shape (ns,) Distribution in the source space q : array-like, shape (nt,) Distribution in the target space loss_fun : function: :math:`\mathbb{R} \times \mathbb{R} \mapsto \mathbb{R}` Loss function used for the distance, the transport plan does not depend on the loss function nb_samples_grad : int Number of samples to approximate the gradient epsilon : float Weight of the Kullback-Leibler regularization max_iter : int, optional Max number of iterations verbose : bool, optional Print information along iterations log : bool, optional If True, also returns the estimated distance and its standard deviation random_state : int or RandomState instance, optional Fix the seed for reproducibility Returns ------- T : array-like, shape (`ns`, `nt`) Optimal coupling between the two spaces References ---------- .. [14] Kerdoncuff, Tanguy, Emonet, Rémi, Sebban, Marc "Sampled Gromov Wasserstein." Machine Learning Journal (MLJ). 2021. """ C1, C2, p, q = list_to_array(C1, C2, p, q) nx = get_backend(C1, C2, p, q) len_p = p.shape[0] len_q = q.shape[0] generator = check_random_state(random_state) # The most natural way to define nb_samples_grad is with a simple integer. if isinstance(nb_samples_grad, int): if nb_samples_grad > len_p: # As the sampling along the first dimension is done without replacement, the remainder is carried over to the second # dimension. nb_samples_grad_p, nb_samples_grad_q = len_p, nb_samples_grad // len_p else: nb_samples_grad_p, nb_samples_grad_q = nb_samples_grad, 1 else: nb_samples_grad_p, nb_samples_grad_q = nb_samples_grad T = nx.outer(p, q) # continue_loop allows stopping the loop when there are several successive small modifications of T. continue_loop = 0 # The gradient of GW is more complex if the two matrices are not symmetric. C_are_symmetric = nx.allclose(C1, C1.T, rtol=1e-10, atol=1e-10) and nx.allclose(C2, C2.T, rtol=1e-10, atol=1e-10) for cpt in range(max_iter): index0 = generator.choice( len_p, size=nb_samples_grad_p, p=nx.to_numpy(p), replace=False ) Lik = 0 for i, index0_i in enumerate(index0): index1 = generator.choice( len_q, size=nb_samples_grad_q, p=nx.to_numpy(T[index0_i, :] / nx.sum(T[index0_i, :])), replace=False ) # If the matrices C are not symmetric, the gradient has 2 terms, thus the term is chosen randomly. if (not C_are_symmetric) and generator.rand(1) > 0.5: Lik += nx.mean(loss_fun( C1[:, [index0[i]] * nb_samples_grad_q][:, None, :], C2[:, index1][None, :, :] ), axis=2) else: Lik += nx.mean(loss_fun( C1[[index0[i]] * nb_samples_grad_q, :][:, :, None], C2[index1, :][:, None, :] ), axis=0) max_Lik = nx.max(Lik) if max_Lik == 0: continue # This division by the max is here to facilitate the choice of epsilon. Lik /= max_Lik if epsilon > 0: # To avoid taking the log of 0, T is clipped at exp(-200); the clipped entries then get their log set to -inf below.
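# Subtracting epsilon * log(T) from the linearized cost makes the Sinkhorn update below a KL-proximal (mirror-descent) step around the current plan T.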
log_T = nx.log(nx.clip(T, np.exp(-200), 1)) log_T = nx.where(log_T == -200, -np.inf, log_T) Lik = Lik - epsilon * log_T try: new_T = sinkhorn(a=p, b=q, M=Lik, reg=epsilon) except (RuntimeWarning, UserWarning): print("Warning caught in Sinkhorn: returning last stable T") break else: new_T = emd(a=p, b=q, M=Lik) change_T = nx.mean((T - new_T) ** 2) if change_T <= 10e-20: continue_loop += 1 if continue_loop > 100: # Maximum number of consecutive small modifications of T T = nx.copy(new_T) break else: continue_loop = 0 if verbose and cpt % 10 == 0: if cpt % 200 == 0: print('{:5s}|{:12s}'.format('It.', '||T_n - T_{n+1}||') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(cpt, change_T)) T = nx.copy(new_T) if log: log = {} log["gw_dist_estimated"], log["gw_dist_std"] = GW_distance_estimation( C1=C1, C2=C2, loss_fun=loss_fun, p=p, q=q, T=T, random_state=generator ) return T, log return T python-pot-0.9.3+dfsg/ot/gromov/_gw.py000066400000000000000000001340651455713015700176630ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Gromov-Wasserstein and Fused-Gromov-Wasserstein conditional gradient solvers. """ # Author: Erwan Vautier # Nicolas Courty # Rémi Flamary # Titouan Vayer # Cédric Vincent-Cuaz # # License: MIT License import numpy as np import warnings from ..utils import dist, UndefinedParameter, list_to_array from ..optim import cg, line_search_armijo, solve_1d_linesearch_quad from ..utils import check_random_state, unif from ..backend import get_backend, NumpyBackend from ._utils import init_matrix, gwloss, gwggrad from ._utils import update_square_loss, update_kl_loss, update_feature_matrix def gromov_wasserstein(C1, C2, p=None, q=None, loss_fun='square_loss', symmetric=None, log=False, armijo=False, G0=None, max_iter=1e4, tol_rel=1e-9, tol_abs=1e-9, **kwargs): r""" Returns the Gromov-Wasserstein transport between :math:`(\mathbf{C_1}, \mathbf{p})` and :math:`(\mathbf{C_2}, \mathbf{q})`. The function solves the following optimization problem using Conditional Gradient: .. math:: \mathbf{T}^* \in \mathop{\arg \min}_\mathbf{T} \quad \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j} \mathbf{T}_{k,l} s.t. \ \mathbf{\gamma} \mathbf{1} &= \mathbf{p} \mathbf{\gamma}^T \mathbf{1} &= \mathbf{q} \mathbf{\gamma} &\geq 0 Where : - :math:`\mathbf{C_1}`: Metric cost matrix in the source space - :math:`\mathbf{C_2}`: Metric cost matrix in the target space - :math:`\mathbf{p}`: distribution in the source space - :math:`\mathbf{q}`: distribution in the target space - `L`: loss function to account for the misfit between the similarity matrices .. note:: This function is backend-compatible and will work on arrays from all compatible backends. But the algorithm uses the C++ CPU backend which can lead to copy overhead on GPU arrays. .. note:: All computations in the conjugate gradient solver are done with numpy to limit memory overhead. .. note:: This function will cast the computed transport plan to the data type of the provided input :math:`\mathbf{C}_1`. Casting to an integer tensor might result in a loss of precision. If this behaviour is unwanted, please make sure to provide a floating point input. Parameters ---------- C1 : array-like, shape (ns, ns) Metric cost matrix in the source space C2 : array-like, shape (nt, nt) Metric cost matrix in the target space p : array-like, shape (ns,), optional Distribution in the source space. If left to its default value None, uniform distribution is taken. q : array-like, shape (nt,), optional Distribution in the target space. If left to its default value None, uniform distribution is taken. loss_fun : str, optional loss function used for the solver either 'square_loss' or 'kl_loss' symmetric : bool, optional Either C1 and C2 are to be assumed symmetric or not. If left to its default None value, a symmetry test will be conducted. Else if set to True (resp. False), C1 and C2 will be assumed symmetric (resp. asymmetric). verbose : bool, optional Print information along iterations log : bool, optional record log if True armijo : bool, optional If True the step of the line-search is found via an Armijo search. Else closed form is used. If there are convergence issues use False. G0: array-like, shape (ns,nt), optional If None the initial transport plan of the solver is pq^T. Otherwise G0 must satisfy marginal constraints and will be used as initial transport of the solver. max_iter : int, optional Max number of iterations tol_rel : float, optional Stop threshold on relative error (>0) tol_abs : float, optional Stop threshold on absolute error (>0) **kwargs : dict parameters can be directly passed to the ot.optim.cg solver Returns ------- T : array-like, shape (`ns`, `nt`) Coupling between the two spaces that minimizes: :math:`\sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j} \mathbf{T}_{k,l}` log : dict Convergence information and loss. References ---------- .. [12] Gabriel Peyré, Marco Cuturi, and Justin Solomon, "Gromov-Wasserstein averaging of kernel and distance matrices." International Conference on Machine Learning (ICML). 2016. .. [13] Mémoli, Facundo. Gromov–Wasserstein distances and the metric approach to object matching. Foundations of computational mathematics 11.4 (2011): 417-487. .. [47] Chowdhury, S., & Mémoli, F. (2019). The gromov–wasserstein distance between networks and stable network invariants. Information and Inference: A Journal of the IMA, 8(4), 757-787. """ arr = [C1, C2] if p is not None: arr.append(list_to_array(p)) else: p = unif(C1.shape[0], type_as=C1) if q is not None: arr.append(list_to_array(q)) else: q = unif(C2.shape[0], type_as=C1) if G0 is not None: G0_ = G0 arr.append(G0) nx = get_backend(*arr) p0, q0, C10, C20 = p, q, C1, C2 p = nx.to_numpy(p0) q = nx.to_numpy(q0) C1 = nx.to_numpy(C10) C2 = nx.to_numpy(C20) if symmetric is None: symmetric = np.allclose(C1, C1.T, atol=1e-10) and np.allclose(C2, C2.T, atol=1e-10) if G0 is None: G0 = p[:, None] * q[None, :] else: G0 = nx.to_numpy(G0_) # Check marginals of G0 np.testing.assert_allclose(G0.sum(axis=1), p, atol=1e-08) np.testing.assert_allclose(G0.sum(axis=0), q, atol=1e-08) # cg for GW is implemented using numpy on CPU np_ = NumpyBackend() constC, hC1, hC2 = init_matrix(C1, C2, p, q, loss_fun, np_) def f(G): return gwloss(constC, hC1, hC2, G, np_) if symmetric: def df(G): return gwggrad(constC, hC1, hC2, G, np_) else: constCt, hC1t, hC2t = init_matrix(C1.T, C2.T, p, q, loss_fun, np_) def df(G): return 0.5 * (gwggrad(constC, hC1, hC2, G, np_) + gwggrad(constCt, hC1t, hC2t, G, np_)) if armijo: def line_search(cost, G, deltaG, Mi, cost_G, **kwargs): return line_search_armijo(cost, G, deltaG, Mi, cost_G, nx=np_, **kwargs) else: def line_search(cost, G, deltaG, Mi, cost_G, **kwargs): return solve_gromov_linesearch(G, deltaG, cost_G, hC1, hC2, M=0., reg=1., nx=np_, **kwargs) if not nx.is_floating_point(C10): warnings.warn( "Input structure matrix consists of integers. The transport plan will be " "cast accordingly, possibly resulting in a loss of precision. " "If this behaviour is unwanted, please make sure your input " "structure matrix consists of floating point elements.", stacklevel=2 ) if log: res, log = cg(p, q, 0., 1., f, df, G0, line_search, log=True, numItermax=max_iter, stopThr=tol_rel, stopThr2=tol_abs, **kwargs) log['gw_dist'] = nx.from_numpy(log['loss'][-1], type_as=C10) log['u'] = nx.from_numpy(log['u'], type_as=C10) log['v'] = nx.from_numpy(log['v'], type_as=C10) return nx.from_numpy(res, type_as=C10), log else: return nx.from_numpy(cg(p, q, 0., 1., f, df, G0, line_search, log=False, numItermax=max_iter, stopThr=tol_rel, stopThr2=tol_abs, **kwargs), type_as=C10) def gromov_wasserstein2(C1, C2, p=None, q=None, loss_fun='square_loss', symmetric=None, log=False, armijo=False, G0=None, max_iter=1e4, tol_rel=1e-9, tol_abs=1e-9, **kwargs): r""" Returns the Gromov-Wasserstein loss :math:`\mathbf{GW}` between :math:`(\mathbf{C_1}, \mathbf{p})` and :math:`(\mathbf{C_2}, \mathbf{q})`. To recover the Gromov-Wasserstein distance as defined in [13] compute :math:`d_{GW} = \frac{1}{2} \sqrt{\mathbf{GW}}`. The function solves the following optimization problem using Conditional Gradient: .. math:: \mathbf{GW} = \min_\mathbf{T} \quad \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j} \mathbf{T}_{k,l} s.t. \ \mathbf{\gamma} \mathbf{1} &= \mathbf{p} \mathbf{\gamma}^T \mathbf{1} &= \mathbf{q} \mathbf{\gamma} &\geq 0 Where : - :math:`\mathbf{C_1}`: Metric cost matrix in the source space - :math:`\mathbf{C_2}`: Metric cost matrix in the target space - :math:`\mathbf{p}`: distribution in the source space - :math:`\mathbf{q}`: distribution in the target space - `L`: loss function to account for the misfit between the similarity matrices Note that when using backends, this loss function is differentiable wrt the matrices (C1, C2) and weights (p, q) for quadratic loss using the gradients from [38]_. .. note:: This function is backend-compatible and will work on arrays from all compatible backends. But the algorithm uses the C++ CPU backend which can lead to copy overhead on GPU arrays. .. note:: All computations in the conjugate gradient solver are done with numpy to limit memory overhead. .. note:: This function will cast the computed transport plan to the data type of the provided input :math:`\mathbf{C}_1`. Casting to an integer tensor might result in a loss of precision. If this behaviour is unwanted, please make sure to provide a floating point input. Parameters ---------- C1 : array-like, shape (ns, ns) Metric cost matrix in the source space C2 : array-like, shape (nt, nt) Metric cost matrix in the target space p : array-like, shape (ns,), optional Distribution in the source space. If left to its default value None, uniform distribution is taken. q : array-like, shape (nt,), optional Distribution in the target space. If left to its default value None, uniform distribution is taken. loss_fun : str loss function used for the solver either 'square_loss' or 'kl_loss' symmetric : bool, optional Either C1 and C2 are to be assumed symmetric or not. If left to its default None value, a symmetry test will be conducted. Else if set to True (resp. False), C1 and C2 will be assumed symmetric (resp. asymmetric). verbose : bool, optional Print information along iterations log : bool, optional record log if True armijo : bool, optional If True the step of the line-search is found via an Armijo search. Else closed form is used. If there are convergence issues use False. G0: array-like, shape (ns,nt), optional If None the initial transport plan of the solver is pq^T. Otherwise G0 must satisfy marginal constraints and will be used as initial transport of the solver. max_iter : int, optional Max number of iterations tol_rel : float, optional Stop threshold on relative error (>0) tol_abs : float, optional Stop threshold on absolute error (>0) **kwargs : dict parameters can be directly passed to the ot.optim.cg solver Returns ------- gw_dist : float Gromov-Wasserstein distance log : dict convergence information and Coupling matrix References ---------- .. [12] Gabriel Peyré, Marco Cuturi, and Justin Solomon, "Gromov-Wasserstein averaging of kernel and distance matrices." International Conference on Machine Learning (ICML). 2016. .. [13] Mémoli, Facundo. Gromov–Wasserstein distances and the metric approach to object matching. Foundations of computational mathematics 11.4 (2011): 417-487. .. [38] C. Vincent-Cuaz, T. Vayer, R. Flamary, M. Corneli, N. Courty, Online Graph Dictionary Learning, International Conference on Machine Learning (ICML), 2021. .. [47] Chowdhury, S., & Mémoli, F. (2019). The gromov–wasserstein distance between networks and stable network invariants. Information and Inference: A Journal of the IMA, 8(4), 757-787. """ # simple get_backend as the full one will be handled in gromov_wasserstein nx = get_backend(C1, C2) # init marginals if set as None if p is None: p = unif(C1.shape[0], type_as=C1) if q is None: q = unif(C2.shape[0], type_as=C1) T, log_gw = gromov_wasserstein( C1, C2, p, q, loss_fun, symmetric, log=True, armijo=armijo, G0=G0, max_iter=max_iter, tol_rel=tol_rel, tol_abs=tol_abs, **kwargs) log_gw['T'] = T gw = log_gw['gw_dist'] if loss_fun == 'square_loss': gC1 = 2 * C1 * nx.outer(p, p) - 2 * nx.dot(T, nx.dot(C2, T.T)) gC2 = 2 * C2 * nx.outer(q, q) - 2 * nx.dot(T.T, nx.dot(C1, T)) elif loss_fun == 'kl_loss': gC1 = nx.log(C1 + 1e-15) * nx.outer(p, p) - nx.dot(T, nx.dot(nx.log(C2 + 1e-15), T.T)) gC2 = nx.dot(T.T, nx.dot(C1, T)) / (C2 + 1e-15) + nx.outer(q, q) gw = nx.set_gradients(gw, (p, q, C1, C2), (log_gw['u'] - nx.mean(log_gw['u']), log_gw['v'] - nx.mean(log_gw['v']), gC1, gC2)) if log: return gw, log_gw else: return gw def fused_gromov_wasserstein(M, C1, C2, p=None, q=None, loss_fun='square_loss', symmetric=None, alpha=0.5, armijo=False, G0=None, log=False, max_iter=1e4, tol_rel=1e-9, tol_abs=1e-9, **kwargs): r""" Returns the Fused Gromov-Wasserstein transport between :math:`(\mathbf{C_1}, \mathbf{Y_1}, \mathbf{p})` and :math:`(\mathbf{C_2}, \mathbf{Y_2}, \mathbf{q})` with pairwise distance matrix :math:`\mathbf{M}` between node feature matrices :math:`\mathbf{Y_1}` and :math:`\mathbf{Y_2}` (see :ref:`[24] <references-fused-gromov-wasserstein>`). The function solves the following optimization problem using Conditional Gradient: .. math:: \mathbf{T}^* \in\mathop{\arg\min}_\mathbf{T} \quad (1 - \alpha) \langle \mathbf{T}, \mathbf{M} \rangle_F + \alpha \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j} \mathbf{T}_{k,l} s.t. \ \mathbf{T} \mathbf{1} &= \mathbf{p} \mathbf{T}^T \mathbf{1} &= \mathbf{q} \mathbf{T} &\geq 0 Where : - :math:`\mathbf{M}`: metric cost matrix between features across domains - :math:`\mathbf{C_1}`: Metric cost matrix in the source space - :math:`\mathbf{C_2}`: Metric cost matrix in the target space - :math:`\mathbf{p}`: distribution in the source space - :math:`\mathbf{q}`: distribution in the target space - `L`: loss function to account for the misfit between the similarity and feature matrices - :math:`\alpha`: trade-off parameter .. note:: This function is backend-compatible and will work on arrays from all compatible backends. But the algorithm uses the C++ CPU backend which can lead to copy overhead on GPU arrays. .. note:: All computations in the conjugate gradient solver are done with numpy to limit memory overhead. .. note:: This function will cast the computed transport plan to the data type of the provided input :math:`\mathbf{M}`. Casting to an integer tensor might result in a loss of precision. If this behaviour is unwanted, please make sure to provide a floating point input. Parameters ---------- M : array-like, shape (ns, nt) Metric cost matrix between features across domains C1 : array-like, shape (ns, ns) Metric cost matrix representative of the structure in the source space C2 : array-like, shape (nt, nt) Metric cost matrix representative of the structure in the target space p : array-like, shape (ns,), optional Distribution in the source space. If left to its default value None, uniform distribution is taken. q : array-like, shape (nt,), optional Distribution in the target space. If left to its default value None, uniform distribution is taken. loss_fun : str, optional Loss function used for the solver symmetric : bool, optional Either C1 and C2 are to be assumed symmetric or not. If left to its default None value, a symmetry test will be conducted. Else if set to True (resp. False), C1 and C2 will be assumed symmetric (resp. asymmetric). alpha : float, optional Trade-off parameter (0 < alpha < 1) armijo : bool, optional If True the step of the line-search is found via an Armijo search. Else closed form is used. If there are convergence issues use False. G0: array-like, shape (ns,nt), optional If None the initial transport plan of the solver is pq^T. Otherwise G0 must satisfy marginal constraints and will be used as initial transport of the solver. log : bool, optional record log if True max_iter : int, optional Max number of iterations tol_rel : float, optional Stop threshold on relative error (>0) tol_abs : float, optional Stop threshold on absolute error (>0) **kwargs : dict parameters can be directly passed to the ot.optim.cg solver Returns ------- T : array-like, shape (`ns`, `nt`) Optimal transportation matrix for the given parameters. log : dict Log dictionary returned only if log is True in parameters. .. _references-fused-gromov-wasserstein: References ---------- .. [24] Vayer Titouan, Chapel Laetitia, Flamary Rémi, Tavenard Romain and Courty Nicolas "Optimal Transport for structured data with application on graphs", International Conference on Machine Learning (ICML). 2019. .. [47] Chowdhury, S., & Mémoli, F. (2019). The gromov–wasserstein distance between networks and stable network invariants. Information and Inference: A Journal of the IMA, 8(4), 757-787.
""" arr = [C1, C2, M] if p is not None: arr.append(list_to_array(p)) else: p = unif(C1.shape[0], type_as=M) if q is not None: arr.append(list_to_array(q)) else: q = unif(C2.shape[0], type_as=M) if G0 is not None: G0_ = G0 arr.append(G0) nx = get_backend(*arr) p0, q0, C10, C20, M0, alpha0 = p, q, C1, C2, M, alpha p = nx.to_numpy(p0) q = nx.to_numpy(q0) C1 = nx.to_numpy(C10) C2 = nx.to_numpy(C20) M = nx.to_numpy(M0) alpha = nx.to_numpy(alpha0) if symmetric is None: symmetric = np.allclose(C1, C1.T, atol=1e-10) and np.allclose(C2, C2.T, atol=1e-10) if G0 is None: G0 = p[:, None] * q[None, :] else: G0 = nx.to_numpy(G0_) # Check marginals of G0 np.testing.assert_allclose(G0.sum(axis=1), p, atol=1e-08) np.testing.assert_allclose(G0.sum(axis=0), q, atol=1e-08) # cg for GW is implemented using numpy on CPU np_ = NumpyBackend() constC, hC1, hC2 = init_matrix(C1, C2, p, q, loss_fun, np_) def f(G): return gwloss(constC, hC1, hC2, G, np_) if symmetric: def df(G): return gwggrad(constC, hC1, hC2, G, np_) else: constCt, hC1t, hC2t = init_matrix(C1.T, C2.T, p, q, loss_fun, np_) def df(G): return 0.5 * (gwggrad(constC, hC1, hC2, G, np_) + gwggrad(constCt, hC1t, hC2t, G, np_)) if armijo: def line_search(cost, G, deltaG, Mi, cost_G, **kwargs): return line_search_armijo(cost, G, deltaG, Mi, cost_G, nx=np_, **kwargs) else: def line_search(cost, G, deltaG, Mi, cost_G, **kwargs): return solve_gromov_linesearch(G, deltaG, cost_G, hC1, hC2, M=(1 - alpha) * M, reg=alpha, nx=np_, **kwargs) if not nx.is_floating_point(M0): warnings.warn( "Input feature matrix consists of integer. The transport plan will be " "casted accordingly, possibly resulting in a loss of precision. " "If this behaviour is unwanted, please make sure your input " "feature matrix consists of floating point elements.", stacklevel=2 ) if log: res, log = cg(p, q, (1 - alpha) * M, alpha, f, df, G0, line_search, log=True, numItermax=max_iter, stopThr=tol_rel, stopThr2=tol_abs, **kwargs) log['fgw_dist'] = nx.from_numpy(log['loss'][-1], type_as=M0) log['u'] = nx.from_numpy(log['u'], type_as=M0) log['v'] = nx.from_numpy(log['v'], type_as=M0) return nx.from_numpy(res, type_as=M0), log else: return nx.from_numpy(cg(p, q, (1 - alpha) * M, alpha, f, df, G0, line_search, log=False, numItermax=max_iter, stopThr=tol_rel, stopThr2=tol_abs, **kwargs), type_as=M0) def fused_gromov_wasserstein2(M, C1, C2, p=None, q=None, loss_fun='square_loss', symmetric=None, alpha=0.5, armijo=False, G0=None, log=False, max_iter=1e4, tol_rel=1e-9, tol_abs=1e-9, **kwargs): r""" Returns the Fused Gromov-Wasserstein distance between :math:`(\mathbf{C_1}, \mathbf{Y_1}, \mathbf{p})` and :math:`(\mathbf{C_2}, \mathbf{Y_2}, \mathbf{q})` with pairwise distance matrix :math:`\mathbf{M}` between node feature matrices :math:`\mathbf{Y_1}` and :math:`\mathbf{Y_2}` (see :ref:`[24] `). The function solves the following optimization problem using Conditional Gradient: .. math:: \mathbf{FGW} = \mathop{\min}_\mathbf{T} \quad (1 - \alpha) \langle \mathbf{T}, \mathbf{M} \rangle_F + \alpha \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j} \mathbf{T}_{k,l} s.t. 
\ \mathbf{T} \mathbf{1} &= \mathbf{p} \mathbf{T}^T \mathbf{1} &= \mathbf{q} \mathbf{T} &\geq 0 Where : - :math:`\mathbf{M}`: metric cost matrix between features across domains - :math:`\mathbf{C_1}`: Metric cost matrix in the source space - :math:`\mathbf{C_2}`: Metric cost matrix in the target space - :math:`\mathbf{p}`: distribution in the source space - :math:`\mathbf{q}`: distribution in the target space - `L`: loss function to account for the misfit between the similarity and feature matrices - :math:`\alpha`: trade-off parameter Note that when using backends, this loss function is differentiable wrt the matrices (C1, C2, M) and weights (p, q) for quadratic loss using the gradients from [38]_. .. note:: This function is backend-compatible and will work on arrays from all compatible backends. But the algorithm uses the C++ CPU backend which can lead to copy overhead on GPU arrays. .. note:: All computations in the conjugate gradient solver are done with numpy to limit memory overhead. .. note:: This function will cast the computed transport plan to the data type of the provided input :math:`\mathbf{M}`. Casting to an integer tensor might result in a loss of precision. If this behaviour is unwanted, please make sure to provide a floating point input. Parameters ---------- M : array-like, shape (ns, nt) Metric cost matrix between features across domains C1 : array-like, shape (ns, ns) Metric cost matrix representative of the structure in the source space. C2 : array-like, shape (nt, nt) Metric cost matrix representative of the structure in the target space. p : array-like, shape (ns,), optional Distribution in the source space. If let to its default value None, uniform distribution is taken. q : array-like, shape (nt,), optional Distribution in the target space. If let to its default value None, uniform distribution is taken. loss_fun : str, optional Loss function used for the solver. symmetric : bool, optional Either C1 and C2 are to be assumed symmetric or not. If let to its default None value, a symmetry test will be conducted. Else if set to True (resp. False), C1 and C2 will be assumed symmetric (resp. asymmetric). alpha : float, optional Trade-off parameter (0 < alpha < 1) armijo : bool, optional If True the step of the line-search is found via an armijo research. Else closed form is used. If there are convergence issues use False. G0: array-like, shape (ns,nt), optional If None the initial transport plan of the solver is pq^T. Otherwise G0 must satisfy marginal constraints and will be used as initial transport of the solver. log : bool, optional Record log if True. max_iter : int, optional Max number of iterations tol_rel : float, optional Stop threshold on relative error (>0) tol_abs : float, optional Stop threshold on absolute error (>0) **kwargs : dict Parameters can be directly passed to the ot.optim.cg solver. Returns ------- fgw-distance : float Fused Gromov-Wasserstein distance for the given parameters. log : dict Log dictionary return only if log==True in parameters. .. _references-fused-gromov-wasserstein2: References ---------- .. [24] Vayer Titouan, Chapel Laetitia, Flamary RĂ©mi, Tavenard Romain and Courty Nicolas "Optimal Transport for structured data with application on graphs" International Conference on Machine Learning (ICML). 2019. .. [38] C. Vincent-Cuaz, T. Vayer, R. Flamary, M. Corneli, N. Courty, Online Graph Dictionary Learning, International Conference on Machine Learning (ICML), 2021. .. [47] Chowdhury, S., & MĂ©moli, F. (2019). 
The gromov–wasserstein distance between networks and stable network invariants. Information and Inference: A Journal of the IMA, 8(4), 757-787. """ nx = get_backend(C1, C2, M) # init marginals if set as None if p is None: p = unif(C1.shape[0], type_as=M) if q is None: q = unif(C2.shape[0], type_as=M) T, log_fgw = fused_gromov_wasserstein( M, C1, C2, p, q, loss_fun, symmetric, alpha, armijo, G0, log=True, max_iter=max_iter, tol_rel=tol_rel, tol_abs=tol_abs, **kwargs) fgw_dist = log_fgw['fgw_dist'] log_fgw['T'] = T # compute separate terms for gradients and log lin_term = nx.sum(T * M) log_fgw['quad_loss'] = (fgw_dist - (1 - alpha) * lin_term) log_fgw['lin_loss'] = lin_term * (1 - alpha) gw_term = log_fgw['quad_loss'] / alpha if loss_fun == 'square_loss': gC1 = 2 * C1 * nx.outer(p, p) - 2 * nx.dot(T, nx.dot(C2, T.T)) gC2 = 2 * C2 * nx.outer(q, q) - 2 * nx.dot(T.T, nx.dot(C1, T)) elif loss_fun == 'kl_loss': gC1 = nx.log(C1 + 1e-15) * nx.outer(p, p) - nx.dot(T, nx.dot(nx.log(C2 + 1e-15), T.T)) gC2 = nx.dot(T.T, nx.dot(C1, T)) / (C2 + 1e-15) + nx.outer(q, q) if isinstance(alpha, int) or isinstance(alpha, float): fgw_dist = nx.set_gradients(fgw_dist, (p, q, C1, C2, M), (log_fgw['u'] - nx.mean(log_fgw['u']), log_fgw['v'] - nx.mean(log_fgw['v']), alpha * gC1, alpha * gC2, (1 - alpha) * T)) else: fgw_dist = nx.set_gradients(fgw_dist, (p, q, C1, C2, M, alpha), (log_fgw['u'] - nx.mean(log_fgw['u']), log_fgw['v'] - nx.mean(log_fgw['v']), alpha * gC1, alpha * gC2, (1 - alpha) * T, gw_term - lin_term)) if log: return fgw_dist, log_fgw else: return fgw_dist def solve_gromov_linesearch(G, deltaG, cost_G, C1, C2, M, reg, alpha_min=None, alpha_max=None, nx=None, **kwargs): """ Solve the linesearch in the FW iterations for any inner loss that decomposes as in Proposition 1 in :ref:`[12] `. Parameters ---------- G : array-like, shape(ns,nt) The transport map at a given iteration of the FW deltaG : array-like (ns,nt) Difference between the optimal map found by linearization in the FW algorithm and the value at a given iteration cost_G : float Value of the cost at `G` C1 : array-like (ns,ns), optional Transformed Structure matrix in the source domain. For the 'square_loss' and 'kl_loss', we provide hC1 from ot.gromov.init_matrix C2 : array-like (nt,nt), optional Transformed Structure matrix in the target domain. For the 'square_loss' and 'kl_loss', we provide hC2 from ot.gromov.init_matrix M : array-like (ns,nt) Cost matrix between the features. reg : float Regularization parameter. alpha_min : float, optional Minimum value for alpha alpha_max : float, optional Maximum value for alpha nx : backend, optional If left to its default value None, a backend test will be conducted. Returns ------- alpha : float The optimal step size of the FW fc : int Number of function calls. Unused here cost_G : float The value of the cost for the next iteration .. _references-solve-linesearch: References ---------- .. [24] Vayer Titouan, Chapel Laetitia, Flamary RĂ©mi, Tavenard Romain and Courty Nicolas "Optimal Transport for structured data with application on graphs" International Conference on Machine Learning (ICML). 2019. .. [12] Gabriel PeyrĂ©, Marco Cuturi, and Justin Solomon, "Gromov-Wasserstein averaging of kernel and distance matrices." International Conference on Machine Learning (ICML). 2016.
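Examples
--------
The step size minimizes the one-dimensional quadratic model :math:`g(\alpha) = a \alpha^2 + b \alpha` over :math:`[0, 1]` through ot.optim.solve_1d_linesearch_quad. A small sketch with hypothetical coefficients:

>>> from ot.optim import solve_1d_linesearch_quad
>>> a, b = 2.0, -3.0  # hypothetical curvature and slope of the quadratic model
>>> alpha = solve_1d_linesearch_quad(a, b)  # -b / (2 * a) = 0.75, already in [0, 1]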
""" if nx is None: G, deltaG, C1, C2, M = list_to_array(G, deltaG, C1, C2, M) if isinstance(M, int) or isinstance(M, float): nx = get_backend(G, deltaG, C1, C2) else: nx = get_backend(G, deltaG, C1, C2, M) dot = nx.dot(nx.dot(C1, deltaG), C2.T) a = - reg * nx.sum(dot * deltaG) b = nx.sum(M * deltaG) - reg * (nx.sum(dot * G) + nx.sum(nx.dot(nx.dot(C1, G), C2.T) * deltaG)) alpha = solve_1d_linesearch_quad(a, b) if alpha_min is not None or alpha_max is not None: alpha = np.clip(alpha, alpha_min, alpha_max) # the new cost is deduced from the line search quadratic function cost_G = cost_G + a * (alpha ** 2) + b * alpha return alpha, 1, cost_G def gromov_barycenters( N, Cs, ps=None, p=None, lambdas=None, loss_fun='square_loss', symmetric=True, armijo=False, max_iter=1000, tol=1e-9, stop_criterion='barycenter', warmstartT=False, verbose=False, log=False, init_C=None, random_state=None, **kwargs): r""" Returns the Gromov-Wasserstein barycenters of `S` measured similarity matrices :math:`(\mathbf{C}_s)_{1 \leq s \leq S}` The function solves the following optimization problem with block coordinate descent: .. math:: \mathbf{C}^* = \mathop{\arg \min}_{\mathbf{C}\in \mathbb{R}^{N \times N}} \quad \sum_s \lambda_s \mathrm{GW}(\mathbf{C}, \mathbf{C}_s, \mathbf{p}, \mathbf{p}_s) Where : - :math:`\mathbf{C}_s`: metric cost matrix - :math:`\mathbf{p}_s`: distribution Parameters ---------- N : int Size of the targeted barycenter Cs : list of S array-like of shape (ns, ns) Metric cost matrices ps : list of S array-like of shape (ns,), optional Sample weights in the `S` spaces. If let to its default value None, uniform distributions are taken. p : array-like, shape (N,), optional Weights in the targeted barycenter. If let to its default value None, uniform distribution is taken. lambdas : list of float, optional List of the `S` spaces' weights. If let to its default value None, uniform weights are taken. loss_fun : callable, optional tensor-matrix multiplication function based on specific loss function symmetric : bool, optional. Either structures are to be assumed symmetric or not. Default value is True. Else if set to True (resp. False), C1 and C2 will be assumed symmetric (resp. asymmetric). armijo : bool, optional If True the step of the line-search is found via an armijo research. Else closed form is used. If there are convergence issues use False. max_iter : int, optional Max number of iterations tol : float, optional Stop threshold on relative error (>0) stop_criterion : str, optional. Default is 'barycenter'. Stop criterion taking values in ['barycenter', 'loss']. If set to 'barycenter' uses absolute norm variations of estimated barycenters. Else if set to 'loss' uses the relative variations of the loss. warmstartT: bool, optional Either to perform warmstart of transport plans in the successive fused gromov-wasserstein transport problems.s verbose : bool, optional Print information along iterations. log : bool, optional Record log if True. init_C : bool | array-like, shape(N,N) Random initial value for the :math:`\mathbf{C}` matrix provided by user. random_state : int or RandomState instance, optional Fix the seed for reproducibility Returns ------- C : array-like, shape (`N`, `N`) Similarity matrix in the barycenter space (permutated arbitrarily) log : dict Only returned when log=True. It contains the keys: - :math:`\mathbf{T}`: list of (`N`, `ns`) transport matrices - :math:`\mathbf{p}`: (`N`,) barycenter weights - values used in convergence evaluation. References ---------- .. 
[12] Gabriel PeyrĂ©, Marco Cuturi, and Justin Solomon, "Gromov-Wasserstein averaging of kernel and distance matrices." International Conference on Machine Learning (ICML). 2016. """ if stop_criterion not in ['barycenter', 'loss']: raise ValueError(f"Unknown `stop_criterion='{stop_criterion}'`. Use one of: {'barycenter', 'loss'}.") Cs = list_to_array(*Cs) arr = [*Cs] if ps is not None: arr += list_to_array(*ps) else: ps = [unif(C.shape[0], type_as=C) for C in Cs] if p is not None: arr.append(list_to_array(p)) else: p = unif(N, type_as=Cs[0]) nx = get_backend(*arr) S = len(Cs) if lambdas is None: lambdas = [1. / S] * S # Initialization of C : random SPD matrix (if not provided by user) if init_C is None: generator = check_random_state(random_state) xalea = generator.randn(N, 2) C = dist(xalea, xalea) C /= C.max() C = nx.from_numpy(C, type_as=p) else: C = init_C cpt = 0 err = 1e15 # either the error on 'barycenter' or 'loss' if warmstartT: T = [None] * S if stop_criterion == 'barycenter': inner_log = False else: inner_log = True curr_loss = 1e15 if log: log_ = {} log_['err'] = [] if stop_criterion == 'loss': log_['loss'] = [] while (err > tol and cpt < max_iter): if stop_criterion == 'barycenter': Cprev = C else: prev_loss = curr_loss # get transport plans if warmstartT: res = [gromov_wasserstein( C, Cs[s], p, ps[s], loss_fun, symmetric=symmetric, armijo=armijo, G0=T[s], max_iter=max_iter, tol_rel=1e-5, tol_abs=0., log=inner_log, verbose=verbose, **kwargs) for s in range(S)] else: res = [gromov_wasserstein( C, Cs[s], p, ps[s], loss_fun, symmetric=symmetric, armijo=armijo, G0=None, max_iter=max_iter, tol_rel=1e-5, tol_abs=0., log=inner_log, verbose=verbose, **kwargs) for s in range(S)] if stop_criterion == 'barycenter': T = res else: T = [output[0] for output in res] curr_loss = np.sum([output[1]['gw_dist'] for output in res]) # update barycenters if loss_fun == 'square_loss': C = update_square_loss(p, lambdas, T, Cs, nx) elif loss_fun == 'kl_loss': C = update_kl_loss(p, lambdas, T, Cs, nx) # update convergence criterion if stop_criterion == 'barycenter': err = nx.norm(C - Cprev) if log: log_['err'].append(err) else: err = abs(curr_loss - prev_loss) / prev_loss if prev_loss != 0. else np.nan if log: log_['loss'].append(curr_loss) log_['err'].append(err) if verbose: if cpt % 200 == 0: print('{:5s}|{:12s}'.format( 'It.', 'Err') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(cpt, err)) cpt += 1 if log: log_['T'] = T log_['p'] = p return C, log_ else: return C def fgw_barycenters( N, Ys, Cs, ps=None, lambdas=None, alpha=0.5, fixed_structure=False, fixed_features=False, p=None, loss_fun='square_loss', armijo=False, symmetric=True, max_iter=100, tol=1e-9, stop_criterion='barycenter', warmstartT=False, verbose=False, log=False, init_C=None, init_X=None, random_state=None, **kwargs): r""" Returns the Fused Gromov-Wasserstein barycenters of `S` measurable networks with node features :math:`(\mathbf{C}_s, \mathbf{Y}_s, \mathbf{p}_s)_{1 \leq s \leq S}` (see eq (5) in :ref:`[24] `), estimated using Fused Gromov-Wasserstein transports from Conditional Gradient solvers. The function solves the following optimization problem: .. 
math:: \mathbf{C}^*, \mathbf{Y}^* = \mathop{\arg \min}_{\mathbf{C}\in \mathbb{R}^{N \times N}, \mathbf{Y}\in \mathbb{Y}^{N \times d}} \quad \sum_s \lambda_s \mathrm{FGW}_{\alpha}(\mathbf{C}, \mathbf{C}_s, \mathbf{Y}, \mathbf{Y}_s, \mathbf{p}, \mathbf{p}_s) Where : - :math:`\mathbf{Y}_s`: feature matrix - :math:`\mathbf{C}_s`: metric cost matrix - :math:`\mathbf{p}_s`: distribution Parameters ---------- N : int Desired number of samples of the target barycenter Ys: list of array-like, each element has shape (ns,d) Features of all samples Cs : list of array-like, each element has shape (ns,ns) Structure matrices of all samples ps : list of array-like, each element has shape (ns,), optional Masses of all samples. If let to its default value None, uniform distributions are taken. lambdas : list of float, optional List of the `S` spaces' weights. If let to its default value None, uniform weights are taken. alpha : float, optional Alpha parameter for the fgw distance. fixed_structure : bool, optional Whether to fix the structure of the barycenter during the updates. fixed_features : bool, optional Whether to fix the feature of the barycenter during the updates p : array-like, shape (N,), optional Weights in the targeted barycenter. If let to its default value None, uniform distribution is taken. loss_fun : str, optional Loss function used for the solver either 'square_loss' or 'kl_loss' armijo : bool, optional If True the step of the line-search is found via an armijo research. Else closed form is used. If there are convergence issues use False. symmetric : bool, optional Either structures are to be assumed symmetric or not. Default value is True. Else if set to True (resp. False), C1 and C2 will be assumed symmetric (resp. asymmetric). max_iter : int, optional Max number of iterations tol : float, optional Stop threshold on relative error (>0) stop_criterion : str, optional. Default is 'barycenter'. Stop criterion taking values in ['barycenter', 'loss']. If set to 'barycenter' uses absolute norm variations of estimated barycenters. Else if set to 'loss' uses the relative variations of the loss. warmstartT: bool, optional Either to perform warmstart of transport plans in the successive fused gromov-wasserstein transport problems. verbose : bool, optional Print information along iterations. log : bool, optional Record log if True. init_C : array-like, shape (N,N), optional Initialization for the barycenters' structure matrix. If not set a random init is used. init_X : array-like, shape (N,d), optional Initialization for the barycenters' features. If not set a random init is used. random_state : int or RandomState instance, optional Fix the seed for reproducibility Returns ------- X : array-like, shape (`N`, `d`) Barycenters' features C : array-like, shape (`N`, `N`) Barycenters' structure matrix log : dict Only returned when log=True. It contains the keys: - :math:`\mathbf{T}`: list of (`N`, `ns`) transport matrices - :math:`\mathbf{p}`: (`N`,) barycenter weights - :math:`(\mathbf{M}_s)_s`: all distance matrices between the feature of the barycenter and the other features :math:`(dist(\mathbf{X}, \mathbf{Y}_s))_s` shape (`N`, `ns`) - values used in convergence evaluation. .. _references-fgw-barycenters: References ---------- .. [24] Vayer Titouan, Chapel Laetitia, Flamary RĂ©mi, Tavenard Romain and Courty Nicolas "Optimal Transport for structured data with application on graphs" International Conference on Machine Learning (ICML). 2019. 
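Examples
--------
A minimal usage sketch (illustrative only: the number of graphs, their sizes, the feature dimension and `alpha` are arbitrary choices):

>>> import numpy as np
>>> import ot
>>> rng = np.random.RandomState(42)
>>> Ys = [rng.randn(ns, 3) for ns in (6, 7)]  # node features of two graphs
>>> Cs = [ot.dist(Y, Y) for Y in Ys]  # structure matrices of the two graphs
>>> X, C = ot.gromov.fgw_barycenters(5, Ys, Cs, alpha=0.5, random_state=0)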
""" if stop_criterion not in ['barycenter', 'loss']: raise ValueError(f"Unknown `stop_criterion='{stop_criterion}'`. Use one of: {'barycenter', 'loss'}.") Cs = list_to_array(*Cs) Ys = list_to_array(*Ys) arr = [*Cs, *Ys] if ps is not None: arr += list_to_array(*ps) else: ps = [unif(C.shape[0], type_as=C) for C in Cs] if p is not None: arr.append(list_to_array(p)) else: p = unif(N, type_as=Cs[0]) nx = get_backend(*arr) S = len(Cs) if lambdas is None: lambdas = [1. / S] * S d = Ys[0].shape[1] # dimension on the node features if fixed_structure: if init_C is None: raise UndefinedParameter('If C is fixed it must be initialized') else: C = init_C else: if init_C is None: generator = check_random_state(random_state) xalea = generator.randn(N, 2) C = dist(xalea, xalea) C = nx.from_numpy(C, type_as=ps[0]) else: C = init_C if fixed_features: if init_X is None: raise UndefinedParameter('If X is fixed it must be initialized') else: X = init_X else: if init_X is None: X = nx.zeros((N, d), type_as=ps[0]) else: X = init_X Ms = [dist(X, Ys[s]) for s in range(len(Ys))] if warmstartT: T = [None] * S cpt = 0 if stop_criterion == 'barycenter': inner_log = False err_feature = 1e15 err_structure = 1e15 err_rel_loss = 0. else: inner_log = True err_feature = 0. err_structure = 0. curr_loss = 1e15 err_rel_loss = 1e15 if log: log_ = {} if stop_criterion == 'barycenter': log_['err_feature'] = [] log_['err_structure'] = [] log_['Ts_iter'] = [] else: log_['loss'] = [] log_['err_rel_loss'] = [] while ((err_feature > tol or err_structure > tol or err_rel_loss > tol) and cpt < max_iter): if stop_criterion == 'barycenter': Cprev = C Xprev = X else: prev_loss = curr_loss # get transport plans if warmstartT: res = [fused_gromov_wasserstein( Ms[s], C, Cs[s], p, ps[s], loss_fun=loss_fun, alpha=alpha, armijo=armijo, symmetric=symmetric, G0=T[s], max_iter=max_iter, tol_rel=1e-5, tol_abs=0., log=inner_log, verbose=verbose, **kwargs) for s in range(S)] else: res = [fused_gromov_wasserstein( Ms[s], C, Cs[s], p, ps[s], loss_fun=loss_fun, alpha=alpha, armijo=armijo, symmetric=symmetric, G0=None, max_iter=max_iter, tol_rel=1e-5, tol_abs=0., log=inner_log, verbose=verbose, **kwargs) for s in range(S)] if stop_criterion == 'barycenter': T = res else: T = [output[0] for output in res] curr_loss = np.sum([output[1]['fgw_dist'] for output in res]) # update barycenters if not fixed_features: Ys_temp = [y.T for y in Ys] X = update_feature_matrix(lambdas, Ys_temp, T, p, nx).T Ms = [dist(X, Ys[s]) for s in range(len(Ys))] if not fixed_structure: if loss_fun == 'square_loss': C = update_square_loss(p, lambdas, T, Cs, nx) elif loss_fun == 'kl_loss': C = update_kl_loss(p, lambdas, T, Cs, nx) # update convergence criterion if stop_criterion == 'barycenter': err_feature, err_structure = 0., 0. if not fixed_features: err_feature = nx.norm(X - Xprev) if not fixed_structure: err_structure = nx.norm(C - Cprev) if log: log_['err_feature'].append(err_feature) log_['err_structure'].append(err_structure) log_['Ts_iter'].append(T) if verbose: if cpt % 200 == 0: print('{:5s}|{:12s}'.format( 'It.', 'Err') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(cpt, err_structure)) print('{:5d}|{:8e}|'.format(cpt, err_feature)) else: err_rel_loss = abs(curr_loss - prev_loss) / prev_loss if prev_loss != 0. 
else np.nan if log: log_['loss'].append(curr_loss) log_['err_rel_loss'].append(err_rel_loss) if verbose: if cpt % 200 == 0: print('{:5s}|{:12s}'.format( 'It.', 'Err') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(cpt, err_rel_loss)) cpt += 1 if log: log_['T'] = T log_['p'] = p log_['Ms'] = Ms return X, C, log_ else: return X, C python-pot-0.9.3+dfsg/ot/gromov/_semirelaxed.py000066400000000000000000001257601455713015700215540ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Semi-relaxed Gromov-Wasserstein and Fused-Gromov-Wasserstein solvers. """ # Author: RĂ©mi Flamary # CĂ©dric Vincent-Cuaz # # License: MIT License import numpy as np from ..utils import list_to_array, unif from ..optim import semirelaxed_cg, solve_1d_linesearch_quad from ..backend import get_backend from ._utils import init_matrix_semirelaxed, gwloss, gwggrad def semirelaxed_gromov_wasserstein(C1, C2, p=None, loss_fun='square_loss', symmetric=None, log=False, G0=None, max_iter=1e4, tol_rel=1e-9, tol_abs=1e-9, **kwargs): r""" Returns the semi-relaxed Gromov-Wasserstein divergence transport from :math:`(\mathbf{C_1}, \mathbf{p})` to :math:`\mathbf{C_2}` (see [48]). The function solves the following optimization problem using Conditional Gradient: .. math:: \mathbf{T}^* \in \mathop{\arg \min}_{\mathbf{T}} \quad \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j} \mathbf{T}_{k,l} s.t. \ \mathbf{T} \mathbf{1} &= \mathbf{p} \mathbf{T} &\geq 0 Where : - :math:`\mathbf{C_1}`: Metric cost matrix in the source space - :math:`\mathbf{C_2}`: Metric cost matrix in the target space - :math:`\mathbf{p}`: distribution in the source space - `L`: loss function to account for the misfit between the similarity matrices .. note:: This function is backend-compatible and will work on arrays from all compatible backends. However all the steps in the conditional gradient are not differentiable. Parameters ---------- C1 : array-like, shape (ns, ns) Metric cost matrix in the source space C2 : array-like, shape (nt, nt) Metric cost matrix in the target space p : array-like, shape (ns,), optional Distribution in the source space. If let to its default value None, uniform distribution is taken. loss_fun : str loss function used for the solver either 'square_loss' or 'kl_loss'. symmetric : bool, optional Either C1 and C2 are to be assumed symmetric or not. If let to its default None value, a symmetry test will be conducted. Else if set to True (resp. False), C1 and C2 will be assumed symmetric (resp. asymmetric). verbose : bool, optional Print information along iterations log : bool, optional record log if True G0: array-like, shape (ns,nt), optional If None the initial transport plan of the solver is pq^T. Otherwise G0 must satisfy marginal constraints and will be used as initial transport of the solver. max_iter : int, optional Max number of iterations tol_rel : float, optional Stop threshold on relative error (>0) tol_abs : float, optional Stop threshold on absolute error (>0) **kwargs : dict parameters can be directly passed to the ot.optim.cg solver Returns ------- T : array-like, shape (`ns`, `nt`) Coupling between the two spaces that minimizes: :math:`\sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j} \mathbf{T}_{k,l}` log : dict Convergence information and loss. References ---------- .. [48] CĂ©dric Vincent-Cuaz, RĂ©mi Flamary, Marco Corneli, Titouan Vayer, Nicolas Courty. 
"Semi-relaxed Gromov-Wasserstein divergence and applications on graphs" International Conference on Learning Representations (ICLR), 2022. .. [62] H. Van Assel, C. Vincent-Cuaz, T. Vayer, R. Flamary, N. Courty. "Interpolating between Clustering and Dimensionality Reduction with Gromov-Wasserstein". NeurIPS 2023 Workshop OTML. """ arr = [C1, C2] if p is not None: arr.append(list_to_array(p)) else: p = unif(C1.shape[0], type_as=C1) if G0 is not None: arr.append(G0) nx = get_backend(*arr) if symmetric is None: symmetric = nx.allclose(C1, C1.T, atol=1e-10) and nx.allclose(C2, C2.T, atol=1e-10) if G0 is None: q = unif(C2.shape[0], type_as=p) G0 = nx.outer(p, q) else: q = nx.sum(G0, 0) # Check first marginal of G0 np.testing.assert_allclose(nx.sum(G0, 1), p, atol=1e-08) constC, hC1, hC2, fC2t = init_matrix_semirelaxed(C1, C2, p, loss_fun, nx) ones_p = nx.ones(p.shape[0], type_as=p) def f(G): qG = nx.sum(G, 0) marginal_product = nx.outer(ones_p, nx.dot(qG, fC2t)) return gwloss(constC + marginal_product, hC1, hC2, G, nx) if symmetric: def df(G): qG = nx.sum(G, 0) marginal_product = nx.outer(ones_p, nx.dot(qG, fC2t)) return gwggrad(constC + marginal_product, hC1, hC2, G, nx) else: constCt, hC1t, hC2t, fC2 = init_matrix_semirelaxed(C1.T, C2.T, p, loss_fun, nx) def df(G): qG = nx.sum(G, 0) marginal_product_1 = nx.outer(ones_p, nx.dot(qG, fC2t)) marginal_product_2 = nx.outer(ones_p, nx.dot(qG, fC2)) return 0.5 * (gwggrad(constC + marginal_product_1, hC1, hC2, G, nx) + gwggrad(constCt + marginal_product_2, hC1t, hC2t, G, nx)) def line_search(cost, G, deltaG, Mi, cost_G, **kwargs): return solve_semirelaxed_gromov_linesearch(G, deltaG, cost_G, hC1, hC2, ones_p, M=0., reg=1., fC2t=fC2t, nx=nx, **kwargs) if log: res, log = semirelaxed_cg(p, q, 0., 1., f, df, G0, line_search, log=True, numItermax=max_iter, stopThr=tol_rel, stopThr2=tol_abs, **kwargs) log['srgw_dist'] = log['loss'][-1] return res, log else: return semirelaxed_cg(p, q, 0., 1., f, df, G0, line_search, log=False, numItermax=max_iter, stopThr=tol_rel, stopThr2=tol_abs, **kwargs) def semirelaxed_gromov_wasserstein2(C1, C2, p=None, loss_fun='square_loss', symmetric=None, log=False, G0=None, max_iter=1e4, tol_rel=1e-9, tol_abs=1e-9, **kwargs): r""" Returns the semi-relaxed Gromov-Wasserstein divergence from :math:`(\mathbf{C_1}, \mathbf{p})` to :math:`\mathbf{C_2}` (see [48]). The function solves the following optimization problem using Conditional Gradient: .. math:: \text{srGW} = \min_{\mathbf{T}} \quad \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j} \mathbf{T}_{k,l} s.t. \ \mathbf{T} \mathbf{1} &= \mathbf{p} \mathbf{T} &\geq 0 Where : - :math:`\mathbf{C_1}`: Metric cost matrix in the source space - :math:`\mathbf{C_2}`: Metric cost matrix in the target space - :math:`\mathbf{p}`: distribution in the source space - `L`: loss function to account for the misfit between the similarity matrices Note that when using backends, this loss function is differentiable wrt the matrices (C1, C2) but not yet for the weights p. .. note:: This function is backend-compatible and will work on arrays from all compatible backends. However all the steps in the conditional gradient are not differentiable. Parameters ---------- C1 : array-like, shape (ns, ns) Metric cost matrix in the source space C2 : array-like, shape (nt, nt) Metric cost matrix in the target space p : array-like, shape (ns,), optional Distribution in the source space. If let to its default value None, uniform distribution is taken. 
loss_fun : str loss function used for the solver either 'square_loss' or 'kl_loss'. symmetric : bool, optional Either C1 and C2 are to be assumed symmetric or not. If let to its default None value, a symmetry test will be conducted. Else if set to True (resp. False), C1 and C2 will be assumed symmetric (resp. asymmetric). verbose : bool, optional Print information along iterations log : bool, optional record log if True G0: array-like, shape (ns,nt), optional If None the initial transport plan of the solver is pq^T. Otherwise G0 must satisfy marginal constraints and will be used as initial transport of the solver. max_iter : int, optional Max number of iterations tol_rel : float, optional Stop threshold on relative error (>0) tol_abs : float, optional Stop threshold on absolute error (>0) **kwargs : dict parameters can be directly passed to the ot.optim.cg solver Returns ------- srgw : float Semi-relaxed Gromov-Wasserstein divergence log : dict convergence information and Coupling matrix References ---------- .. [48] CĂ©dric Vincent-Cuaz, RĂ©mi Flamary, Marco Corneli, Titouan Vayer, Nicolas Courty. "Semi-relaxed Gromov-Wasserstein divergence and applications on graphs" International Conference on Learning Representations (ICLR), 2022. .. [62] H. Van Assel, C. Vincent-Cuaz, T. Vayer, R. Flamary, N. Courty. "Interpolating between Clustering and Dimensionality Reduction with Gromov-Wasserstein". NeurIPS 2023 Workshop OTML. """ # partial get_backend as the full one will be handled in gromov_wasserstein nx = get_backend(C1, C2) # init marginals if set as None if p is None: p = unif(C1.shape[0], type_as=C1) T, log_srgw = semirelaxed_gromov_wasserstein( C1, C2, p, loss_fun, symmetric, log=True, G0=G0, max_iter=max_iter, tol_rel=tol_rel, tol_abs=tol_abs, **kwargs) q = nx.sum(T, 0) log_srgw['T'] = T srgw = log_srgw['srgw_dist'] if loss_fun == 'square_loss': gC1 = 2 * C1 * nx.outer(p, p) - 2 * nx.dot(T, nx.dot(C2, T.T)) gC2 = 2 * C2 * nx.outer(q, q) - 2 * nx.dot(T.T, nx.dot(C1, T)) elif loss_fun == 'kl_loss': gC1 = nx.log(C1 + 1e-15) * nx.outer(p, p) - nx.dot(T, nx.dot(nx.log(C2 + 1e-15), T.T)) gC2 = nx.dot(T.T, nx.dot(C1, T)) / (C2 + 1e-15) + nx.outer(q, q) srgw = nx.set_gradients(srgw, (C1, C2), (gC1, gC2)) if log: return srgw, log_srgw else: return srgw def semirelaxed_fused_gromov_wasserstein( M, C1, C2, p=None, loss_fun='square_loss', symmetric=None, alpha=0.5, G0=None, log=False, max_iter=1e4, tol_rel=1e-9, tol_abs=1e-9, **kwargs): r""" Computes the semi-relaxed Fused Gromov-Wasserstein transport between two graphs (see [48]). .. math:: \mathbf{T}^* \in \mathop{\arg \min}_{\mathbf{T}} \quad (1 - \alpha) \langle \mathbf{T}, \mathbf{M} \rangle_F + \alpha \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) T_{i,j} T_{k,l} s.t. \ \mathbf{T} \mathbf{1} &= \mathbf{p} \mathbf{T} &\geq 0 where : - :math:`\mathbf{M}` is the (`ns`, `nt`) metric cost matrix - :math:`\mathbf{p}` source weights (sum to 1) - `L` is a loss function to account for the misfit between the similarity matrices .. note:: This function is backend-compatible and will work on arrays from all compatible backends. However all the steps in the conditional gradient are not differentiable. 
The algorithm used for solving the problem is conditional gradient as discussed in :ref:`[48] ` Parameters ---------- M : array-like, shape (ns, nt) Metric cost matrix between features across domains C1 : array-like, shape (ns, ns) Metric cost matrix representative of the structure in the source space C2 : array-like, shape (nt, nt) Metric cost matrix representative of the structure in the target space p : array-like, shape (ns,), optional Distribution in the source space. If let to its default value None, uniform distribution is taken. loss_fun : str loss function used for the solver either 'square_loss' or 'kl_loss'. symmetric : bool, optional Either C1 and C2 are to be assumed symmetric or not. If let to its default None value, a symmetry test will be conducted. Else if set to True (resp. False), C1 and C2 will be assumed symmetric (resp. asymmetric). alpha : float, optional Trade-off parameter (0 < alpha < 1) G0: array-like, shape (ns,nt), optional If None the initial transport plan of the solver is pq^T. Otherwise G0 must satisfy marginal constraints and will be used as initial transport of the solver. log : bool, optional record log if True max_iter : int, optional Max number of iterations tol_rel : float, optional Stop threshold on relative error (>0) tol_abs : float, optional Stop threshold on absolute error (>0) **kwargs : dict parameters can be directly passed to the ot.optim.cg solver Returns ------- gamma : array-like, shape (`ns`, `nt`) Optimal transportation matrix for the given parameters. log : dict Log dictionary return only if log==True in parameters. .. _references-semirelaxed-fused-gromov-wasserstein: References ---------- .. [24] Vayer Titouan, Chapel Laetitia, Flamary RĂ©mi, Tavenard Romain and Courty Nicolas "Optimal Transport for structured data with application on graphs", International Conference on Machine Learning (ICML). 2019. .. [48] CĂ©dric Vincent-Cuaz, RĂ©mi Flamary, Marco Corneli, Titouan Vayer, Nicolas Courty. "Semi-relaxed Gromov-Wasserstein divergence and applications on graphs" International Conference on Learning Representations (ICLR), 2022. .. [62] H. Van Assel, C. Vincent-Cuaz, T. Vayer, R. Flamary, N. Courty. "Interpolating between Clustering and Dimensionality Reduction with Gromov-Wasserstein". NeurIPS 2023 Workshop OTML. 
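Examples
--------
A minimal usage sketch (illustrative only: the graphs are random, and only the source marginal is constrained, the target one being relaxed):

>>> import numpy as np
>>> import ot
>>> rng = np.random.RandomState(0)
>>> xs, xt = rng.randn(5, 2), rng.randn(4, 2)
>>> C1, C2 = ot.dist(xs, xs), ot.dist(xt, xt)
>>> M = ot.dist(xs, xt)
>>> T = ot.gromov.semirelaxed_fused_gromov_wasserstein(M, C1, C2, alpha=0.5)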
""" arr = [M, C1, C2] if p is not None: arr.append(list_to_array(p)) else: p = unif(C1.shape[0], type_as=C1) if G0 is not None: arr.append(G0) nx = get_backend(*arr) if symmetric is None: symmetric = nx.allclose(C1, C1.T, atol=1e-10) and nx.allclose(C2, C2.T, atol=1e-10) if G0 is None: q = unif(C2.shape[0], type_as=p) G0 = nx.outer(p, q) else: q = nx.sum(G0, 0) # Check marginals of G0 np.testing.assert_allclose(nx.sum(G0, 1), p, atol=1e-08) constC, hC1, hC2, fC2t = init_matrix_semirelaxed(C1, C2, p, loss_fun, nx) ones_p = nx.ones(p.shape[0], type_as=p) def f(G): qG = nx.sum(G, 0) marginal_product = nx.outer(ones_p, nx.dot(qG, fC2t)) return gwloss(constC + marginal_product, hC1, hC2, G, nx) if symmetric: def df(G): qG = nx.sum(G, 0) marginal_product = nx.outer(ones_p, nx.dot(qG, fC2t)) return gwggrad(constC + marginal_product, hC1, hC2, G, nx) else: constCt, hC1t, hC2t, fC2 = init_matrix_semirelaxed(C1.T, C2.T, p, loss_fun, nx) def df(G): qG = nx.sum(G, 0) marginal_product_1 = nx.outer(ones_p, nx.dot(qG, fC2t)) marginal_product_2 = nx.outer(ones_p, nx.dot(qG, fC2)) return 0.5 * (gwggrad(constC + marginal_product_1, hC1, hC2, G, nx) + gwggrad(constCt + marginal_product_2, hC1t, hC2t, G, nx)) def line_search(cost, G, deltaG, Mi, cost_G, **kwargs): return solve_semirelaxed_gromov_linesearch( G, deltaG, cost_G, hC1, hC2, ones_p, M=(1 - alpha) * M, reg=alpha, fC2t=fC2t, nx=nx, **kwargs) if log: res, log = semirelaxed_cg(p, q, (1 - alpha) * M, alpha, f, df, G0, line_search, log=True, numItermax=max_iter, stopThr=tol_rel, stopThr2=tol_abs, **kwargs) log['srfgw_dist'] = log['loss'][-1] return res, log else: return semirelaxed_cg(p, q, (1 - alpha) * M, alpha, f, df, G0, line_search, log=False, numItermax=max_iter, stopThr=tol_rel, stopThr2=tol_abs, **kwargs) def semirelaxed_fused_gromov_wasserstein2(M, C1, C2, p=None, loss_fun='square_loss', symmetric=None, alpha=0.5, G0=None, log=False, max_iter=1e4, tol_rel=1e-9, tol_abs=1e-9, **kwargs): r""" Computes the semi-relaxed FGW divergence between two graphs (see [48]). .. math:: \mathbf{srFGW}_{\alpha} = \min_{\mathbf{T}} \quad (1 - \alpha) \langle \mathbf{T}, \mathbf{M} \rangle_F + \alpha \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) T_{i,j} T_{k,l} s.t. \ \mathbf{T} \mathbf{1} &= \mathbf{p} \mathbf{T} &\geq 0 where : - :math:`\mathbf{M}` is the (`ns`, `nt`) metric cost matrix - :math:`\mathbf{p}` source weights (sum to 1) - `L` is a loss function to account for the misfit between the similarity matrices The algorithm used for solving the problem is conditional gradient as discussed in :ref:`[48] ` Note that when using backends, this loss function is differentiable wrt the matrices (C1, C2) but not yet for the weights p. .. note:: This function is backend-compatible and will work on arrays from all compatible backends. However all the steps in the conditional gradient are not differentiable. Parameters ---------- M : array-like, shape (ns, nt) Metric cost matrix between features across domains C1 : array-like, shape (ns, ns) Metric cost matrix representative of the structure in the source space. C2 : array-like, shape (nt, nt) Metric cost matrix representative of the structure in the target space. p : array-like, shape (ns,) Distribution in the source space. If let to its default value None, uniform distribution is taken. loss_fun : str, optional loss function used for the solver either 'square_loss' or 'kl_loss'. symmetric : bool, optional Either C1 and C2 are to be assumed symmetric or not. 
If let to its default None value, a symmetry test will be conducted. Else if set to True (resp. False), C1 and C2 will be assumed symmetric (resp. asymmetric). alpha : float, optional Trade-off parameter (0 < alpha < 1) G0: array-like, shape (ns,nt), optional If None the initial transport plan of the solver is pq^T. Otherwise G0 must satisfy marginal constraints and will be used as initial transport of the solver. log : bool, optional Record log if True. max_iter : int, optional Max number of iterations tol_rel : float, optional Stop threshold on relative error (>0) tol_abs : float, optional Stop threshold on absolute error (>0) **kwargs : dict Parameters can be directly passed to the ot.optim.cg solver. Returns ------- srfgw-divergence : float Semi-relaxed Fused Gromov-Wasserstein divergence for the given parameters. log : dict Log dictionary return only if log==True in parameters. .. _references-semirelaxed-fused-gromov-wasserstein2: References ---------- .. [24] Vayer Titouan, Chapel Laetitia, Flamary RĂ©mi, Tavenard Romain and Courty Nicolas "Optimal Transport for structured data with application on graphs", International Conference on Machine Learning (ICML). 2019. .. [48] CĂ©dric Vincent-Cuaz, RĂ©mi Flamary, Marco Corneli, Titouan Vayer, Nicolas Courty. "Semi-relaxed Gromov-Wasserstein divergence and applications on graphs" International Conference on Learning Representations (ICLR), 2022. .. [62] H. Van Assel, C. Vincent-Cuaz, T. Vayer, R. Flamary, N. Courty. "Interpolating between Clustering and Dimensionality Reduction with Gromov-Wasserstein". NeurIPS 2023 Workshop OTML. """ # partial get_backend as the full one will be handled in gromov_wasserstein nx = get_backend(C1, C2) # init marginals if set as None if p is None: p = unif(C1.shape[0], type_as=C1) T, log_fgw = semirelaxed_fused_gromov_wasserstein( M, C1, C2, p, loss_fun, symmetric, alpha, G0, log=True, max_iter=max_iter, tol_rel=tol_rel, tol_abs=tol_abs, **kwargs) q = nx.sum(T, 0) srfgw_dist = log_fgw['srfgw_dist'] log_fgw['T'] = T log_fgw['lin_loss'] = nx.sum(M * T) * (1 - alpha) log_fgw['quad_loss'] = srfgw_dist - log_fgw['lin_loss'] if loss_fun == 'square_loss': gC1 = 2 * C1 * nx.outer(p, p) - 2 * nx.dot(T, nx.dot(C2, T.T)) gC2 = 2 * C2 * nx.outer(q, q) - 2 * nx.dot(T.T, nx.dot(C1, T)) elif loss_fun == 'kl_loss': gC1 = nx.log(C1 + 1e-15) * nx.outer(p, p) - nx.dot(T, nx.dot(nx.log(C2 + 1e-15), T.T)) gC2 = nx.dot(T.T, nx.dot(C1, T)) / (C2 + 1e-15) + nx.outer(q, q) if isinstance(alpha, int) or isinstance(alpha, float): srfgw_dist = nx.set_gradients(srfgw_dist, (C1, C2, M), (alpha * gC1, alpha * gC2, (1 - alpha) * T)) else: lin_term = nx.sum(T * M) srgw_term = (srfgw_dist - (1 - alpha) * lin_term) / alpha srfgw_dist = nx.set_gradients(srfgw_dist, (C1, C2, M, alpha), (alpha * gC1, alpha * gC2, (1 - alpha) * T, srgw_term - lin_term)) if log: return srfgw_dist, log_fgw else: return srfgw_dist def solve_semirelaxed_gromov_linesearch(G, deltaG, cost_G, C1, C2, ones_p, M, reg, fC2t=None, alpha_min=None, alpha_max=None, nx=None, **kwargs): """ Solve the linesearch in the Conditional Gradient iterations for the semi-relaxed Gromov-Wasserstein divergence. Parameters ---------- G : array-like, shape(ns,nt) The transport map at a given iteration of the FW deltaG : array-like (ns,nt) Difference between the optimal map found by linearization in the FW algorithm and the value at a given iteration cost_G : float Value of the cost at `G` C1 : array-like (ns,ns), optional Transformed Structure matrix in the source domain. 
Note that for the 'square_loss' and 'kl_loss', we provide hC1 from ot.gromov.init_matrix_semirelaxed C2 : array-like (nt,nt), optional Transformed Structure matrix in the target domain. Note that for the 'square_loss' and 'kl_loss', we provide hC2 from ot.gromov.init_matrix_semirelaxed ones_p: array-like (ns,1) Array of ones of size ns M : array-like (ns,nt) Cost matrix between the features. reg : float Regularization parameter. fC2t: array-like (nt,nt), optional Transformed Structure matrix in the target domain. Note that for the 'square_loss' and 'kl_loss', we provide fC2t from ot.gromov.init_matrix_semirelaxed. If fC2t is not provided, it defaults to the fC2t corresponding to the 'square_loss'. alpha_min : float, optional Minimum value for alpha alpha_max : float, optional Maximum value for alpha nx : backend, optional If left to its default value None, a backend test will be conducted. Returns ------- alpha : float The optimal step size of the FW fc : int Number of function calls. Unused here cost_G : float The value of the cost for the next iteration References ---------- .. [48] CĂ©dric Vincent-Cuaz, RĂ©mi Flamary, Marco Corneli, Titouan Vayer, Nicolas Courty. "Semi-relaxed Gromov-Wasserstein divergence and applications on graphs" International Conference on Learning Representations (ICLR), 2022. .. [62] H. Van Assel, C. Vincent-Cuaz, T. Vayer, R. Flamary, N. Courty. "Interpolating between Clustering and Dimensionality Reduction with Gromov-Wasserstein". NeurIPS 2023 Workshop OTML. """ if nx is None: G, deltaG, C1, C2, M = list_to_array(G, deltaG, C1, C2, M) if isinstance(M, int) or isinstance(M, float): nx = get_backend(G, deltaG, C1, C2) else: nx = get_backend(G, deltaG, C1, C2, M) qG, qdeltaG = nx.sum(G, 0), nx.sum(deltaG, 0) dot = nx.dot(nx.dot(C1, deltaG), C2.T) if fC2t is None: fC2t = C2.T ** 2 dot_qG = nx.dot(nx.outer(ones_p, qG), fC2t) dot_qdeltaG = nx.dot(nx.outer(ones_p, qdeltaG), fC2t) a = reg * nx.sum((dot_qdeltaG - dot) * deltaG) b = nx.sum(M * deltaG) + reg * (nx.sum((dot_qdeltaG - dot) * G) + nx.sum((dot_qG - nx.dot(nx.dot(C1, G), C2.T)) * deltaG)) alpha = solve_1d_linesearch_quad(a, b) if alpha_min is not None or alpha_max is not None: alpha = np.clip(alpha, alpha_min, alpha_max) # the new cost can be deduced from the line search quadratic function cost_G = cost_G + a * (alpha ** 2) + b * alpha return alpha, 1, cost_G def entropic_semirelaxed_gromov_wasserstein( C1, C2, p=None, loss_fun='square_loss', epsilon=0.1, symmetric=None, G0=None, max_iter=1e4, tol=1e-9, log=False, verbose=False, **kwargs): r""" Returns the entropic-regularized semi-relaxed Gromov-Wasserstein divergence transport plan from :math:`(\mathbf{C_1}, \mathbf{p})` to :math:`\mathbf{C_2}` estimated using a Mirror Descent algorithm following the KL geometry. The function solves the following optimization problem: .. math:: \mathbf{T}^* \in \mathop{\arg \min}_\mathbf{T} \quad \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j} \mathbf{T}_{k,l} s.t. \ \mathbf{T} \mathbf{1} &= \mathbf{p} \mathbf{T} &\geq 0 Where : - :math:`\mathbf{C_1}`: Metric cost matrix in the source space - :math:`\mathbf{C_2}`: Metric cost matrix in the target space - :math:`\mathbf{p}`: distribution in the source space - `L`: loss function to account for the misfit between the similarity matrices .. note:: This function is backend-compatible and will work on arrays from all compatible backends. However, not all the steps of the mirror descent solver are differentiable.
Parameters ---------- C1 : array-like, shape (ns, ns) Metric cost matrix in the source space C2 : array-like, shape (nt, nt) Metric cost matrix in the target space p : array-like, shape (ns,), optional Distribution in the source space. If left to its default value None, uniform distribution is taken. loss_fun : str Loss function used for the solver, either 'square_loss' or 'kl_loss'. epsilon : float Regularization term >0 symmetric : bool, optional Whether C1 and C2 are to be assumed symmetric or not. If left to its default None value, a symmetry test will be conducted. Else if set to True (resp. False), C1 and C2 will be assumed symmetric (resp. asymmetric). G0: array-like, shape (ns,nt), optional If None the initial transport plan of the solver is pq^T. Otherwise G0 must satisfy marginal constraints and will be used as initial transport of the solver. max_iter : int, optional Max number of iterations tol : float, optional Stop threshold on error computed on transport plans log : bool, optional record log if True verbose : bool, optional Print information along iterations Returns ------- G : array-like, shape (`ns`, `nt`) Coupling between the two spaces that minimizes: :math:`\sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j} \mathbf{T}_{k,l}` log : dict Convergence information and loss. References ---------- .. [48] CĂ©dric Vincent-Cuaz, RĂ©mi Flamary, Marco Corneli, Titouan Vayer, Nicolas Courty. "Semi-relaxed Gromov-Wasserstein divergence and applications on graphs" International Conference on Learning Representations (ICLR), 2022. """ arr = [C1, C2] if p is not None: arr.append(list_to_array(p)) else: p = unif(C1.shape[0], type_as=C1) if G0 is not None: arr.append(G0) nx = get_backend(*arr) if symmetric is None: symmetric = nx.allclose(C1, C1.T, atol=1e-10) and nx.allclose(C2, C2.T, atol=1e-10) if G0 is None: q = unif(C2.shape[0], type_as=p) G0 = nx.outer(p, q) else: q = nx.sum(G0, 0) # Check first marginal of G0 np.testing.assert_allclose(nx.sum(G0, 1), p, atol=1e-08) constC, hC1, hC2, fC2t = init_matrix_semirelaxed(C1, C2, p, loss_fun, nx) ones_p = nx.ones(p.shape[0], type_as=p) if symmetric: def df(G): qG = nx.sum(G, 0) marginal_product = nx.outer(ones_p, nx.dot(qG, fC2t)) return gwggrad(constC + marginal_product, hC1, hC2, G, nx) else: constCt, hC1t, hC2t, fC2 = init_matrix_semirelaxed(C1.T, C2.T, p, loss_fun, nx) def df(G): qG = nx.sum(G, 0) marginal_product_1 = nx.outer(ones_p, nx.dot(qG, fC2t)) marginal_product_2 = nx.outer(ones_p, nx.dot(qG, fC2)) return 0.5 * (gwggrad(constC + marginal_product_1, hC1, hC2, G, nx) + gwggrad(constCt + marginal_product_2, hC1t, hC2t, G, nx)) cpt = 0 err = 1e15 G = G0 if log: log = {'err': []} while (err > tol and cpt < max_iter): Gprev = G # compute the kernel K = G * nx.exp(- df(G) / epsilon) scaling = p / nx.sum(K, 1) G = nx.reshape(scaling, (-1, 1)) * K if cpt % 10 == 0: # we can speed up the process by checking for the error only every # 10th iteration err = nx.norm(G - Gprev) if log: log['err'].append(err) if verbose: if cpt % 200 == 0: print('{:5s}|{:12s}'.format( 'It.', 'Err') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(cpt, err)) cpt += 1 if log: qG = nx.sum(G, 0) marginal_product = nx.outer(ones_p, nx.dot(qG, fC2t)) log['srgw_dist'] = gwloss(constC + marginal_product, hC1, hC2, G, nx) return G, log else: return G def entropic_semirelaxed_gromov_wasserstein2( C1, C2, p=None, loss_fun='square_loss', epsilon=0.1, symmetric=None, G0=None, max_iter=1e4,
tol=1e-9, log=False, verbose=False, **kwargs): r""" Returns the entropic-regularized semi-relaxed gromov-wasserstein divergence from :math:`(\mathbf{C_1}, \mathbf{p})` to :math:`\mathbf{C_2}` estimated using a Mirror Descent algorithm following the KL geometry. The function solves the following optimization problem: .. math:: \mathbf{srGW} = \min_{\mathbf{T}} \quad \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j} \mathbf{T}_{k,l} s.t. \ \mathbf{T} \mathbf{1} &= \mathbf{p} \mathbf{T} &\geq 0 Where : - :math:`\mathbf{C_1}`: Metric cost matrix in the source space - :math:`\mathbf{C_2}`: Metric cost matrix in the target space - :math:`\mathbf{p}`: distribution in the source space - `L`: loss function to account for the misfit between the similarity matrices Note that when using backends, this loss function is differentiable wrt the matrices (C1, C2) but not yet for the weights p. .. note:: This function is backend-compatible and will work on arrays from all compatible backends. However all the steps in the conditional gradient are not differentiable. Parameters ---------- C1 : array-like, shape (ns, ns) Metric cost matrix in the source space C2 : array-like, shape (nt, nt) Metric cost matrix in the target space p : array-like, shape (ns,), optional Distribution in the source space. If let to its default value None, uniform distribution is taken. loss_fun : str loss function used for the solver either 'square_loss' or 'kl_loss'. epsilon : float Regularization term >0 symmetric : bool, optional Either C1 and C2 are to be assumed symmetric or not. If let to its default None value, a symmetry test will be conducted. Else if set to True (resp. False), C1 and C2 will be assumed symmetric (resp. asymetric). verbose : bool, optional Print information along iterations G0: array-like, shape (ns,nt), optional If None the initial transport plan of the solver is pq^T. Otherwise G0 must satisfy marginal constraints and will be used as initial transport of the solver. max_iter : int, optional Max number of iterations tol : float, optional Stop threshold on error computed on transport plans log : bool, optional record log if True verbose : bool, optional Print information along iterations **kwargs : dict parameters can be directly passed to the ot.optim.cg solver Returns ------- srgw : float Semi-relaxed Gromov-Wasserstein divergence log : dict convergence information and Coupling matrix References ---------- .. [48] CĂ©dric Vincent-Cuaz, RĂ©mi Flamary, Marco Corneli, Titouan Vayer, Nicolas Courty. "Semi-relaxed Gromov-Wasserstein divergence and applications on graphs" International Conference on Learning Representations (ICLR), 2022. """ T, log_srgw = entropic_semirelaxed_gromov_wasserstein( C1, C2, p, loss_fun, epsilon, symmetric, G0, max_iter, tol, log=True, verbose=verbose, **kwargs) log_srgw['T'] = T if log: return log_srgw['srgw_dist'], log_srgw else: return log_srgw['srgw_dist'] def entropic_semirelaxed_fused_gromov_wasserstein( M, C1, C2, p=None, loss_fun='square_loss', symmetric=None, epsilon=0.1, alpha=0.5, G0=None, max_iter=1e4, tol=1e-9, log=False, verbose=False, **kwargs): r""" Computes the entropic-regularized semi-relaxed FGW transport between two graphs (see :ref:`[48] `) estimated using a Mirror Descent algorithm following the KL geometry. .. math:: \mathbf{T}^* \in \mathop{\arg \min}_{\mathbf{T}} \quad (1 - \alpha) \langle \mathbf{T}, \mathbf{M} \rangle_F + \alpha \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j} \mathbf{T}_{k,l} s.t. 
\ \mathbf{T} \mathbf{1} &= \mathbf{p} \mathbf{T} &\geq 0 where : - :math:`\mathbf{M}` is the (`ns`, `nt`) metric cost matrix between features - :math:`\mathbf{C_1}`: Metric cost matrix in the source space - :math:`\mathbf{C_2}`: Metric cost matrix in the target space - :math:`\mathbf{p}` source weights (sum to 1) - `L` is a loss function to account for the misfit between the similarity matrices .. note:: This function is backend-compatible and will work on arrays from all compatible backends. However all the steps in the conditional gradient are not differentiable. The algorithm used for solving the problem is conditional gradient as discussed in :ref:`[48] ` Parameters ---------- M : array-like, shape (ns, nt) Metric cost matrix between features across domains C1 : array-like, shape (ns, ns) Metric cost matrix representative of the structure in the source space C2 : array-like, shape (nt, nt) Metric cost matrix representative of the structure in the target space p : array-like, shape (ns,), optional Distribution in the source space. If let to its default value None, uniform distribution is taken. loss_fun : str loss function used for the solver either 'square_loss' or 'kl_loss'. epsilon : float Regularization term >0 symmetric : bool, optional Either C1 and C2 are to be assumed symmetric or not. If let to its default None value, a symmetry test will be conducted. Else if set to True (resp. False), C1 and C2 will be assumed symmetric (resp. asymetric). alpha : float, optional Trade-off parameter (0 < alpha < 1) G0: array-like, shape (ns,nt), optional If None the initial transport plan of the solver is pq^T. Otherwise G0 must satisfy marginal constraints and will be used as initial transport of the solver. max_iter : int, optional Max number of iterations tol : float, optional Stop threshold on error computed on transport plans log : bool, optional record log if True verbose : bool, optional Print information along iterations **kwargs : dict parameters can be directly passed to the ot.optim.cg solver Returns ------- G : array-like, shape (`ns`, `nt`) Optimal transportation matrix for the given parameters. log : dict Log dictionary return only if log==True in parameters. .. _references-semirelaxed-fused-gromov-wasserstein: References ---------- .. [48] CĂ©dric Vincent-Cuaz, RĂ©mi Flamary, Marco Corneli, Titouan Vayer, Nicolas Courty. "Semi-relaxed Gromov-Wasserstein divergence and applications on graphs" International Conference on Learning Representations (ICLR), 2022. 
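Examples -------- A minimal illustrative run on small synthetic graphs (the sizes, the L1 structure/feature costs and `epsilon=0.5` are arbitrary choices for this sketch, not reference values): >>> import numpy as np >>> import ot >>> rng = np.random.RandomState(0) >>> xs, xt = rng.randn(4, 2), rng.randn(5, 2) >>> C1 = np.abs(xs[:, None] - xs[None, :]).sum(-1) # source structure >>> C2 = np.abs(xt[:, None] - xt[None, :]).sum(-1) # target structure >>> M = np.abs(xs[:, None] - xt[None, :]).sum(-1) # feature cost >>> T = ot.gromov.entropic_semirelaxed_fused_gromov_wasserstein(M, C1, C2, epsilon=0.5) >>> T.shape (4, 5)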
""" arr = [M, C1, C2] if p is not None: arr.append(list_to_array(p)) else: p = unif(C1.shape[0], type_as=C1) if G0 is not None: arr.append(G0) nx = get_backend(*arr) if symmetric is None: symmetric = nx.allclose(C1, C1.T, atol=1e-10) and nx.allclose(C2, C2.T, atol=1e-10) if G0 is None: q = unif(C2.shape[0], type_as=p) G0 = nx.outer(p, q) else: q = nx.sum(G0, 0) # Check first marginal of G0 np.testing.assert_allclose(nx.sum(G0, 1), p, atol=1e-08) constC, hC1, hC2, fC2t = init_matrix_semirelaxed(C1, C2, p, loss_fun, nx) ones_p = nx.ones(p.shape[0], type_as=p) dM = (1 - alpha) * M if symmetric: def df(G): qG = nx.sum(G, 0) marginal_product = nx.outer(ones_p, nx.dot(qG, fC2t)) return alpha * gwggrad(constC + marginal_product, hC1, hC2, G, nx) + dM else: constCt, hC1t, hC2t, fC2 = init_matrix_semirelaxed(C1.T, C2.T, p, loss_fun, nx) def df(G): qG = nx.sum(G, 0) marginal_product_1 = nx.outer(ones_p, nx.dot(qG, fC2t)) marginal_product_2 = nx.outer(ones_p, nx.dot(qG, fC2)) return 0.5 * alpha * (gwggrad(constC + marginal_product_1, hC1, hC2, G, nx) + gwggrad(constCt + marginal_product_2, hC1t, hC2t, G, nx)) + dM cpt = 0 err = 1e15 G = G0 if log: log = {'err': []} while (err > tol and cpt < max_iter): Gprev = G # compute the kernel K = G * nx.exp(- df(G) / epsilon) scaling = p / nx.sum(K, 1) G = nx.reshape(scaling, (-1, 1)) * K if cpt % 10 == 0: # we can speed up the process by checking for the error only all # the 10th iterations err = nx.norm(G - Gprev) if log: log['err'].append(err) if verbose: if cpt % 200 == 0: print('{:5s}|{:12s}'.format( 'It.', 'Err') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(cpt, err)) cpt += 1 if log: qG = nx.sum(G, 0) marginal_product = nx.outer(ones_p, nx.dot(qG, fC2t)) log['lin_loss'] = nx.sum(M * G) * (1 - alpha) log['quad_loss'] = alpha * gwloss(constC + marginal_product, hC1, hC2, G, nx) log['srfgw_dist'] = log['lin_loss'] + log['quad_loss'] return G, log else: return G def entropic_semirelaxed_fused_gromov_wasserstein2( M, C1, C2, p=None, loss_fun='square_loss', symmetric=None, epsilon=0.1, alpha=0.5, G0=None, max_iter=1e4, tol=1e-9, log=False, verbose=False, **kwargs): r""" Computes the entropic-regularized semi-relaxed FGW divergence between two graphs (see :ref:`[48] `) estimated using a Mirror Descent algorithm following the KL geometry. .. math:: \mathbf{srFGW}_{\alpha} = \min_{\mathbf{T}} \quad (1 - \alpha) \langle \mathbf{T}, \mathbf{M} \rangle_F + \alpha \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j} \mathbf{T}_{k,l} s.t. \ \mathbf{T} \mathbf{1} &= \mathbf{p} \mathbf{T} &\geq 0 where : - :math:`\mathbf{M}` is the (`ns`, `nt`) metric cost matrix between features - :math:`\mathbf{C_1}`: Metric cost matrix in the source space - :math:`\mathbf{C_2}`: Metric cost matrix in the target space - :math:`\mathbf{p}` source weights (sum to 1) - `L` is a loss function to account for the misfit between the similarity matrices .. note:: This function is backend-compatible and will work on arrays from all compatible backends. However all the steps in the conditional gradient are not differentiable. The algorithm used for solving the problem is conditional gradient as discussed in :ref:`[48] ` Parameters ---------- M : array-like, shape (ns, nt) Metric cost matrix between features across domains C1 : array-like, shape (ns, ns) Metric cost matrix representative of the structure in the source space. C2 : array-like, shape (nt, nt) Metric cost matrix representative of the structure in the target space. 
p : array-like, shape (ns,), optional Distribution in the source space. If let to its default value None, uniform distribution is taken. loss_fun : str, optional loss function used for the solver either 'square_loss' or 'kl_loss'. epsilon : float Regularization term >0 symmetric : bool, optional Either C1 and C2 are to be assumed symmetric or not. If let to its default None value, a symmetry test will be conducted. Else if set to True (resp. False), C1 and C2 will be assumed symmetric (resp. asymetric). alpha : float, optional Trade-off parameter (0 < alpha < 1) G0: array-like, shape (ns,nt), optional If None the initial transport plan of the solver is pq^T. Otherwise G0 must satisfy marginal constraints and will be used as initial transport of the solver. max_iter : int, optional Max number of iterations tol : float, optional Stop threshold on error computed on transport plans log : bool, optional record log if True verbose : bool, optional Print information along iterations **kwargs : dict Parameters can be directly passed to the ot.optim.cg solver. Returns ------- srfgw-divergence : float Semi-relaxed Fused gromov wasserstein divergence for the given parameters. log : dict Log dictionary return only if log==True in parameters. .. _references-semirelaxed-fused-gromov-wasserstein2: References ---------- .. [48] CĂ©dric Vincent-Cuaz, RĂ©mi Flamary, Marco Corneli, Titouan Vayer, Nicolas Courty. "Semi-relaxed Gromov-Wasserstein divergence and applications on graphs" International Conference on Learning Representations (ICLR), 2022. """ T, log_srfgw = entropic_semirelaxed_fused_gromov_wasserstein( M, C1, C2, p, loss_fun, symmetric, epsilon, alpha, G0, max_iter, tol, log=True, verbose=verbose, **kwargs) log_srfgw['T'] = T if log: return log_srfgw['srfgw_dist'], log_srfgw else: return log_srfgw['srfgw_dist'] python-pot-0.9.3+dfsg/ot/gromov/_utils.py000066400000000000000000000372321455713015700204060ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Gromov-Wasserstein and Fused-Gromov-Wasserstein utils. """ # Author: Erwan Vautier # Nicolas Courty # RĂ©mi Flamary # Titouan Vayer # CĂ©dric Vincent-Cuaz # # License: MIT License from ..utils import list_to_array from ..backend import get_backend def init_matrix(C1, C2, p, q, loss_fun='square_loss', nx=None): r"""Return loss matrices and tensors for Gromov-Wasserstein fast computation Returns the value of :math:`\mathcal{L}(\mathbf{C_1}, \mathbf{C_2}) \otimes \mathbf{T}` with the selected loss function as the loss function of Gromov-Wasserstein discrepancy. The matrices are computed as described in Proposition 1 in :ref:`[12] ` Where : - :math:`\mathbf{C_1}`: Metric cost matrix in the source space - :math:`\mathbf{C_2}`: Metric cost matrix in the target space - :math:`\mathbf{T}`: A coupling between those two spaces The square-loss function :math:`L(a, b) = |a - b|^2` is read as : .. math:: L(a, b) = f_1(a) + f_2(b) - h_1(a) h_2(b) \mathrm{with} \ f_1(a) &= a^2 f_2(b) &= b^2 h_1(a) &= a h_2(b) &= 2b The kl-loss function :math:`L(a, b) = a \log\left(\frac{a}{b}\right) - a + b` is read as : .. 
math:: L(a, b) = f_1(a) + f_2(b) - h_1(a) h_2(b) \mathrm{with} \ f_1(a) &= a \log(a) - a f_2(b) &= b h_1(a) &= a h_2(b) &= \log(b) Parameters ---------- C1 : array-like, shape (ns, ns) Metric cost matrix in the source space C2 : array-like, shape (nt, nt) Metric cost matrix in the target space p : array-like, shape (ns,) Probability distribution in the source space q : array-like, shape (nt,) Probability distribution in the target space loss_fun : str, optional Name of loss function to use: either 'square_loss' or 'kl_loss' (default='square_loss') nx : backend, optional If let to its default value None, a backend test will be conducted. Returns ------- constC : array-like, shape (ns, nt) Constant :math:`\mathbf{C}` matrix in Eq. (6) hC1 : array-like, shape (ns, ns) :math:`\mathbf{h1}(\mathbf{C1})` matrix in Eq. (6) hC2 : array-like, shape (nt, nt) :math:`\mathbf{h2}(\mathbf{C2})` matrix in Eq. (6) .. _references-init-matrix: References ---------- .. [12] Gabriel PeyrĂ©, Marco Cuturi, and Justin Solomon, "Gromov-Wasserstein averaging of kernel and distance matrices." International Conference on Machine Learning (ICML). 2016. """ if nx is None: C1, C2, p, q = list_to_array(C1, C2, p, q) nx = get_backend(C1, C2, p, q) if loss_fun == 'square_loss': def f1(a): return (a**2) def f2(b): return (b**2) def h1(a): return a def h2(b): return 2 * b elif loss_fun == 'kl_loss': def f1(a): return a * nx.log(a + 1e-15) - a def f2(b): return b def h1(a): return a def h2(b): return nx.log(b + 1e-15) else: raise ValueError(f"Unknown `loss_fun='{loss_fun}'`. Use one of: {'square_loss', 'kl_loss'}.") constC1 = nx.dot( nx.dot(f1(C1), nx.reshape(p, (-1, 1))), nx.ones((1, len(q)), type_as=q) ) constC2 = nx.dot( nx.ones((len(p), 1), type_as=p), nx.dot(nx.reshape(q, (1, -1)), f2(C2).T) ) constC = constC1 + constC2 hC1 = h1(C1) hC2 = h2(C2) return constC, hC1, hC2 def tensor_product(constC, hC1, hC2, T, nx=None): r"""Return the tensor for Gromov-Wasserstein fast computation The tensor is computed as described in Proposition 1 Eq. (6) in :ref:`[12] ` Parameters ---------- constC : array-like, shape (ns, nt) Constant :math:`\mathbf{C}` matrix in Eq. (6) hC1 : array-like, shape (ns, ns) :math:`\mathbf{h1}(\mathbf{C1})` matrix in Eq. (6) hC2 : array-like, shape (nt, nt) :math:`\mathbf{h2}(\mathbf{C2})` matrix in Eq. (6) nx : backend, optional If let to its default value None, a backend test will be conducted. Returns ------- tens : array-like, shape (`ns`, `nt`) :math:`\mathcal{L}(\mathbf{C_1}, \mathbf{C_2}) \otimes \mathbf{T}` tensor-matrix multiplication result .. _references-tensor-product: References ---------- .. [12] Gabriel PeyrĂ©, Marco Cuturi, and Justin Solomon, "Gromov-Wasserstein averaging of kernel and distance matrices." International Conference on Machine Learning (ICML). 2016. """ if nx is None: constC, hC1, hC2, T = list_to_array(constC, hC1, hC2, T) nx = get_backend(constC, hC1, hC2, T) A = - nx.dot( nx.dot(hC1, T), hC2.T ) tens = constC + A # tens -= tens.min() return tens def gwloss(constC, hC1, hC2, T, nx=None): r"""Return the Loss for Gromov-Wasserstein The loss is computed as described in Proposition 1 Eq. (6) in :ref:`[12] ` Parameters ---------- constC : array-like, shape (ns, nt) Constant :math:`\mathbf{C}` matrix in Eq. (6) hC1 : array-like, shape (ns, ns) :math:`\mathbf{h1}(\mathbf{C1})` matrix in Eq. (6) hC2 : array-like, shape (nt, nt) :math:`\mathbf{h2}(\mathbf{C2})` matrix in Eq. 
(6) T : array-like, shape (ns, nt) Current value of transport matrix :math:`\mathbf{T}` nx : backend, optional If let to its default value None, a backend test will be conducted. Returns ------- loss : float Gromov-Wasserstein loss .. _references-gwloss: References ---------- .. [12] Gabriel PeyrĂ©, Marco Cuturi, and Justin Solomon, "Gromov-Wasserstein averaging of kernel and distance matrices." International Conference on Machine Learning (ICML). 2016. """ tens = tensor_product(constC, hC1, hC2, T, nx) if nx is None: tens, T = list_to_array(tens, T) nx = get_backend(tens, T) return nx.sum(tens * T) def gwggrad(constC, hC1, hC2, T, nx=None): r"""Return the gradient for Gromov-Wasserstein The gradient is computed as described in Proposition 2 in :ref:`[12] ` Parameters ---------- constC : array-like, shape (ns, nt) Constant :math:`\mathbf{C}` matrix in Eq. (6) hC1 : array-like, shape (ns, ns) :math:`\mathbf{h1}(\mathbf{C1})` matrix in Eq. (6) hC2 : array-like, shape (nt, nt) :math:`\mathbf{h2}(\mathbf{C2})` matrix in Eq. (6) T : array-like, shape (ns, nt) Current value of transport matrix :math:`\mathbf{T}` nx : backend, optional If let to its default value None, a backend test will be conducted. Returns ------- grad : array-like, shape (`ns`, `nt`) Gromov-Wasserstein gradient .. _references-gwggrad: References ---------- .. [12] Gabriel PeyrĂ©, Marco Cuturi, and Justin Solomon, "Gromov-Wasserstein averaging of kernel and distance matrices." International Conference on Machine Learning (ICML). 2016. """ return 2 * tensor_product(constC, hC1, hC2, T, nx) # [12] Prop. 2 misses a 2 factor def update_square_loss(p, lambdas, T, Cs, nx=None): r""" Updates :math:`\mathbf{C}` according to the L2 Loss kernel with the `S` :math:`\mathbf{T}_s` couplings calculated at each iteration of the GW barycenter problem in :ref:`[12]`: .. math:: \mathbf{C}^* = \mathop{\arg \min}_{\mathbf{C}\in \mathbb{R}^{N \times N}} \quad \sum_s \lambda_s \mathrm{GW}(\mathbf{C}, \mathbf{C}_s, \mathbf{p}, \mathbf{p}_s) Where : - :math:`\mathbf{C}_s`: metric cost matrix - :math:`\mathbf{p}_s`: distribution Parameters ---------- p : array-like, shape (N,) Masses in the targeted barycenter. lambdas : list of float List of the `S` spaces' weights. T : list of S array-like of shape (N, ns) The `S` :math:`\mathbf{T}_s` couplings calculated at each iteration. Cs : list of S array-like, shape(ns,ns) Metric cost matrices. nx : backend, optional If let to its default value None, a backend test will be conducted. Returns ---------- C : array-like, shape (`nt`, `nt`) Updated :math:`\mathbf{C}` matrix. References ---------- .. [12] Gabriel PeyrĂ©, Marco Cuturi, and Justin Solomon, "Gromov-Wasserstein averaging of kernel and distance matrices." International Conference on Machine Learning (ICML). 2016. """ if nx is None: nx = get_backend(p, *T, *Cs) # Correct order mistake in Equation 14 in [12] tmpsum = sum([ lambdas[s] * nx.dot( nx.dot(T[s], Cs[s]), T[s].T ) for s in range(len(T)) ]) ppt = nx.outer(p, p) return tmpsum / ppt def update_kl_loss(p, lambdas, T, Cs, nx=None): r""" Updates :math:`\mathbf{C}` according to the KL Loss kernel with the `S` :math:`\mathbf{T}_s` couplings calculated at each iteration of the GW barycenter problem in :ref:`[12]`: .. 
math:: \mathbf{C}^* = \mathop{\arg \min}_{\mathbf{C}\in \mathbb{R}^{N \times N}} \quad \sum_s \lambda_s \mathrm{GW}(\mathbf{C}, \mathbf{C}_s, \mathbf{p}, \mathbf{p}_s) Where : - :math:`\mathbf{C}_s`: metric cost matrix - :math:`\mathbf{p}_s`: distribution Parameters ---------- p : array-like, shape (N,) Weights in the targeted barycenter. lambdas : list of float List of the `S` spaces' weights T : list of S array-like of shape (N, ns) The `S` :math:`\mathbf{T}_s` couplings calculated at each iteration. Cs : list of S array-like, shape(ns,ns) Metric cost matrices. nx : backend, optional If let to its default value None, a backend test will be conducted. Returns ---------- C : array-like, shape (`ns`, `ns`) updated :math:`\mathbf{C}` matrix References ---------- .. [12] Gabriel PeyrĂ©, Marco Cuturi, and Justin Solomon, "Gromov-Wasserstein averaging of kernel and distance matrices." International Conference on Machine Learning (ICML). 2016. """ if nx is None: nx = get_backend(p, *T, *Cs) # Correct order mistake in Equation 15 in [12] tmpsum = sum([ lambdas[s] * nx.dot( nx.dot(T[s], nx.log(nx.maximum(Cs[s], 1e-15))), T[s].T ) for s in range(len(T)) ]) ppt = nx.outer(p, p) return nx.exp(tmpsum / ppt) def update_feature_matrix(lambdas, Ys, Ts, p, nx=None): r"""Updates the feature with respect to the `S` :math:`\mathbf{T}_s` couplings. See "Solving the barycenter problem with Block Coordinate Descent (BCD)" in :ref:`[24] ` calculated at each iteration Parameters ---------- p : array-like, shape (N,) masses in the targeted barycenter lambdas : list of float List of the `S` spaces' weights Ts : list of S array-like, shape (N, ns) The `S` :math:`\mathbf{T}_s` couplings calculated at each iteration Ys : list of S array-like, shape (d,ns) The features. nx : backend, optional If let to its default value None, a backend test will be conducted. Returns ------- X : array-like, shape (`d`, `N`) .. _references-update-feature-matrix: References ---------- .. [24] Vayer Titouan, Chapel Laetitia, Flamary RĂ©mi, Tavenard Romain and Courty Nicolas "Optimal Transport for structured data with application on graphs" International Conference on Machine Learning (ICML). 2019. """ if nx is None: nx = get_backend(*Ys, *Ts, p) p = 1. / p tmpsum = sum([ lambdas[s] * nx.dot(Ys[s], Ts[s].T) * p[None, :] for s in range(len(Ts)) ]) return tmpsum def init_matrix_semirelaxed(C1, C2, p, loss_fun='square_loss', nx=None): r"""Return loss matrices and tensors for semi-relaxed Gromov-Wasserstein fast computation Returns the value of :math:`\mathcal{L}(\mathbf{C_1}, \mathbf{C_2}) \otimes \mathbf{T}` with the selected loss function as the loss function of semi-relaxed Gromov-Wasserstein discrepancy. The matrices are computed as described in Proposition 1 in :ref:`[12] ` and adapted to the semi-relaxed problem where the second marginal is not a constant anymore. Where : - :math:`\mathbf{C_1}`: Metric cost matrix in the source space - :math:`\mathbf{C_2}`: Metric cost matrix in the target space - :math:`\mathbf{T}`: A coupling between those two spaces The square-loss function :math:`L(a, b) = |a - b|^2` is read as : .. math:: L(a, b) = f_1(a) + f_2(b) - h_1(a) h_2(b) \mathrm{with} \ f_1(a) &= a^2 f_2(b) &= b^2 h_1(a) &= a h_2(b) &= 2b The kl-loss function :math:`L(a, b) = a \log\left(\frac{a}{b}\right) - a + b` is read as : .. 
math:: L(a, b) = f_1(a) + f_2(b) - h_1(a) h_2(b) \mathrm{with} \ f_1(a) &= a \log(a) - a f_2(b) &= b h_1(a) &= a h_2(b) &= \log(b) Parameters ---------- C1 : array-like, shape (ns, ns) Metric cost matrix in the source space C2 : array-like, shape (nt, nt) Metric cost matrix in the target space p : array-like, shape (ns,) loss_fun : str, optional Name of loss function to use: either 'square_loss' or 'kl_loss' (default='square_loss') nx : backend, optional If let to its default value None, a backend test will be conducted. Returns ------- constC : array-like, shape (ns, nt) Constant :math:`\mathbf{C}` matrix in Eq. (6) adapted to srGW hC1 : array-like, shape (ns, ns) :math:`\mathbf{h1}(\mathbf{C1})` matrix in Eq. (6) hC2 : array-like, shape (nt, nt) :math:`\mathbf{h2}(\mathbf{C2})` matrix in Eq. (6) fC2t: array-like, shape (nt, nt) :math:`\mathbf{f2}(\mathbf{C2})^\top` matrix in Eq. (6) .. _references-init-matrix: References ---------- .. [12] Gabriel PeyrĂ©, Marco Cuturi, and Justin Solomon, "Gromov-Wasserstein averaging of kernel and distance matrices." International Conference on Machine Learning (ICML). 2016. .. [48] CĂ©dric Vincent-Cuaz, RĂ©mi Flamary, Marco Corneli, Titouan Vayer, Nicolas Courty. "Semi-relaxed Gromov-Wasserstein divergence and applications on graphs" International Conference on Learning Representations (ICLR), 2022. """ if nx is None: C1, C2, p = list_to_array(C1, C2, p) nx = get_backend(C1, C2, p) if loss_fun == 'square_loss': def f1(a): return (a**2) def f2(b): return (b**2) def h1(a): return a def h2(b): return 2 * b elif loss_fun == 'kl_loss': def f1(a): return a * nx.log(a + 1e-15) - a def f2(b): return b def h1(a): return a def h2(b): return nx.log(b + 1e-15) else: raise ValueError(f"Unknown `loss_fun='{loss_fun}'`. Use one of: {'square_loss', 'kl_loss'}.") constC = nx.dot(nx.dot(f1(C1), nx.reshape(p, (-1, 1))), nx.ones((1, C2.shape[0]), type_as=p)) hC1 = h1(C1) hC2 = h2(C2) fC2t = f2(C2).T return constC, hC1, hC2, fC2t python-pot-0.9.3+dfsg/ot/helpers/000077500000000000000000000000001455713015700166575ustar00rootroot00000000000000python-pot-0.9.3+dfsg/ot/helpers/__init__.py000066400000000000000000000001101455713015700207600ustar00rootroot00000000000000# Author: Remi Flamary # # License: MIT License python-pot-0.9.3+dfsg/ot/helpers/openmp_helpers.py000066400000000000000000000051471455713015700222600ustar00rootroot00000000000000"""Helpers for OpenMP support during the build.""" # This code is adapted for a large part from the astropy openmp helpers, which # can be found at: https://github.com/astropy/extension-helpers/blob/master/extension_helpers/_openmp_helpers.py # noqa import os import sys import textwrap import subprocess from distutils.errors import CompileError, LinkError from pre_build_helpers import compile_test_program def get_openmp_flag(compiler): """Get openmp flags for a given compiler""" if hasattr(compiler, 'compiler'): compiler = compiler.compiler[0] else: compiler = compiler.__class__.__name__ if sys.platform == "win32" and ('icc' in compiler or 'icl' in compiler): omp_flag = ['/Qopenmp'] elif sys.platform == "win32": omp_flag = ['/openmp'] elif sys.platform in ("darwin", "linux") and "icc" in compiler: omp_flag = ['-qopenmp'] elif sys.platform == "darwin" and 'openmp' in os.getenv('CPPFLAGS', ''): omp_flag = [] else: # Default flag for GCC and clang: omp_flag = ['-fopenmp'] if sys.platform.startswith("darwin"): omp_flag += ["-Xpreprocessor", "-lomp"] return omp_flag def check_openmp_support(): """Check whether OpenMP test code can be compiled 
and run""" code = textwrap.dedent( """\ #include #include int main(void) { #pragma omp parallel printf("nthreads=%d\\n", omp_get_num_threads()); return 0; } """) extra_preargs = os.getenv('LDFLAGS', None) if extra_preargs is not None: extra_preargs = extra_preargs.strip().split(" ") extra_preargs = [ flag for flag in extra_preargs if flag.startswith(('-L', '-Wl,-rpath', '-l'))] extra_postargs = get_openmp_flag try: output, compile_flags = compile_test_program( code, extra_preargs=extra_preargs, extra_postargs=extra_postargs ) if output and 'nthreads=' in output[0]: nthreads = int(output[0].strip().split('=')[1]) openmp_supported = len(output) == nthreads elif "PYTHON_CROSSENV" in os.environ: # Since we can't run the test program when cross-compiling # assume that openmp is supported if the program can be # compiled. openmp_supported = True else: openmp_supported = False except (CompileError, LinkError, subprocess.CalledProcessError): openmp_supported = False compile_flags = [] return openmp_supported, compile_flags python-pot-0.9.3+dfsg/ot/helpers/pre_build_helpers.py000066400000000000000000000042221455713015700227200ustar00rootroot00000000000000"""Helpers to check build environment before actual build of POT""" import os import sys import glob import tempfile import subprocess from setuptools.command.build_ext import customize_compiler, new_compiler def _get_compiler(): ccompiler = new_compiler() customize_compiler(ccompiler) return ccompiler def compile_test_program(code, extra_preargs=[], extra_postargs=[]): """Check that some C code can be compiled and run""" ccompiler = _get_compiler() # extra_(pre/post)args can be a callable to make it possible to get its # value from the compiler if callable(extra_preargs): extra_preargs = extra_preargs(ccompiler) if callable(extra_postargs): extra_postargs = extra_postargs(ccompiler) start_dir = os.path.abspath('.') with tempfile.TemporaryDirectory() as tmp_dir: try: os.chdir(tmp_dir) # Write test program with open('test_program.c', 'w') as f: f.write(code) os.mkdir('objects') # Compile, test program ccompiler.compile(['test_program.c'], output_dir='objects', extra_postargs=extra_postargs) # Link test program objects = glob.glob( os.path.join('objects', '*' + ccompiler.obj_extension)) ccompiler.link_executable(objects, 'test_program', extra_preargs=extra_preargs, extra_postargs=extra_postargs) if "PYTHON_CROSSENV" not in os.environ: # Run test program if not cross compiling # will raise a CalledProcessError if return code was non-zero output = subprocess.check_output('./test_program') output = output.decode( sys.stdout.encoding or 'utf-8').splitlines() else: # Return an empty output if we are cross compiling # as we cannot run the test_program output = [] except Exception: raise finally: os.chdir(start_dir) return output, extra_postargs python-pot-0.9.3+dfsg/ot/lowrank.py000066400000000000000000000415131455713015700172500ustar00rootroot00000000000000""" Low rank OT solvers """ # Author: Laurène David # # License: MIT License import warnings from .utils import unif, dist, get_lowrank_lazytensor from .backend import get_backend from .bregman import sinkhorn # test if sklearn is installed for linux-minimal-deps try: import sklearn.cluster sklearn_import = True except ImportError: sklearn_import = False def _init_lr_sinkhorn(X_s, X_t, a, b, rank, init, reg_init, random_state, nx=None): """ Implementation of different initialization strategies for the low rank sinkhorn solver (Q ,R, g). This function is specific to lowrank_sinkhorn. 
Parameters ---------- X_s : array-like, shape (n_samples_a, dim) samples in the source domain X_t : array-like, shape (n_samples_b, dim) samples in the target domain a : array-like, shape (n_samples_a,) samples weights in the source domain b : array-like, shape (n_samples_b,) samples weights in the target domain rank : int Nonnegative rank of the OT plan. init : str Initialization strategy for Q, R and g. 'random', 'trivial' or 'kmeans' reg_init : float, optional. Regularization term for a 'kmeans' init. random_state : int, optional. Random state for a "random" or 'kmeans' init strategy nx : optional, Default is None POT backend Returns --------- Q : array-like, shape (n_samples_a, r) Init for the first low-rank matrix decomposition of the OT plan (Q) R: array-like, shape (n_samples_b, r) Init for the second low-rank matrix decomposition of the OT plan (R) g : array-like, shape (r, ) Init for the weight vector of the low-rank decomposition of the OT plan (g) References ----------- .. [65] Scetbon, M., Cuturi, M., & PeyrĂ©, G. (2021). "Low-rank Sinkhorn factorization". In International Conference on Machine Learning. """ if nx is None: nx = get_backend(X_s, X_t, a, b) ns = X_s.shape[0] nt = X_t.shape[0] r = rank if init == "random": nx.seed(seed=random_state) # Init g g = nx.abs(nx.randn(r, type_as=X_s)) + 1 g = g / nx.sum(g) # Init Q Q = nx.abs(nx.randn(ns, r, type_as=X_s)) + 1 Q = (Q.T * (a / nx.sum(Q, axis=1))).T # Init R R = nx.abs(nx.randn(nt, rank, type_as=X_s)) + 1 R = (R.T * (b / nx.sum(R, axis=1))).T if init == "deterministic": # Init g g = nx.ones(rank) / rank lambda_1 = min(nx.min(a), nx.min(g), nx.min(b)) / 2 a1 = nx.arange(start=1, stop=ns + 1, type_as=X_s) a1 = a1 / nx.sum(a1) a2 = (a - lambda_1 * a1) / (1 - lambda_1) b1 = nx.arange(start=1, stop=nt + 1, type_as=X_s) b1 = b1 / nx.sum(b1) b2 = (b - lambda_1 * b1) / (1 - lambda_1) g1 = nx.arange(start=1, stop=rank + 1, type_as=X_s) g1 = g1 / nx.sum(g1) g2 = (g - lambda_1 * g1) / (1 - lambda_1) # Init Q Q1 = lambda_1 * nx.dot(a1[:, None], nx.reshape(g1, (1, -1))) Q2 = (1 - lambda_1) * nx.dot(a2[:, None], nx.reshape(g2, (1, -1))) Q = Q1 + Q2 # Init R R1 = lambda_1 * nx.dot(b1[:, None], nx.reshape(g1, (1, -1))) R2 = (1 - lambda_1) * nx.dot(b2[:, None], nx.reshape(g2, (1, -1))) R = R1 + R2 if init == "kmeans": if sklearn_import: # Init g g = nx.ones(rank, type_as=X_s) / rank # Init Q kmeans_Xs = sklearn.cluster.KMeans(n_clusters=rank, random_state=random_state, n_init="auto") kmeans_Xs.fit(X_s) Z_Xs = nx.from_numpy(kmeans_Xs.cluster_centers_) C_Xs = dist(X_s, Z_Xs) # shape (ns, rank) C_Xs = C_Xs / nx.max(C_Xs) Q = sinkhorn(a, g, C_Xs, reg=reg_init, numItermax=10000, stopThr=1e-3) # Init R kmeans_Xt = sklearn.cluster.KMeans(n_clusters=rank, random_state=random_state, n_init="auto") kmeans_Xt.fit(X_t) Z_Xt = nx.from_numpy(kmeans_Xt.cluster_centers_) C_Xt = dist(X_t, Z_Xt) # shape (nt, rank) C_Xt = C_Xt / nx.max(C_Xt) R = sinkhorn(b, g, C_Xt, reg=reg_init, numItermax=10000, stopThr=1e-3) else: raise ImportError("Scikit-learn should be installed to use the 'kmeans' init.") return Q, R, g def compute_lr_sqeuclidean_matrix(X_s, X_t, rescale_cost, nx=None): """ Compute the low rank decomposition of a squared euclidean distance matrix. This function won't work for other distance metrics. 
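As a quick sanity check (an illustration derived from the construction below, not text from the reference): the function builds rows :math:`\mathbf{M1}_i = [\|x_i\|^2, 1, -2 x_i]` and :math:`\mathbf{M2}_j = [1, \|y_j\|^2, y_j]`, so that :math:`(\mathbf{M1} \mathbf{M2}^\top)_{ij} = \|x_i\|^2 + \|y_j\|^2 - 2 \langle x_i, y_j \rangle = \|x_i - y_j\|^2`, i.e. exactly the squared Euclidean cost (before the optional rescaling).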
See "Section 3.5, proposition 1" Parameters ---------- X_s : array-like, shape (n_samples_a, dim) samples in the source domain X_t : array-like, shape (n_samples_b, dim) samples in the target domain rescale_cost : bool Rescale the low rank factorization of the sqeuclidean cost matrix nx : default None POT backend Returns ---------- M1 : array-like, shape (n_samples_a, dim+2) First low rank decomposition of the distance matrix M2 : array-like, shape (n_samples_b, dim+2) Second low rank decomposition of the distance matrix References ----------- .. [65] Scetbon, M., Cuturi, M., & PeyrĂ©, G. (2021). "Low-rank Sinkhorn factorization". In International Conference on Machine Learning. """ if nx is None: nx = get_backend(X_s, X_t) ns = X_s.shape[0] nt = X_t.shape[0] # First low rank decomposition of the cost matrix (A) array1 = nx.reshape(nx.sum(X_s**2, 1), (-1, 1)) array2 = nx.ones((ns, 1), type_as=X_s) M1 = nx.concatenate((array1, array2, -2 * X_s), axis=1) # Second low rank decomposition of the cost matrix (B) array1 = nx.ones((nt, 1), type_as=X_s) array2 = nx.reshape(nx.sum(X_t**2, 1), (-1, 1)) M2 = nx.concatenate((array1, array2, X_t), axis=1) if rescale_cost is True: M1 = M1 / nx.sqrt(nx.max(M1)) M2 = M2 / nx.sqrt(nx.max(M2)) return M1, M2 def _LR_Dysktra(eps1, eps2, eps3, p1, p2, alpha, stopThr, numItermax, warn, nx=None): """ Implementation of the Dykstra algorithm for the Low Rank sinkhorn OT solver. This function is specific to lowrank_sinkhorn. Parameters ---------- eps1 : array-like, shape (n_samples_a, r) First input parameter of the Dykstra algorithm eps2 : array-like, shape (n_samples_b, r) Second input parameter of the Dykstra algorithm eps3 : array-like, shape (r,) Third input parameter of the Dykstra algorithm p1 : array-like, shape (n_samples_a,) Samples weights in the source domain (same as "a" in lowrank_sinkhorn) p2 : array-like, shape (n_samples_b,) Samples weights in the target domain (same as "b" in lowrank_sinkhorn) alpha: int Lower bound for the weight vector g (same as "alpha" in lowrank_sinkhorn) stopThr : float Stop threshold on error numItermax : int Max number of iterations warn : bool, optional if True, raises a warning if the algorithm doesn't convergence. nx : default None POT backend Returns ---------- Q : array-like, shape (n_samples_a, r) Dykstra update of the first low-rank matrix decomposition Q R: array-like, shape (n_samples_b, r) Dykstra update of the Second low-rank matrix decomposition R g : array-like, shape (r, ) Dykstra update of the weight vector g References ---------- .. [65] Scetbon, M., Cuturi, M., & PeyrĂ©, G. (2021). "Low-rank Sinkhorn Factorization". In International Conference on Machine Learning. 
""" # POT backend if None if nx is None: nx = get_backend(eps1, eps2, eps3, p1, p2) # ----------------- Initialisation of Dykstra algorithm ----------------- r = len(eps3) # rank g_ = nx.copy(eps3) # \tilde{g} q3_1, q3_2 = nx.ones(r, type_as=p1), nx.ones(r, type_as=p1) # q^{(3)}_1, q^{(3)}_2 v1_, v2_ = nx.ones(r, type_as=p1), nx.ones(r, type_as=p1) # \tilde{v}^{(1)}, \tilde{v}^{(2)} q1, q2 = nx.ones(r, type_as=p1), nx.ones(r, type_as=p1) # q^{(1)}, q^{(2)} err = 1 # initial error # --------------------- Dykstra algorithm ------------------------- # See Section 3.3 - "Algorithm 2 LR-Dykstra" in paper for ii in range(numItermax): if err > stopThr: # Compute u^{(1)} and u^{(2)} u1 = p1 / nx.dot(eps1, v1_) u2 = p2 / nx.dot(eps2, v2_) # Compute g, g^{(3)}_1 and update \tilde{g} g = nx.maximum(alpha, g_ * q3_1) q3_1 = (g_ * q3_1) / g g_ = nx.copy(g) # Compute new value of g with \prod prod1 = (v1_ * q1) * nx.dot(eps1.T, u1) prod2 = (v2_ * q2) * nx.dot(eps2.T, u2) g = (g_ * q3_2 * prod1 * prod2) ** (1 / 3) # Compute v^{(1)} and v^{(2)} v1 = g / nx.dot(eps1.T, u1) v2 = g / nx.dot(eps2.T, u2) # Compute q^{(1)}, q^{(2)} and q^{(3)}_2 q1 = (v1_ * q1) / v1 q2 = (v2_ * q2) / v2 q3_2 = (g_ * q3_2) / g # Update values of \tilde{v}^{(1)}, \tilde{v}^{(2)} and \tilde{g} v1_, v2_ = nx.copy(v1), nx.copy(v2) g_ = nx.copy(g) # Compute error err1 = nx.sum(nx.abs(u1 * (eps1 @ v1) - p1)) err2 = nx.sum(nx.abs(u2 * (eps2 @ v2) - p2)) err = err1 + err2 else: break else: if warn: warnings.warn( "Dykstra did not converge. You might want to " "increase the number of iterations `numItermax` " ) # Compute low rank matrices Q, R Q = u1[:, None] * eps1 * v1[None, :] R = u2[:, None] * eps2 * v2[None, :] return Q, R, g def lowrank_sinkhorn(X_s, X_t, a=None, b=None, reg=0, rank=None, alpha=1e-10, rescale_cost=True, init="random", reg_init=1e-1, seed_init=49, gamma_init="rescale", numItermax=2000, stopThr=1e-7, warn=True, log=False): r""" Solve the entropic regularization optimal transport problem under low-nonnegative rank constraints on the couplings. The function solves the following optimization problem: .. math:: \mathop{\inf_{(Q,R,g) \in \mathcal{C(a,b,r)}}} \langle C, Q\mathrm{diag}(1/g)R^T \rangle - \mathrm{reg} \cdot H((Q,R,g)) where : - :math:`C` is the (`dim_a`, `dim_b`) metric cost matrix - :math:`H((Q,R,g))` is the values of the three respective entropies evaluated for each term. - :math: `Q` and `R` are the low-rank matrix decomposition of the OT plan - :math: `g` is the weight vector for the low-rank decomposition of the OT plan - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target weights (histograms, both sum to 1) - :math: `r` is the rank of the OT plan - :math: `\mathcal{C(a,b,r)}` are the low-rank couplings of the OT problem Parameters ---------- X_s : array-like, shape (n_samples_a, dim) samples in the source domain X_t : array-like, shape (n_samples_b, dim) samples in the target domain a : array-like, shape (n_samples_a,) samples weights in the source domain b : array-like, shape (n_samples_b,) samples weights in the target domain reg : float, optional Regularization term >0 rank : int, optional. Default is None. (>0) Nonnegative rank of the OT plan. If None, min(ns, nt) is considered. alpha : int, optional. Default is 1e-10. (>0 and <1/r) Lower bound for the weight vector g. rescale_cost : bool, optional. Default is False Rescale the low rank factorization of the sqeuclidean cost matrix init : str, optional. Default is 'random'. Initialization strategy for the low rank couplings. 
'random', 'deterministic' or 'kmeans' reg_init : float, optional. Default is 1e-1. (>0) Regularization term for a 'kmeans' init. If None, 1 is considered. seed_init : int, optional. Default is 49. (>0) Random state for a 'random' or 'kmeans' init strategy. gamma_init : str, optional. Default is "rescale". Initialization strategy for gamma. 'rescale', or 'theory' Gamma is a constant that scales the convergence criterion of the Mirror Descent optimization scheme used to compute the low-rank couplings (Q, R and g) numItermax : int, optional. Default is 2000. Max number of iterations for the Dykstra algorithm stopThr : float, optional. Default is 1e-7. Stop threshold on error (>0) in Dykstra warn : bool, optional if True, raises a warning if the algorithm doesn't convergence. log : bool, optional record log if True Returns --------- Q : array-like, shape (n_samples_a, r) First low-rank matrix decomposition of the OT plan R: array-like, shape (n_samples_b, r) Second low-rank matrix decomposition of the OT plan g : array-like, shape (r, ) Weight vector for the low-rank decomposition of the OT log : dict (lazy_plan, value and value_linear) log dictionary return only if log==True in parameters References ---------- .. [65] Scetbon, M., Cuturi, M., & PeyrĂ©, G. (2021). "Low-rank Sinkhorn Factorization". In International Conference on Machine Learning. """ # POT backend nx = get_backend(X_s, X_t) ns, nt = X_s.shape[0], X_t.shape[0] # Initialize weights a, b if a is None: a = unif(ns, type_as=X_s) if b is None: b = unif(nt, type_as=X_t) # Compute rank (see Section 3.1, def 1) r = rank if rank is None: r = min(ns, nt) else: r = min(ns, nt, rank) if r <= 0: raise ValueError("The rank parameter cannot have a negative value") # Dykstra won't converge if 1/rank < alpha (see Section 3.2) if 1 / r < alpha: raise ValueError("alpha ({a}) should be smaller than 1/rank ({r}) for the Dykstra algorithm to converge.".format( a=alpha, r=1 / rank)) # Low rank decomposition of the sqeuclidean cost matrix M1, M2 = compute_lr_sqeuclidean_matrix(X_s, X_t, rescale_cost, nx) # Initialize the low rank matrices Q, R, g Q, R, g = _init_lr_sinkhorn(X_s, X_t, a, b, r, init, reg_init, seed_init, nx=nx) # Gamma initialization if gamma_init == "theory": L = nx.sqrt( 3 * (2 / (alpha**4)) * ((nx.norm(M1) * nx.norm(M2)) ** 2) + (reg + (2 / (alpha**3)) * (nx.norm(M1) * nx.norm(M2))) ** 2 ) gamma = 1 / (2 * L) if gamma_init not in ["rescale", "theory"]: raise (NotImplementedError('Not implemented gamma_init="{}"'.format(gamma_init))) # -------------------------- Low rank algorithm ------------------------------ # see "Section 3.3, Algorithm 3 LOT" for ii in range(100): # Compute C*R dot using the lr decomposition of C CR = nx.dot(M2.T, R) CR_ = nx.dot(M1, CR) diag_g = (1 / g)[None, :] CR_g = CR_ * diag_g # Compute C.T * Q using the lr decomposition of C CQ = nx.dot(M1.T, Q) CQ_ = nx.dot(M2, CQ) CQ_g = CQ_ * diag_g # Compute omega omega = nx.diag(nx.dot(Q.T, CR_)) # Rescale gamma at each iteration if gamma_init == "rescale": norm_1 = nx.max(nx.abs(CR_ * diag_g + reg * nx.log(Q))) ** 2 norm_2 = nx.max(nx.abs(CQ_ * diag_g + reg * nx.log(R))) ** 2 norm_3 = nx.max(nx.abs(-omega * diag_g)) ** 2 gamma = 10 / max(norm_1, norm_2, norm_3) eps1 = nx.exp(-gamma * CR_g - ((gamma * reg) - 1) * nx.log(Q)) eps2 = nx.exp(-gamma * CQ_g - ((gamma * reg) - 1) * nx.log(R)) eps3 = nx.exp((gamma * omega / (g**2)) - (gamma * reg - 1) * nx.log(g)) # LR Dykstra algorithm Q, R, g = _LR_Dysktra( eps1, eps2, eps3, a, b, alpha, stopThr, numItermax, warn, nx ) Q = 
Q + 1e-16 R = R + 1e-16 g = g + 1e-16 # ----------------- Compute lazy_plan, value and value_linear ------------------ # see "Section 3.2: The Low-rank OT Problem" in the paper # Compute lazy plan (using LazyTensor class) lazy_plan = get_lowrank_lazytensor(Q, R, 1 / g) # Compute value_linear (using trace formula) v1 = nx.dot(Q.T, M1) v2 = nx.dot(R, (v1.T * diag_g).T) value_linear = nx.sum(nx.diag(nx.dot(M2.T, v2))) # Compute value with entropy reg (see "Section 3.2" in the paper) reg_Q = nx.sum(Q * nx.log(Q + 1e-16)) # entropy for Q reg_g = nx.sum(g * nx.log(g + 1e-16)) # entropy for g reg_R = nx.sum(R * nx.log(R + 1e-16)) # entropy for R value = value_linear + reg * (reg_Q + reg_g + reg_R) if log: dict_log = dict() dict_log["value"] = value dict_log["value_linear"] = value_linear dict_log["lazy_plan"] = lazy_plan return Q, R, g, dict_log return Q, R, g python-pot-0.9.3+dfsg/ot/lp/000077500000000000000000000000001455713015700156305ustar00rootroot00000000000000python-pot-0.9.3+dfsg/ot/lp/EMD.h000066400000000000000000000020231455713015700164030ustar00rootroot00000000000000/* This file is a c++ wrapper function for computing the transportation cost * between two vectors given a cost matrix. * * It was written by Antoine Rolet (2014) and mainly consists of a wrapper * of the code written by Nicolas Bonneel available on this page * http://people.seas.harvard.edu/~nbonneel/FastTransport/ * * It was then modified to make it more amenable to python inline calling * * Please give relevant credit to the original author (Nicolas Bonneel) if * you use this code for a publication. * */ #ifndef EMD_H #define EMD_H #include <iostream> #include <vector> #include <cstdint> typedef unsigned int node_id_type; enum ProblemType { INFEASIBLE, OPTIMAL, UNBOUNDED, MAX_ITER_REACHED }; int EMD_wrap(int n1,int n2, double *X, double *Y,double *D, double *G, double* alpha, double* beta, double *cost, uint64_t maxIter); int EMD_wrap_omp(int n1,int n2, double *X, double *Y,double *D, double *G, double* alpha, double* beta, double *cost, uint64_t maxIter, int numThreads); #endif python-pot-0.9.3+dfsg/ot/lp/EMD_wrapper.cpp000066400000000000000000000125241455713015700205050ustar00rootroot00000000000000/* This file is a c++ wrapper function for computing the transportation cost * between two vectors given a cost matrix. * * It was written by Antoine Rolet (2014) and mainly consists of a wrapper * of the code written by Nicolas Bonneel available on this page * http://people.seas.harvard.edu/~nbonneel/FastTransport/ * * It was then modified to make it more amenable to python inline calling * * Please give relevant credit to the original author (Nicolas Bonneel) if * you use this code for a publication. * */ #include "network_simplex_simple.h" #include "network_simplex_simple_omp.h" #include "EMD.h" #include <cstdint> int EMD_wrap(int n1, int n2, double *X, double *Y, double *D, double *G, double* alpha, double* beta, double *cost, uint64_t maxIter) { // beware M and C are stored in row major C style!!! using namespace lemon; uint64_t n, m, cur; typedef FullBipartiteDigraph Digraph; DIGRAPH_TYPEDEFS(Digraph); // Get the number of non zero coordinates for r and c n=0; for (int i=0; i<n1; i++) { double val=*(X+i); if (val>0) { n++; }else if(val<0){ return INFEASIBLE; } } m=0; for (int i=0; i<n2; i++) { double val=*(Y+i); if (val>0) { m++; }else if(val<0){ return INFEASIBLE; } } // Define the graph std::vector<int> indI(n), indJ(m); std::vector<double> weights1(n), weights2(m); Digraph di(n, m); NetworkSimplexSimple<Digraph, double, double, node_id_type> net(di, true, (int) (n + m), n * m, maxIter); // Set supply and demand, don't account for 0 values (faster) cur=0; for (uint64_t i=0; i<n1; i++) { double val=*(X+i); if (val>0) { weights1[ cur ] = val; indI[cur++]=i; } } // Demand is actually negative supply... cur=0; for (uint64_t i=0; i<n2; i++) { double val=*(Y+i); if (val>0) { weights2[ cur ] = -val; indJ[cur++]=i; } } net.supplyMap(&weights1[0], (int) n, &weights2[0], (int) m); // Set the cost of each edge int64_t idarc = 0; for (uint64_t i=0; i<n; i++) { for (uint64_t j=0; j<m; j++) { double d = D[ indI[i]*n2 + indJ[j] ]; net.setCost(di.arcFromId(idarc), d); idarc++; } } // Solve the problem with the network simplex algorithm int ret = net.run(); uint64_t i, j; if (ret==(int)net.OPTIMAL || ret==(int)net.MAX_ITER_REACHED) { *cost = 0; Arc a; di.first(a); for (; a != INVALID; di.next(a)) { i = di.source(a); j = di.target(a); double flow = net.flow(a); *cost += flow * (*(D+indI[i]*n2+indJ[j-n])); *(G+indI[i]*n2+indJ[j-n]) = flow; *(alpha + indI[i]) = -net.potential(i); *(beta + indJ[j-n]) = net.potential(j); } } return ret; } int EMD_wrap_omp(int n1, int n2, double *X, double *Y, double *D, double *G, double* alpha, double* beta, double *cost, uint64_t maxIter, int numThreads) { // beware M and C are stored in row major C style!!! using namespace lemon_omp; uint64_t n, m, cur; typedef FullBipartiteDigraph Digraph; DIGRAPH_TYPEDEFS(Digraph); // Get the number of non zero coordinates for r and c n=0; for (int i=0; i<n1; i++) { double val=*(X+i); if (val>0) { n++; }else if(val<0){ return INFEASIBLE; } } m=0; for (int i=0; i<n2; i++) { double val=*(Y+i); if (val>0) { m++; }else if(val<0){ return INFEASIBLE; } } // Define the graph std::vector<int> indI(n), indJ(m); std::vector<double> weights1(n), weights2(m); Digraph di(n, m); NetworkSimplexSimple<Digraph, double, double, node_id_type> net(di, true, (int) (n + m), n * m, maxIter, numThreads); // Set supply and demand, don't account for 0 values (faster) cur=0; for (uint64_t i=0; i<n1; i++) { double val=*(X+i); if (val>0) { weights1[ cur ] = val; indI[cur++]=i; } } // Demand is actually negative supply... cur=0; for (uint64_t i=0; i<n2; i++) { double val=*(Y+i); if (val>0) { weights2[ cur ] = -val; indJ[cur++]=i; } } net.supplyMap(&weights1[0], (int) n, &weights2[0], (int) m); // Set the cost of each edge int64_t idarc = 0; for (uint64_t i=0; i<n; i++) { for (uint64_t j=0; j<m; j++) { double d = D[ indI[i]*n2 + indJ[j] ]; net.setCost(di.arcFromId(idarc), d); idarc++; } } // Solve the problem with the network simplex algorithm int ret = net.run(); uint64_t i, j; if (ret==(int)net.OPTIMAL || ret==(int)net.MAX_ITER_REACHED) { *cost = 0; Arc a; di.first(a); for (; a != INVALID; di.next(a)) { i = di.source(a); j = di.target(a); double flow = net.flow(a); *cost += flow * (*(D+indI[i]*n2+indJ[j-n])); *(G+indI[i]*n2+indJ[j-n]) = flow; *(alpha + indI[i]) = -net.potential(i); *(beta + indJ[j-n]) = net.potential(j); } } return ret; } python-pot-0.9.3+dfsg/ot/lp/__init__.py # -*- coding: utf-8 -*- """ Solvers for the original linear program OT problem. """ # Author: Remi Flamary # # License: MIT License import os import multiprocessing import sys import numpy as np import warnings from . import cvx from .cvx import barycenter from .dmmot import dmmot_monge_1dgrid_loss, dmmot_monge_1dgrid_optimize # import compiled emd from .emd_wrap import emd_c, check_result, emd_1d_sorted from .solver_1d import (emd_1d, emd2_1d, wasserstein_1d, binary_search_circle, wasserstein_circle, semidiscrete_wasserstein2_unif_circle) from ..utils import dist, list_to_array from ..utils import parmap from ..backend import get_backend __all__ = ['emd', 'emd2', 'barycenter', 'free_support_barycenter', 'cvx', 'emd_1d_sorted', 'emd_1d', 'emd2_1d', 'wasserstein_1d', 'generalized_free_support_barycenter', 'binary_search_circle', 'wasserstein_circle', 'semidiscrete_wasserstein2_unif_circle', 'dmmot_monge_1dgrid_loss', 'dmmot_monge_1dgrid_optimize'] def check_number_threads(numThreads): """Checks whether or not the requested number of threads has a valid value. Parameters ---------- numThreads : int or str The requested number of threads, should either be a strictly positive integer or "max" or None Returns ------- numThreads : int Corrected number of threads """ if (numThreads is None) or (isinstance(numThreads, str) and numThreads.lower() == 'max'): return -1 if (not isinstance(numThreads, int)) or numThreads < 1: raise ValueError('numThreads should either be "max" or a strictly positive integer') return numThreads def center_ot_dual(alpha0, beta0, a=None, b=None): r"""Center dual OT potentials w.r.t. their weights The main idea of this function is to find unique dual potentials that ensure some kind of centering/fairness: we look for dual potentials that lead to the same final objective value for both source and target (see below for more details). This helps with stability when the OT solver is called multiple times with small changes in the inputs. Basically we add another constraint to the potentials that does not change the objective value but ensures uniqueness.
The constraint is the following: .. math:: \alpha^T \mathbf{a} = \beta^T \mathbf{b} in addition to the OT problem constraints. since :math:`\sum_i a_i=\sum_j b_j` this can be solved by adding/removing a constant from both :math:`\alpha_0` and :math:`\beta_0`. .. math:: c &= \frac{\beta_0^T \mathbf{b} - \alpha_0^T \mathbf{a}}{\mathbf{1}^T \mathbf{b} + \mathbf{1}^T \mathbf{a}} \alpha &= \alpha_0 + c \beta &= \beta_0 + c Parameters ---------- alpha0 : (ns,) numpy.ndarray, float64 Source dual potential beta0 : (nt,) numpy.ndarray, float64 Target dual potential a : (ns,) numpy.ndarray, float64 Source histogram (uniform weight if empty list) b : (nt,) numpy.ndarray, float64 Target histogram (uniform weight if empty list) Returns ------- alpha : (ns,) numpy.ndarray, float64 Source centered dual potential beta : (nt,) numpy.ndarray, float64 Target centered dual potential """ # if no weights are provided, use uniform if a is None: a = np.ones(alpha0.shape[0]) / alpha0.shape[0] if b is None: b = np.ones(beta0.shape[0]) / beta0.shape[0] # compute constant that balances the weighted sums of the duals c = (b.dot(beta0) - a.dot(alpha0)) / (a.sum() + b.sum()) # update duals alpha = alpha0 + c beta = beta0 - c return alpha, beta def estimate_dual_null_weights(alpha0, beta0, a, b, M): r"""Estimate feasible values for 0-weighted dual potentials The feasible values are computed efficiently but rather coarsely. .. warning:: This function is necessary because the C++ solver in `emd_c` discards all samples in the distributions with zeros weights. This means that while the primal variable (transport matrix) is exact, the solver only returns feasible dual potentials on the samples with weights different from zero. First we compute the constraints violations: .. math:: \mathbf{V} = \alpha + \beta^T - \mathbf{M} Next we compute the max amount of violation per row (:math:`\alpha`) and columns (:math:`beta`) .. math:: \mathbf{v^a}_i = \max_j \mathbf{V}_{i,j} \mathbf{v^b}_j = \max_i \mathbf{V}_{i,j} Finally we update the dual potential with 0 weights if a constraint is violated .. math:: \alpha_i = \alpha_i - \mathbf{v^a}_i \quad \text{ if } \mathbf{a}_i=0 \text{ and } \mathbf{v^a}_i>0 \beta_j = \beta_j - \mathbf{v^b}_j \quad \text{ if } \mathbf{b}_j=0 \text{ and } \mathbf{v^b}_j > 0 In the end the dual potentials are centered using function :py:func:`ot.lp.center_ot_dual`. Note that all those updates do not change the objective value of the solution but provide dual potentials that do not violate the constraints. 
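For instance (a toy illustration of the update above, not an example from the reference): if :math:`\mathbf{a}_i = 0` and the largest violation on that row is :math:`\mathbf{v^a}_i = 0.3 > 0`, then :math:`\alpha_i` is shifted down by :math:`0.3`; the row constraints become feasible while the objective :math:`\alpha^T \mathbf{a} + \beta^T \mathbf{b}` is unchanged because that row carries no mass.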
Parameters ---------- alpha0 : (ns,) numpy.ndarray, float64 Source dual potential beta0 : (nt,) numpy.ndarray, float64 Target dual potential alpha0 : (ns,) numpy.ndarray, float64 Source dual potential beta0 : (nt,) numpy.ndarray, float64 Target dual potential a : (ns,) numpy.ndarray, float64 Source distribution (uniform weights if empty list) b : (nt,) numpy.ndarray, float64 Target distribution (uniform weights if empty list) M : (ns,nt) numpy.ndarray, float64 Loss matrix (c-order array with type float64) Returns ------- alpha : (ns,) numpy.ndarray, float64 Source corrected dual potential beta : (nt,) numpy.ndarray, float64 Target corrected dual potential """ # binary indexing of non-zeros weights asel = a != 0 bsel = b != 0 # compute dual constraints violation constraint_violation = alpha0[:, None] + beta0[None, :] - M # Compute largest violation per line and columns aviol = np.max(constraint_violation, 1) bviol = np.max(constraint_violation, 0) # update corrects violation of alpha_up = -1 * ~asel * np.maximum(aviol, 0) beta_up = -1 * ~bsel * np.maximum(bviol, 0) alpha = alpha0 + alpha_up beta = beta0 + beta_up return center_ot_dual(alpha, beta, a, b) def emd(a, b, M, numItermax=100000, log=False, center_dual=True, numThreads=1, check_marginals=True): r"""Solves the Earth Movers distance problem and returns the OT matrix .. math:: \gamma = \mathop{\arg \min}_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F s.t. \ \gamma \mathbf{1} = \mathbf{a} \gamma^T \mathbf{1} = \mathbf{b} \gamma \geq 0 where : - :math:`\mathbf{M}` is the metric cost matrix - :math:`\mathbf{a}` and :math:`\mathbf{b}` are the sample weights .. warning:: Note that the :math:`\mathbf{M}` matrix in numpy needs to be a C-order numpy.array in float64 format. It will be converted if not in this format .. note:: This function is backend-compatible and will work on arrays from all compatible backends. But the algorithm uses the C++ CPU backend which can lead to copy overhead on GPU arrays. .. note:: This function will cast the computed transport plan to the data type of the provided input with the following priority: :math:`\mathbf{a}`, then :math:`\mathbf{b}`, then :math:`\mathbf{M}` if marginals are not provided. Casting to an integer tensor might result in a loss of precision. If this behaviour is unwanted, please make sure to provide a floating point input. .. note:: An error will be raised if the vectors :math:`\mathbf{a}` and :math:`\mathbf{b}` do not sum to the same value. Uses the algorithm proposed in :ref:`[1] `. Parameters ---------- a : (ns,) array-like, float Source histogram (uniform weight if empty list) b : (nt,) array-like, float Target histogram (uniform weight if empty list) M : (ns,nt) array-like, float Loss matrix (c-order array in numpy with type float64) numItermax : int, optional (default=100000) The maximum number of iterations before stopping the optimization algorithm if it has not converged. log: bool, optional (default=False) If True, returns a dictionary containing the cost and dual variables. Otherwise returns only the optimal transportation matrix. center_dual: boolean, optional (default=True) If True, centers the dual potential using function :py:func:`ot.lp.center_ot_dual`. numThreads: int or "max", optional (default=1, i.e. OpenMP is not used) If compiled with OpenMP, chooses the number of threads to parallelize. "max" selects the highest number possible. check_marginals: bool, optional (default=True) If True, checks that the marginals mass are equal. If False, skips the check. 
Returns ------- gamma: array-like, shape (ns, nt) Optimal transportation matrix for the given parameters log: dict, optional If input log is true, a dictionary containing the cost and dual variables and exit status Examples -------- Simple example with obvious solution. The function emd accepts lists and perform automatic conversion to numpy arrays >>> import ot >>> a=[.5,.5] >>> b=[.5,.5] >>> M=[[0.,1.],[1.,0.]] >>> ot.emd(a, b, M) array([[0.5, 0. ], [0. , 0.5]]) .. _references-emd: References ---------- .. [1] Bonneel, N., Van De Panne, M., Paris, S., & Heidrich, W. (2011, December). Displacement interpolation using Lagrangian mass transport. In ACM Transactions on Graphics (TOG) (Vol. 30, No. 6, p. 158). ACM. See Also -------- ot.bregman.sinkhorn : Entropic regularized OT ot.optim.cg : General regularized OT """ # convert to numpy if list a, b, M = list_to_array(a, b, M) a0, b0, M0 = a, b, M if len(a0) != 0: type_as = a0 elif len(b0) != 0: type_as = b0 else: type_as = M0 nx = get_backend(M0, a0, b0) # convert to numpy M, a, b = nx.to_numpy(M, a, b) # ensure float64 a = np.asarray(a, dtype=np.float64) b = np.asarray(b, dtype=np.float64) M = np.asarray(M, dtype=np.float64, order='C') # if empty array given then use uniform distributions if len(a) == 0: a = np.ones((M.shape[0],), dtype=np.float64) / M.shape[0] if len(b) == 0: b = np.ones((M.shape[1],), dtype=np.float64) / M.shape[1] assert (a.shape[0] == M.shape[0] and b.shape[0] == M.shape[1]), \ "Dimension mismatch, check dimensions of M with a and b" # ensure that same mass if check_marginals: np.testing.assert_almost_equal(a.sum(0), b.sum(0), err_msg='a and b vector must have the same sum', decimal=6) b = b * a.sum() / b.sum() asel = a != 0 bsel = b != 0 numThreads = check_number_threads(numThreads) G, cost, u, v, result_code = emd_c(a, b, M, numItermax, numThreads) if center_dual: u, v = center_ot_dual(u, v, a, b) if np.any(~asel) or np.any(~bsel): u, v = estimate_dual_null_weights(u, v, a, b, M) result_code_string = check_result(result_code) if not nx.is_floating_point(type_as): warnings.warn( "Input histogram consists of integer. The transport plan will be " "casted accordingly, possibly resulting in a loss of precision. " "If this behaviour is unwanted, please make sure your input " "histogram consists of floating point elements.", stacklevel=2 ) if log: log = {} log['cost'] = cost log['u'] = nx.from_numpy(u, type_as=type_as) log['v'] = nx.from_numpy(v, type_as=type_as) log['warning'] = result_code_string log['result_code'] = result_code return nx.from_numpy(G, type_as=type_as), log return nx.from_numpy(G, type_as=type_as) def emd2(a, b, M, processes=1, numItermax=100000, log=False, return_matrix=False, center_dual=True, numThreads=1, check_marginals=True): r"""Solves the Earth Movers distance problem and returns the loss .. math:: \min_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F s.t. \ \gamma \mathbf{1} = \mathbf{a} \gamma^T \mathbf{1} = \mathbf{b} \gamma \geq 0 where : - :math:`\mathbf{M}` is the metric cost matrix - :math:`\mathbf{a}` and :math:`\mathbf{b}` are the sample weights .. note:: This function is backend-compatible and will work on arrays from all compatible backends. But the algorithm uses the C++ CPU backend which can lead to copy overhead on GPU arrays. .. note:: This function will cast the computed transport plan and transportation loss to the data type of the provided input with the following priority: :math:`\mathbf{a}`, then :math:`\mathbf{b}`, then :math:`\mathbf{M}` if marginals are not provided. 
Casting to an integer tensor might result in a loss of precision. If this behaviour is unwanted, please make sure to provide a floating point input. .. note:: An error will be raised if the vectors :math:`\mathbf{a}` and :math:`\mathbf{b}` do not sum to the same value. Uses the algorithm proposed in :ref:`[1] `. Parameters ---------- a : (ns,) array-like, float64 Source histogram (uniform weight if empty list) b : (nt,) array-like, float64 Target histogram (uniform weight if empty list) M : (ns,nt) array-like, float64 Loss matrix (for numpy c-order array with type float64) processes : int, optional (default=1) Nb of processes used for multiple emd computation (deprecated) numItermax : int, optional (default=100000) The maximum number of iterations before stopping the optimization algorithm if it has not converged. log: boolean, optional (default=False) If True, returns a dictionary containing dual variables. Otherwise returns only the optimal transportation cost. return_matrix: boolean, optional (default=False) If True, returns the optimal transportation matrix in the log. center_dual: boolean, optional (default=True) If True, centers the dual potential using function :py:func:`ot.lp.center_ot_dual`. numThreads: int or "max", optional (default=1, i.e. OpenMP is not used) If compiled with OpenMP, chooses the number of threads to parallelize. "max" selects the highest number possible. check_marginals: bool, optional (default=True) If True, checks that the marginals mass are equal. If False, skips the check. Returns ------- W: float, array-like Optimal transportation loss for the given parameters log: dict If input log is true, a dictionary containing dual variables and exit status Examples -------- Simple example with obvious solution. The function emd accepts lists and perform automatic conversion to numpy arrays >>> import ot >>> a=[.5,.5] >>> b=[.5,.5] >>> M=[[0.,1.],[1.,0.]] >>> ot.emd2(a,b,M) 0.0 .. _references-emd2: References ---------- .. [1] Bonneel, N., Van De Panne, M., Paris, S., & Heidrich, W. (2011, December). Displacement interpolation using Lagrangian mass transport. In ACM Transactions on Graphics (TOG) (Vol. 30, No. 6, p. 158). ACM. 
See Also -------- ot.bregman.sinkhorn : Entropic regularized OT ot.optim.cg : General regularized OT """ a, b, M = list_to_array(a, b, M) a0, b0, M0 = a, b, M if len(a0) != 0: type_as = a0 elif len(b0) != 0: type_as = b0 else: type_as = M0 nx = get_backend(M0, a0, b0) # convert to numpy M, a, b = nx.to_numpy(M, a, b) a = np.asarray(a, dtype=np.float64) b = np.asarray(b, dtype=np.float64) M = np.asarray(M, dtype=np.float64, order='C') # if empty array given then use uniform distributions if len(a) == 0: a = np.ones((M.shape[0],), dtype=np.float64) / M.shape[0] if len(b) == 0: b = np.ones((M.shape[1],), dtype=np.float64) / M.shape[1] assert (a.shape[0] == M.shape[0] and b.shape[0] == M.shape[1]), \ "Dimension mismatch, check dimensions of M with a and b" # ensure that same mass if check_marginals: np.testing.assert_almost_equal(a.sum(0), b.sum(0,keepdims=True), err_msg='a and b vector must have the same sum', decimal=6) b = b * a.sum(0) / b.sum(0,keepdims=True) asel = a != 0 numThreads = check_number_threads(numThreads) if log or return_matrix: def f(b): bsel = b != 0 G, cost, u, v, result_code = emd_c(a, b, M, numItermax, numThreads) if center_dual: u, v = center_ot_dual(u, v, a, b) if np.any(~asel) or np.any(~bsel): u, v = estimate_dual_null_weights(u, v, a, b, M) result_code_string = check_result(result_code) log = {} if not nx.is_floating_point(type_as): warnings.warn( "Input histogram consists of integer. The transport plan will be " "casted accordingly, possibly resulting in a loss of precision. " "If this behaviour is unwanted, please make sure your input " "histogram consists of floating point elements.", stacklevel=2 ) G = nx.from_numpy(G, type_as=type_as) if return_matrix: log['G'] = G log['u'] = nx.from_numpy(u, type_as=type_as) log['v'] = nx.from_numpy(v, type_as=type_as) log['warning'] = result_code_string log['result_code'] = result_code cost = nx.set_gradients(nx.from_numpy(cost, type_as=type_as), (a0, b0, M0), (log['u'] - nx.mean(log['u']), log['v'] - nx.mean(log['v']), G)) return [cost, log] else: def f(b): bsel = b != 0 G, cost, u, v, result_code = emd_c(a, b, M, numItermax, numThreads) if center_dual: u, v = center_ot_dual(u, v, a, b) if np.any(~asel) or np.any(~bsel): u, v = estimate_dual_null_weights(u, v, a, b, M) if not nx.is_floating_point(type_as): warnings.warn( "Input histogram consists of integer. The transport plan will be " "casted accordingly, possibly resulting in a loss of precision. " "If this behaviour is unwanted, please make sure your input " "histogram consists of floating point elements.", stacklevel=2 ) G = nx.from_numpy(G, type_as=type_as) cost = nx.set_gradients(nx.from_numpy(cost, type_as=type_as), (a0, b0, M0), (nx.from_numpy(u - np.mean(u), type_as=type_as), nx.from_numpy(v - np.mean(v), type_as=type_as), G)) check_result(result_code) return cost if len(b.shape) == 1: return f(b) nb = b.shape[1] if processes > 1: warnings.warn( "The 'processes' parameter has been deprecated. " "Multiprocessing should be done outside of POT." ) res = list(map(f, [b[:, i].copy() for i in range(nb)])) return res def free_support_barycenter(measures_locations, measures_weights, X_init, b=None, weights=None, numItermax=100, stopThr=1e-7, verbose=False, log=None, numThreads=1): r""" Solves the free support (locations of the barycenters are optimized, not the weights) Wasserstein barycenter problem (i.e. the weighted Frechet mean for the 2-Wasserstein distance), formally: .. 
math:: \min_\mathbf{X} \quad \sum_{i=1}^N w_i W_2^2(\mathbf{b}, \mathbf{X}, \mathbf{a}_i, \mathbf{X}_i) where : - :math:`w \in \mathbb{(0, 1)}^{N}`'s are the barycenter weights and sum to one - `measures_weights` denotes the :math:`\mathbf{a}_i \in \mathbb{R}^{k_i}`: empirical measures weights (on the simplex) - `measures_locations` denotes the :math:`\mathbf{X}_i \in \mathbb{R}^{k_i, d}`: empirical measures atoms locations - :math:`\mathbf{b} \in \mathbb{R}^{k}` is the desired weights vector of the barycenter This problem is considered in :ref:`[20] <references-free-support-barycenter>` (Algorithm 2). There are two differences with that algorithm: - we do not optimize over the weights - we do not do a line search for the locations updates, i.e. we use :math:`\theta = 1` in :ref:`[20] <references-free-support-barycenter>` (Algorithm 2). This can be seen as a discrete implementation of the fixed-point algorithm of :ref:`[43] <references-free-support-barycenter>` proposed in the continuous setting. Parameters ---------- measures_locations : list of N (k_i,d) array-like The discrete support of a measure supported on :math:`k_i` locations of a `d`-dimensional space (:math:`k_i` can be different for each element of the list) measures_weights : list of N (k_i,) array-like Numpy arrays where each numpy array has :math:`k_i` non-negative values summing to one representing the weights of each discrete input measure X_init : (k,d) array-like Initialization of the support locations (on `k` atoms) of the barycenter b : (k,) array-like Initialization of the weights of the barycenter (non-negative, summing to 1) weights : (N,) array-like Initialization of the coefficients of the barycenter (non-negative, summing to 1) numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on error (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True numThreads: int or "max", optional (default=1, i.e. OpenMP is not used) If compiled with OpenMP, chooses the number of threads to parallelize. "max" selects the highest number possible. Returns ------- X : (k,d) array-like Support locations (on k atoms) of the barycenter .. _references-free-support-barycenter: References ---------- .. [20] Cuturi, Marco, and Arnaud Doucet. "Fast computation of Wasserstein barycenters." International Conference on Machine Learning. 2014. .. [43] Álvarez-Esteban, Pedro C., et al. "A fixed-point approach to barycenters in Wasserstein space." Journal of Mathematical Analysis and Applications 441.2 (2016): 744-762. """ nx = get_backend(*measures_locations, *measures_weights, X_init) iter_count = 0 N = len(measures_locations) k = X_init.shape[0] d = X_init.shape[1] if b is None: b = nx.ones((k,), type_as=X_init) / k if weights is None: weights = nx.ones((N,), type_as=X_init) / N X = X_init log_dict = {} displacement_square_norms = [] displacement_square_norm = stopThr + 1. while (displacement_square_norm > stopThr and iter_count < numItermax): T_sum = nx.zeros((k, d), type_as=X_init) for (measure_locations_i, measure_weights_i, weight_i) in zip(measures_locations, measures_weights, weights): M_i = dist(X, measure_locations_i) T_i = emd(b, measure_weights_i, M_i, numThreads=numThreads) T_sum = T_sum + weight_i * 1.
/ b[:, None] * nx.dot(T_i, measure_locations_i) displacement_square_norm = nx.sum((T_sum - X) ** 2) if log: displacement_square_norms.append(displacement_square_norm) X = T_sum if verbose: print('iteration %d, displacement_square_norm=%f\n' % (iter_count, displacement_square_norm)) iter_count += 1 if log: log_dict['displacement_square_norms'] = displacement_square_norms return X, log_dict else: return X def generalized_free_support_barycenter(X_list, a_list, P_list, n_samples_bary, Y_init=None, b=None, weights=None, numItermax=100, stopThr=1e-7, verbose=False, log=None, numThreads=1, eps=0): r""" Solves the free support generalized Wasserstein barycenter problem: finding a barycenter (a discrete measure with a fixed number of points and uniform weights) whose respective projections fit the input measures. More formally: .. math:: \min_\gamma \quad \sum_{i=1}^p w_i W_2^2(\nu_i, \mathbf{P}_i\#\gamma) where : - :math:`\gamma = \sum_{l=1}^n b_l\delta_{y_l}` is the desired barycenter with each :math:`y_l \in \mathbb{R}^d` - :math:`\mathbf{b} \in \mathbb{R}^{n}` is the desired weights vector of the barycenter - The input measures are :math:`\nu_i = \sum_{j=1}^{k_i}a_{i,j}\delta_{x_{i,j}}` - The :math:`\mathbf{a}_i \in \mathbb{R}^{k_i}` are the respective empirical measures weights (on the simplex) - The :math:`\mathbf{X}_i \in \mathbb{R}^{k_i, d_i}` are the respective empirical measures atoms locations - :math:`w = (w_1, \cdots w_p)` are the barycenter coefficients (on the simplex) - Each :math:`\mathbf{P}_i \in \mathbb{R}^{d, d_i}`, and :math:`P_i\#\nu_i = \sum_{j=1}^{k_i}a_{i,j}\delta_{P_ix_{i,j}}` As shown by :ref:`[42] <references-generalized-free-support-barycenter>`, this problem can be re-written as a Wasserstein Barycenter problem, which we solve using the free support method :ref:`[20] <references-generalized-free-support-barycenter>` (Algorithm 2). Parameters ---------- X_list : list of p (k_i,d_i) array-like Discrete supports of the input measures: each consists of :math:`k_i` locations of a `d_i`-dimensional space (:math:`k_i` can be different for each element of the list) a_list : list of p (k_i,) array-like Measure weights: each element is a vector (k_i) on the simplex P_list : list of p (d_i,d) array-like Each :math:`P_i` is a linear map :math:`\mathbb{R}^{d} \rightarrow \mathbb{R}^{d_i}` n_samples_bary : int Number of barycenter points Y_init : (n_samples_bary,d) array-like Initialization of the support locations (on `k` atoms) of the barycenter b : (n_samples_bary,) array-like Initialization of the weights of the barycenter measure (on the simplex) weights : (p,) array-like Initialization of the coefficients of the barycenter (on the simplex) numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on error (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True numThreads: int or "max", optional (default=1, i.e. OpenMP is not used) If compiled with OpenMP, chooses the number of threads to parallelize. "max" selects the highest number possible. eps: Stability coefficient for the change of variable matrix inversion If the :math:`\mathbf{P}_i^T` matrices don't span :math:`\mathbb{R}^d`, the problem is ill-defined and a matrix inversion will fail. In this case one may set eps=1e-8 and get a solution anyway (which may make little sense) Returns ------- Y : (n_samples_bary,d) array-like Support locations (on n_samples_bary atoms) of the barycenter .. _references-generalized-free-support-barycenter: References ---------- .. [20] Cuturi, M. and Doucet, A..
"Fast computation of Wasserstein barycenters." International Conference on Machine Learning. 2014. .. [42] Delon, J., Gozlan, N., and Saint-Dizier, A.. Generalized Wasserstein barycenters between probability measures living on different subspaces. arXiv preprint arXiv:2105.09755, 2021. """ nx = get_backend(*X_list, *a_list, *P_list) d = P_list[0].shape[1] p = len(X_list) if weights is None: weights = nx.ones(p, type_as=X_list[0]) / p # variable change matrix to reduce the problem to a Wasserstein Barycenter (WB) A = eps * nx.eye(d, type_as=X_list[0]) # if eps nonzero: will force the invertibility of A for (P_i, lambda_i) in zip(P_list, weights): A = A + lambda_i * P_i.T @ P_i B = nx.inv(nx.sqrtm(A)) Z_list = [x @ Pi @ B.T for (x, Pi) in zip(X_list, P_list)] # change of variables -> (WB) problem on Z if Y_init is None: Y_init = nx.randn(n_samples_bary, d, type_as=X_list[0]) if b is None: b = nx.ones(n_samples_bary, type_as=X_list[0]) / n_samples_bary # not optimized out = free_support_barycenter(Z_list, a_list, Y_init, b, numItermax=numItermax, stopThr=stopThr, verbose=verbose, log=log, numThreads=numThreads) if log: # unpack Y, log_dict = out else: Y = out log_dict = None Y = Y @ B.T # return to the Generalized WB formulation if log: return Y, log_dict else: return Y python-pot-0.9.3+dfsg/ot/lp/core.h000066400000000000000000000066171455713015700167430ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; -*- * * This file has been adapted by Nicolas Bonneel (2013), * from full_graph.h from LEMON, a generic C++ optimization library, * to make the other files independant from the rest of * the original library. * * **** Original file Copyright Notice : * Copyright (C) 2003-2010 * Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport * (Egervary Research Group on Combinatorial Optimization, EGRES). * * Permission to use, modify and distribute this software is granted * provided that this copyright notice appears in all copies. For * precise terms see the accompanying LICENSE file. * * This software is provided "AS IS" with no warranty of any kind, * express or implied, and with no claim as to its suitability for any * purpose. * */ #ifndef LEMON_CORE_H #define LEMON_CORE_H #include #include // Disable the following warnings when compiling with MSVC: // C4250: 'class1' : inherits 'class2::member' via dominance // C4355: 'this' : used in base member initializer list // C4503: 'function' : decorated name length exceeded, name was truncated // C4800: 'type' : forcing value to bool 'true' or 'false' (performance warning) // C4996: 'function': was declared deprecated #ifdef _MSC_VER #pragma warning( disable : 4250 4355 4503 4800 4996 ) #endif ///\file ///\brief LEMON core utilities. /// ///This header file contains core utilities for LEMON. ///It is automatically included by all graph types, therefore it usually ///do not have to be included directly. namespace lemon { /// \brief Dummy type to make it easier to create invalid iterators. /// /// Dummy type to make it easier to create invalid iterators. /// See \ref INVALID for the usage. struct Invalid { public: bool operator==(Invalid) { return true; } bool operator!=(Invalid) { return false; } bool operator< (Invalid) { return false; } }; /// \brief Invalid iterators. /// /// \ref Invalid is a global type that converts to each iterator /// in such a way that the value of the target iterator will be invalid. 
#ifdef LEMON_ONLY_TEMPLATES const Invalid INVALID = Invalid(); #else extern const Invalid INVALID; #endif /// \addtogroup gutils /// @{ ///Create convenience typedefs for the digraph types and iterators ///This \c \#define creates convenient type definitions for the following ///types of \c Digraph: \c Node, \c NodeIt, \c Arc, \c ArcIt, \c InArcIt, ///\c OutArcIt, \c BoolNodeMap, \c IntNodeMap, \c DoubleNodeMap, ///\c BoolArcMap, \c IntArcMap, \c DoubleArcMap. /// ///\note If the graph type is a dependent type, i.e. the graph type depends ///on a template parameter, then use \c TEMPLATE_DIGRAPH_TYPEDEFS() ///macro. #define DIGRAPH_TYPEDEFS(Digraph) \ typedef Digraph::Node Node; \ typedef Digraph::Arc Arc; \ ///Create convenience typedefs for the digraph types and iterators ///\see DIGRAPH_TYPEDEFS /// ///\note Use this macro if the graph type is a dependent type, ///i.e. the graph type depends on a template parameter. #define TEMPLATE_DIGRAPH_TYPEDEFS(Digraph) \ typedef typename Digraph::Node Node; \ typedef typename Digraph::Arc Arc; \ } //namespace lemon #endif python-pot-0.9.3+dfsg/ot/lp/cvx.py000066400000000000000000000102701455713015700170020ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ LP solvers for optimal transport using cvxopt """ # Author: Remi Flamary # # License: MIT License import numpy as np import scipy as sp import scipy.sparse as sps try: import cvxopt from cvxopt import solvers, matrix, spmatrix except ImportError: cvxopt = False def scipy_sparse_to_spmatrix(A): """Efficient conversion from scipy sparse matrix to cvxopt sparse matrix""" coo = A.tocoo() SP = spmatrix(coo.data.tolist(), coo.row.tolist(), coo.col.tolist(), size=A.shape) return SP def barycenter(A, M, weights=None, verbose=False, log=False, solver='highs-ipm'): r"""Compute the Wasserstein barycenter of distributions A The function solves the following optimization problem [16]: .. math:: \mathbf{a} = arg\min_\mathbf{a} \sum_i W_{1}(\mathbf{a},\mathbf{a}_i) where : - :math:`W_1(\cdot,\cdot)` is the Wasserstein distance (see :py:func:`ot.emd`) - :math:`\mathbf{a}_i` are training distributions in the columns of matrix :math:`\mathbf{A}` The linear program is solved using the LP solver from scipy.optimize. If the cvxopt solver is installed, it can be used instead. Note that this problem does not scale well (both in memory and computational time). Parameters ---------- A : np.ndarray (d,n) n training distributions a_i of size d M : np.ndarray (d,d) loss matrix for OT weights : np.ndarray (n,) Weights of each histogram a_i on the simplex (barycentric coordinates) verbose : bool, optional Print information along iterations log : bool, optional record log if True solver : string, optional The solver used, default 'highs-ipm'; 'interior-point', 'highs', 'highs-ipm' and 'highs-ds' use the LP solver from scipy.optimize, while None, 'glpk' or 'mosek' use the solver from cvxopt. Returns ------- a : (d,) ndarray Wasserstein barycenter log : dict log dictionary returned only if log==True in parameters References ---------- .. [16] Agueh, M., & Carlier, G. (2011). Barycenters in the Wasserstein space. SIAM Journal on Mathematical Analysis, 43(2), 904-924.
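Examples
--------
A minimal usage sketch (illustrative; two histograms on a 2-bin grid, ground cost built with ``ot.utils.dist0``, solver left at its default):

>>> import numpy as np
>>> import ot
>>> from ot.lp.cvx import barycenter
>>> A = np.array([[.6, .4], [.4, .6]])
>>> M = ot.utils.dist0(2)
>>> bary = barycenter(A, M)  # doctest: +SKIP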
""" if weights is None: weights = np.ones(A.shape[1]) / A.shape[1] else: assert len(weights) == A.shape[1] n_distributions = A.shape[1] n = A.shape[0] n2 = n * n c = np.zeros((0)) b_eq1 = np.zeros((0)) for i in range(n_distributions): c = np.concatenate((c, M.ravel() * weights[i])) b_eq1 = np.concatenate((b_eq1, A[:, i])) c = np.concatenate((c, np.zeros(n))) lst_idiag1 = [sps.kron(sps.eye(n), np.ones((1, n))) for i in range(n_distributions)] # row constraints A_eq1 = sps.hstack((sps.block_diag(lst_idiag1), sps.coo_matrix((n_distributions * n, n)))) # columns constraints lst_idiag2 = [] lst_eye = [] for i in range(n_distributions): if i == 0: lst_idiag2.append(sps.kron(np.ones((1, n)), sps.eye(n))) lst_eye.append(-sps.eye(n)) else: lst_idiag2.append(sps.kron(np.ones((1, n)), sps.eye(n - 1, n))) lst_eye.append(-sps.eye(n - 1, n)) A_eq2 = sps.hstack((sps.block_diag(lst_idiag2), sps.vstack(lst_eye))) b_eq2 = np.zeros((A_eq2.shape[0])) # full problem A_eq = sps.vstack((A_eq1, A_eq2)) b_eq = np.concatenate((b_eq1, b_eq2)) if not cvxopt or solver in ['interior-point', 'highs', 'highs-ipm', 'highs-ds']: # cvxopt not installed or interior point if solver is None: solver = 'interior-point' options = {'disp': verbose} sol = sp.optimize.linprog(c, A_eq=A_eq, b_eq=b_eq, method=solver, options=options) x = sol.x b = x[-n:] else: h = np.zeros((n_distributions * n2 + n)) G = -sps.eye(n_distributions * n2 + n) sol = solvers.lp(matrix(c), scipy_sparse_to_spmatrix(G), matrix(h), A=scipy_sparse_to_spmatrix(A_eq), b=matrix(b_eq), solver=solver) x = np.array(sol['x']) b = x[-n:].ravel() if log: return b, sol else: return b python-pot-0.9.3+dfsg/ot/lp/dmmot.py000066400000000000000000000270401455713015700173250ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ d-MMOT solvers for optimal transport """ # Author: Ronak Mehta # Xizheng Yu # # License: MIT License import numpy as np from ..backend import get_backend def dist_monge_max_min(i): r""" A tensor :math:c is Monge if for all valid :math:i_1, \ldots i_d and :math:j_1, \ldots, j_d, .. math:: c(s_1, \ldots, s_d) + c(t_1, \ldots t_d) \leq c(i_1, \ldots i_d) + c(j_1, \ldots, j_d) where :math:s_k = \min(i_k, j_k) and :math:t_k = \max(i_k, j_k). Our focus is on a specific cost, which is known to be Monge: .. math:: c(i_1,i_2,\ldots,i_d) = \max{i_k:k\in[d]} - \min{i_k:k\in[d]}. When :math:d=2, this cost reduces to :math:c(i_1,i_2)=|i_1-i_2|, which agrees with the classical EMD cost. This choice of :math:c is called the generalized EMD cost. Parameters ---------- i : list The list of integer indexes. Returns ------- cost : numeric value The ground cost (generalized EMD cost) of the tensor. References ---------- .. [56] Jeffery Kline. Properties of the d-dimensional earth mover's problem. Discrete Applied Mathematics, 265: 128-141, 2019. .. [57] Wolfgang W. Bein, Peter Brucker, James K. Park, and Pramod K. Pathak. A monge property for the d-dimensional transportation problem. Discrete Applied Mathematics, 58(2):97-109, 1995. ISSN 0166-218X. doi: https://doi.org/10.1016/0166-218X(93)E0121-E. URL https://www.sciencedirect.com/ science/article/pii/0166218X93E0121E. Workshop on Discrete Algoritms. """ return max(i) - min(i) def dmmot_monge_1dgrid_loss(A, verbose=False, log=False): r""" Compute the discrete multi-marginal optimal transport of distributions A. This function operates on distributions whose supports are real numbers on the real line. 
The algorithm solves both primal and dual d-MMOT programs concurrently to produce the optimal transport plan as well as the total (minimal) cost. The cost is a ground cost, and the solution is independent of which Monge cost is desired. The algorithm accepts :math:`d` distributions (i.e., histograms) :math:`a_{1}, \ldots, a_{d} \in \mathbb{R}_{+}^{n}` with :math:`e^{\prime} a_{j}=1` for all :math:`j \in[d]`. Although the algorithm as stated requires all histograms to have the same number of bins, it can be easily adapted to accept as inputs :math:`a_{i} \in \mathbb{R}_{+}^{n_{i}}` with :math:`n_{i} \neq n_{j}` [50]. The function solves the following optimization problem [51]: .. math:: \begin{align}\begin{aligned} \underset{\gamma\in\mathbb{R}^{n^{d}}_{+}} {\textrm{min}} \sum_{i_1,\ldots,i_d} c(i_1,\ldots, i_d)\, \gamma(i_1,\ldots,i_d) \quad \textrm{s.t.} \sum_{i_2,\ldots,i_d} \gamma(i_1,\ldots,i_d) &= a_1(i_1), (\forall i_1\in[n])\\ \qquad\vdots\\ \sum_{i_1,\ldots,i_{d-1}} \gamma(i_1,\ldots,i_d) &= a_{d}(i_{d}), (\forall i_d\in[n]). \end{aligned} \end{align} Parameters ---------- A : nx.ndarray, shape (dim, n_hists) The input ndarray containing distributions of n bins in d dimensions. verbose : bool, optional If True, print debugging information during execution. Default=False. log : bool, optional If True, record log. Default is False. Returns ------- obj : float the value of the primal objective function evaluated at the solution. log : dict A dictionary containing the log of the discrete d-MMOT problem: - 'A': a dictionary that maps tuples of indices to the corresponding primal variables. The tuples are the indices of the entries that are set to their minimum value during the algorithm. - 'primal objective': a float, the value of the objective function evaluated at the solution. - 'dual': a list of arrays, the dual variables corresponding to the input arrays. The i-th element of the list is the dual variable corresponding to the i-th dimension of the input arrays. - 'dual objective': a float, the value of the dual objective function evaluated at the solution. References ---------- .. [55] Ronak Mehta, Jeffery Kline, Vishnu Suresh Lokhande, Glenn Fung, & Vikas Singh (2023). Efficient Discrete Multi Marginal Optimal Transport Regularization. In The Eleventh International Conference on Learning Representations. .. [56] Jeffery Kline. Properties of the d-dimensional earth mover's problem. Discrete Applied Mathematics, 265: 128-141, 2019. .. [58] Leonid V Kantorovich. On the translocation of masses. Dokl. Akad. Nauk SSSR, 37:227-229, 1942.
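Examples
--------
A small illustrative sketch (two 2-bin histograms; the value follows the greedy primal-dual sweep implemented below, and the function is assumed to be exposed as ``ot.lp.dmmot_monge_1dgrid_loss``, consistent with the See Also entries):

>>> import numpy as np
>>> import ot
>>> A = np.array([[.25, .5], [.75, .5]])
>>> float(ot.lp.dmmot_monge_1dgrid_loss(A))
0.25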
See Also -------- ot.lp.dmmot_monge_1dgrid_optimize : Optimize the d-Dimensional Earth Mover's Distance (d-MMOT) """ nx = get_backend(A) A_copy = A A = nx.to_numpy(A) AA = [np.copy(A[:, j]) for j in range(A.shape[1])] dims = tuple([len(_) for _ in AA]) xx = {} dual = [np.zeros(d) for d in dims] idx = [0, ] * len(AA) obj = 0 if verbose: print('i minval oldidx\t\tobj\t\tvals') while all([i < _ for _, i in zip(dims, idx)]): vals = [v[i] for v, i in zip(AA, idx)] minval = min(vals) i = vals.index(minval) xx[tuple(idx)] = minval obj += (dist_monge_max_min(idx)) * minval for v, j in zip(AA, idx): v[j] -= minval # oldidx = nx.copy(idx) oldidx = idx.copy() idx[i] += 1 if idx[i] < dims[i]: temp = (dist_monge_max_min(idx) - dist_monge_max_min(oldidx) + dual[i][idx[i] - 1]) dual[i][idx[i]] += temp if verbose: print(i, minval, oldidx, obj, '\t', vals) # the above terminates when any entry in idx equals the corresponding # value in dims. This leaves other dimensions incomplete; the remaining # terms of the dual solution must be filled in for _, i in enumerate(idx): try: dual[_][i:] = dual[_][i] except Exception: pass dualobj = sum([np.dot(A[:, i], arr) for i, arr in enumerate(dual)]) obj = nx.from_numpy(obj) log_dict = {'A': xx, 'primal objective': obj, 'dual': dual, 'dual objective': dualobj} # define forward/backward relations for pytorch obj = nx.set_gradients(obj, (A_copy), (dual)) if log: return obj, log_dict else: return obj def dmmot_monge_1dgrid_optimize( A, niters=100, lr_init=1e-5, lr_decay=0.995, print_rate=100, verbose=False, log=False): r"""Minimize the d-dimensional EMD using gradient descent. Discrete Multi-Marginal Optimal Transport (d-MMOT): Let :math:`a_1, \ldots, a_d\in\mathbb{R}^n_{+}` be discrete probability distributions. Here, the d-MMOT is the LP, .. math:: \begin{align}\begin{aligned} \underset{x\in\mathbb{R}^{n^{d}}_{+}} {\textrm{min}} \sum_{i_1,\ldots,i_d} c(i_1,\ldots, i_d)\, x(i_1,\ldots,i_d) \quad \textrm{s.t.} \sum_{i_2,\ldots,i_d} x(i_1,\ldots,i_d) &= a_1(i_1), (\forall i_1\in[n])\\ \qquad\vdots\\ \sum_{i_1,\ldots,i_{d-1}} x(i_1,\ldots,i_d) &= a_{d}(i_{d}), (\forall i_d\in[n]). \end{aligned} \end{align} The dual linear program of the d-MMOT problem is: .. math:: \underset{z_j\in\mathbb{R}^n, j\in[d]}{\textrm{maximize}}\qquad\sum_{j} a_j'z_j\qquad \textrm{subject to}\qquad z_{1}(i_1)+\cdots+z_{d}(i_{d}) \leq c(i_1,\ldots,i_{d}), where the indices in the constraints include all :math:`i_j\in[n]`, :math:`j\in[d]`. Denote by :math:`\phi(a_1,\ldots,a_d)` the optimal objective value of the d-MMOT LP. Let :math:`z^*` be an optimal solution to the dual program. Then, .. math:: \begin{align} \nabla \phi(a_1,\ldots,a_{d}) &= z^*, ~~\text{and for any $t\in \mathbb{R}$,}~~ \phi(a_1,a_2,\ldots,a_{d}) = \sum_{j}a_j' (z_j^* + t\, \eta), \nonumber \\ \text{where } \eta &:= (z_1^{*}(n)\,e, z^*_2(n)\,e, \cdots, z^*_{d}(n)\,e) \end{align} Using these dual variables, naturally provided by the algorithm in ot.lp.dmmot_monge_1dgrid_loss, gradient steps move each input distribution to minimize their d-MMOT distance. Parameters ---------- A : nx.ndarray, shape (dim, n_hists) The input ndarray containing distributions of n bins in d dimensions. niters : int, optional (default=100) The maximum number of iterations for the optimization algorithm. lr_init : float, optional (default=1e-5) The initial learning rate (step size) for the optimization algorithm. lr_decay : float, optional (default=0.995) The learning rate decay rate in each iteration.
print_rate : int, optional (default=100) The rate at which to print the objective value and gradient norm during the optimization algorithm. verbose : bool, optional If True, print debugging information during execution. Default=False. log : bool, optional If True, record log. Default is False. Returns ------- a : list of ndarrays, each of shape (n,) The optimal solution as a list of d approximate histograms (barycenters), each of length n. log : dict log dictionary returned only if log==True in parameters References ---------- .. [55] Ronak Mehta, Jeffery Kline, Vishnu Suresh Lokhande, Glenn Fung, & Vikas Singh (2023). Efficient Discrete Multi Marginal Optimal Transport Regularization. In The Eleventh International Conference on Learning Representations. .. [60] Olvi L Mangasarian and RR Meyer. Nonlinear perturbation of linear programs. SIAM Journal on Control and Optimization, 17(6):745-752, 1979 .. [59] Michael C Ferris and Olvi L Mangasarian. Finite perturbation of convex programs. Applied Mathematics and Optimization, 23(1):263-273, 1991. See Also -------- ot.lp.dmmot_monge_1dgrid_loss: d-Dimensional Earth Mover's Solver """ nx = get_backend(A) A = nx.to_numpy(A) n, d = A.shape # n is dim, d is n_hists def dualIter(A, lr): funcval, log_dict = dmmot_monge_1dgrid_loss( A, verbose=verbose, log=True) grad = np.column_stack(log_dict['dual']) A_new = np.reshape(A, (n, d)) - grad * lr return funcval, A_new, grad, log_dict def renormalize(A): A = np.reshape(A, (n, d)) for i in range(A.shape[1]): if min(A[:, i]) < 0: A[:, i] -= min(A[:, i]) A[:, i] /= np.sum(A[:, i]) return A def listify(A): return [A[:, i] for i in range(A.shape[1])] lr = lr_init funcval, _, grad, log_dict = dualIter(A, lr) gn = np.linalg.norm(grad) print(f'Initial:\t\tObj:\t{funcval:.4f}\tGradNorm:\t{gn:.4f}') for i in range(niters): A = renormalize(A) funcval, A, grad, log_dict = dualIter(A, lr) gn = np.linalg.norm(grad) if i % print_rate == 0: print(f'Iter {i:2.0f}:\tObj:\t{funcval:.4f}\tGradNorm:\t{gn:.4f}') lr *= lr_decay A = renormalize(A) a = listify(A) if log: return a, log_dict else: return a python-pot-0.9.3+dfsg/ot/lp/emd_wrap.pyx000066400000000000000000000153221455713015700201730ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Cython linker with C solver """ # Author: Remi Flamary # # License: MIT License import numpy as np cimport numpy as np from ..utils import dist cimport cython cimport libc.math as math from libc.stdint cimport uint64_t import warnings cdef extern from "EMD.h": int EMD_wrap(int n1,int n2, double *X, double *Y,double *D, double *G, double* alpha, double* beta, double *cost, uint64_t maxIter) nogil int EMD_wrap_omp(int n1,int n2, double *X, double *Y,double *D, double *G, double* alpha, double* beta, double *cost, uint64_t maxIter, int numThreads) nogil cdef enum ProblemType: INFEASIBLE, OPTIMAL, UNBOUNDED, MAX_ITER_REACHED def check_result(result_code): if result_code == OPTIMAL: return None if result_code == INFEASIBLE: message = "Problem infeasible. Check that a and b are in the simplex" elif result_code == UNBOUNDED: message = "Problem unbounded" elif result_code == MAX_ITER_REACHED: message = "numItermax reached before optimality. Try to increase numItermax."
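# Non-optimal termination is reported as a warning rather than an exception,
# so callers (e.g. emd/emd2) can still return the computed plan and store the
# message in their log under log['warning'].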
warnings.warn(message) return message @cython.boundscheck(False) @cython.wraparound(False) def emd_c(np.ndarray[double, ndim=1, mode="c"] a, np.ndarray[double, ndim=1, mode="c"] b, np.ndarray[double, ndim=2, mode="c"] M, uint64_t max_iter, int numThreads): """ Solves the Earth Movers distance problem and returns the optimal transport matrix gamm=emd(a,b,M) .. math:: \gamma = arg\min_\gamma <\gamma,M>_F s.t. \gamma 1 = a \gamma^T 1= b \gamma\geq 0 where : - M is the metric cost matrix - a and b are the sample weights .. warning:: Note that the M matrix needs to be a C-order :py.cls:`numpy.array` .. warning:: The C++ solver discards all samples in the distributions with zeros weights. This means that while the primal variable (transport matrix) is exact, the solver only returns feasible dual potentials on the samples with weights different from zero. Parameters ---------- a : (ns,) numpy.ndarray, float64 source histogram b : (nt,) numpy.ndarray, float64 target histogram M : (ns,nt) numpy.ndarray, float64 loss matrix max_iter : uint64_t The maximum number of iterations before stopping the optimization algorithm if it has not converged. Returns ------- gamma: (ns x nt) numpy.ndarray Optimal transportation matrix for the given parameters """ cdef int n1= M.shape[0] cdef int n2= M.shape[1] cdef int nmax=n1+n2-1 cdef int result_code = 0 cdef int nG=0 cdef double cost=0 cdef np.ndarray[double, ndim=1, mode="c"] alpha=np.zeros(n1) cdef np.ndarray[double, ndim=1, mode="c"] beta=np.zeros(n2) cdef np.ndarray[double, ndim=2, mode="c"] G=np.zeros([0, 0]) cdef np.ndarray[double, ndim=1, mode="c"] Gv=np.zeros(0) if not len(a): a=np.ones((n1,))/n1 if not len(b): b=np.ones((n2,))/n2 # init OT matrix G=np.zeros([n1, n2]) # calling the function with nogil: if numThreads == 1: result_code = EMD_wrap(n1, n2, a.data, b.data, M.data, G.data, alpha.data, beta.data, &cost, max_iter) else: result_code = EMD_wrap_omp(n1, n2, a.data, b.data, M.data, G.data, alpha.data, beta.data, &cost, max_iter, numThreads) return G, cost, alpha, beta, result_code @cython.boundscheck(False) @cython.wraparound(False) def emd_1d_sorted(np.ndarray[double, ndim=1, mode="c"] u_weights, np.ndarray[double, ndim=1, mode="c"] v_weights, np.ndarray[double, ndim=1, mode="c"] u, np.ndarray[double, ndim=1, mode="c"] v, str metric='sqeuclidean', double p=1.): r""" Solves the Earth Movers distance problem between sorted 1d measures and returns the OT matrix and the associated cost Parameters ---------- u_weights : (ns,) ndarray, float64 Source histogram v_weights : (nt,) ndarray, float64 Target histogram u : (ns,) ndarray, float64 Source dirac locations (on the real line) v : (nt,) ndarray, float64 Target dirac locations (on the real line) metric: str, optional (default='sqeuclidean') Metric to be used. Only strings listed in :func:`ot.dist` are accepted. Due to implementation details, this function runs faster when `'sqeuclidean'`, `'minkowski'`, `'cityblock'`, or `'euclidean'` metrics are used. p: float, optional (default=1.0) The p-norm to apply for if metric='minkowski' Returns ------- gamma: (n, ) ndarray, float64 Values in the Optimal transportation matrix indices: (n, 2) ndarray, int64 Indices of the values stored in gamma for the Optimal transportation matrix cost cost associated to the optimal transportation """ cdef double cost = 0. cdef Py_ssize_t n = u_weights.shape[0] cdef Py_ssize_t m = v_weights.shape[0] cdef Py_ssize_t i = 0 cdef double w_i = u_weights[0] cdef Py_ssize_t j = 0 cdef double w_j = v_weights[0] cdef double m_ij = 0. 
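# Greedy two-pointer sweep over the sorted supports: at each step the smaller
# of the two remaining masses (w_i, w_j) is transported, so the plan contains
# at most n + m - 1 nonzero entries (hence the buffer sizes below).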
cdef np.ndarray[double, ndim=1, mode="c"] G = np.zeros((n + m - 1, ), dtype=np.float64) cdef np.ndarray[long long, ndim=2, mode="c"] indices = np.zeros((n + m - 1, 2), dtype=np.int64) cdef Py_ssize_t cur_idx = 0 while True: if metric == 'sqeuclidean': m_ij = (u[i] - v[j]) * (u[i] - v[j]) elif metric == 'cityblock' or metric == 'euclidean': m_ij = math.fabs(u[i] - v[j]) elif metric == 'minkowski': m_ij = math.pow(math.fabs(u[i] - v[j]), p) else: m_ij = dist(u[i].reshape((1, 1)), v[j].reshape((1, 1)), metric=metric)[0, 0] if w_i < w_j or j == m - 1: cost += m_ij * w_i G[cur_idx] = w_i indices[cur_idx, 0] = i indices[cur_idx, 1] = j i += 1 if i == n: break w_j -= w_i w_i = u_weights[i] else: cost += m_ij * w_j G[cur_idx] = w_j indices[cur_idx, 0] = i indices[cur_idx, 1] = j j += 1 if j == m: break w_i -= w_j w_j = v_weights[j] cur_idx += 1 cur_idx += 1 return G[:cur_idx], indices[:cur_idx], cost python-pot-0.9.3+dfsg/ot/lp/full_bipartitegraph.h000066400000000000000000000135471455713015700220420ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; -*- * * This file has been adapted by Nicolas Bonneel (2013), * from full_graph.h from LEMON, a generic C++ optimization library, * to implement a lightweight fully connected bipartite graph. A previous * version of this file is used as part of the Displacement Interpolation * project, * Web: http://www.cs.ubc.ca/labs/imager/tr/2011/DisplacementInterpolation/ * * **** Original file Copyright Notice : * Copyright (C) 2003-2010 * Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport * (Egervary Research Group on Combinatorial Optimization, EGRES). * * Permission to use, modify and distribute this software is granted * provided that this copyright notice appears in all copies. For * precise terms see the accompanying LICENSE file. * * This software is provided "AS IS" with no warranty of any kind, * express or implied, and with no claim as to its suitability for any * purpose. * */ #pragma once #include "core.h" #include ///\ingroup graphs ///\file ///\brief FullBipartiteDigraph and FullBipartiteGraph classes. namespace lemon { class FullBipartiteDigraphBase { public: typedef FullBipartiteDigraphBase Digraph; //class Node; typedef int Node; //class Arc; typedef int64_t Arc; protected: int _node_num; int64_t _arc_num; FullBipartiteDigraphBase() {} void construct(int n1, int n2) { _node_num = n1+n2; _arc_num = (int64_t)n1 * (int64_t)n2; _n1=n1; _n2=n2;} public: int _n1, _n2; Node operator()(int ix) const { return Node(ix); } static int index(const Node& node) { return node; } Arc arc(const Node& s, const Node& t) const { if (s<_n1 && t>=_n1) return Arc((int64_t)s * (int64_t)_n2 + (int64_t)(t-_n1) ); else return Arc(-1); } int nodeNum() const { return _node_num; } int64_t arcNum() const { return _arc_num; } int maxNodeId() const { return _node_num - 1; } int64_t maxArcId() const { return _arc_num - 1; } Node source(Arc arc) const { return arc / _n2; } Node target(Arc arc) const { return (arc % _n2) + _n1; } static int id(Node node) { return node; } static int64_t id(Arc arc) { return arc; } static Node nodeFromId(int id) { return Node(id);} static Arc arcFromId(int64_t id) { return Arc(id);} Arc findArc(Node s, Node t, Arc prev = -1) const { return prev == -1 ? 
arc(s, t) : -1; } void first(Node& node) const { node = _node_num - 1; } static void next(Node& node) { --node; } void first(Arc& arc) const { arc = _arc_num - 1; } static void next(Arc& arc) { --arc; } void firstOut(Arc& arc, const Node& node) const { if (node>=_n1) arc = -1; else arc = (node + 1) * _n2 - 1; } void nextOut(Arc& arc) const { if (arc % _n2 == 0) arc = 0; --arc; } void firstIn(Arc& arc, const Node& node) const { if (node<_n1) arc = -1; else arc = _arc_num + node - _node_num; } void nextIn(Arc& arc) const { arc -= _n2; if (arc < 0) arc = -1; } }; /// \ingroup graphs /// /// \brief A directed full graph class. /// /// FullBipartiteDigraph is a simple and fast implementation of directed full /// (complete) graphs. It contains an arc from each node to each node /// (including a loop for each node), therefore the number of arcs /// is the square of the number of nodes. /// This class is completely static and it needs constant memory space. /// Thus you can neither add nor delete nodes or arcs, however /// the structure can be resized using resize(). /// /// This type fully conforms to the \ref concepts::Digraph "Digraph concept". /// Most of its member functions and nested classes are documented /// only in the concept class. /// /// This class provides constant time counting for nodes and arcs. /// /// \note FullBipartiteDigraph and FullBipartiteGraph classes are very similar, /// but there are two differences. While this class conforms only /// to the \ref concepts::Digraph "Digraph" concept, FullBipartiteGraph /// conforms to the \ref concepts::Graph "Graph" concept, /// moreover FullBipartiteGraph does not contain a loop for each /// node as this class does. /// /// \sa FullBipartiteGraph class FullBipartiteDigraph : public FullBipartiteDigraphBase { typedef FullBipartiteDigraphBase Parent; public: /// \brief Default constructor. /// /// Default constructor. The number of nodes and arcs will be zero. FullBipartiteDigraph() { construct(0,0); } /// \brief Constructor /// /// Constructor. /// \param n The number of the nodes. FullBipartiteDigraph(int n1, int n2) { construct(n1, n2); } /// \brief Returns the node with the given index. /// /// Returns the node with the given index. Since this structure is /// completely static, the nodes can be indexed with integers from /// the range [0..nodeNum()-1]. /// The index of a node is the same as its ID. /// \sa index() Node operator()(int ix) const { return Parent::operator()(ix); } /// \brief Returns the index of the given node. /// /// Returns the index of the given node. Since this structure is /// completely static, the nodes can be indexed with integers from /// the range [0..nodeNum()-1]. /// The index of a node is the same as its ID. /// \sa operator()() static int index(const Node& node) { return Parent::index(node); } /// \brief Returns the arc connecting the given nodes. /// /// Returns the arc connecting the given nodes. /*Arc arc(Node u, Node v) const { return Parent::arc(u, v); }*/ /// \brief Number of nodes. int nodeNum() const { return Parent::nodeNum(); } /// \brief Number of arcs. int64_t arcNum() const { return Parent::arcNum(); } }; } //namespace lemon python-pot-0.9.3+dfsg/ot/lp/full_bipartitegraph_omp.h000066400000000000000000000156161455713015700227140ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; -*- * * This file has been adapted by Nicolas Bonneel (2013), * from full_graph.h from LEMON, a generic C++ optimization library, * to implement a lightweight fully connected bipartite graph. 
A previous * version of this file is used as part of the Displacement Interpolation * project, * Web: http://www.cs.ubc.ca/labs/imager/tr/2011/DisplacementInterpolation/ * * **** Original file Copyright Notice : * Copyright (C) 2003-2010 * Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport * (Egervary Research Group on Combinatorial Optimization, EGRES). * * Permission to use, modify and distribute this software is granted * provided that this copyright notice appears in all copies. For * precise terms see the accompanying LICENSE file. * * This software is provided "AS IS" with no warranty of any kind, * express or implied, and with no claim as to its suitability for any * purpose. * */ #pragma once #include ///\ingroup graphs ///\file ///\brief FullBipartiteDigraph and FullBipartiteGraph classes. namespace lemon_omp { ///This \c \#define creates convenient type definitions for the following ///types of \c Digraph: \c Node, \c NodeIt, \c Arc, \c ArcIt, \c InArcIt, ///\c OutArcIt, \c BoolNodeMap, \c IntNodeMap, \c DoubleNodeMap, ///\c BoolArcMap, \c IntArcMap, \c DoubleArcMap. /// ///\note If the graph type is a dependent type, ie. the graph type depend ///on a template parameter, then use \c TEMPLATE_DIGRAPH_TYPEDEFS() ///macro. #define DIGRAPH_TYPEDEFS(Digraph) \ typedef Digraph::Node Node; \ typedef Digraph::Arc Arc; \ ///Create convenience typedefs for the digraph types and iterators ///\see DIGRAPH_TYPEDEFS /// ///\note Use this macro, if the graph type is a dependent type, ///ie. the graph type depend on a template parameter. #define TEMPLATE_DIGRAPH_TYPEDEFS(Digraph) \ typedef typename Digraph::Node Node; \ typedef typename Digraph::Arc Arc; \ class FullBipartiteDigraphBase { public: typedef FullBipartiteDigraphBase Digraph; //class Node; typedef int Node; //class Arc; typedef int64_t Arc; protected: int _node_num; int64_t _arc_num; FullBipartiteDigraphBase() {} void construct(int n1, int n2) { _node_num = n1+n2; _arc_num = (int64_t)n1 * (int64_t)n2; _n1=n1; _n2=n2;} public: int _n1, _n2; Node operator()(int ix) const { return Node(ix); } static int index(const Node& node) { return node; } Arc arc(const Node& s, const Node& t) const { if (s<_n1 && t>=_n1) return Arc((int64_t)s * (int64_t)_n2 + (int64_t)(t-_n1) ); else return Arc(-1); } int nodeNum() const { return _node_num; } int64_t arcNum() const { return _arc_num; } int maxNodeId() const { return _node_num - 1; } int64_t maxArcId() const { return _arc_num - 1; } Node source(Arc arc) const { return arc / _n2; } Node target(Arc arc) const { return (arc % _n2) + _n1; } static int id(Node node) { return node; } static int64_t id(Arc arc) { return arc; } static Node nodeFromId(int id) { return Node(id);} static Arc arcFromId(int64_t id) { return Arc(id);} Arc findArc(Node s, Node t, Arc prev = -1) const { return prev == -1 ? arc(s, t) : -1; } void first(Node& node) const { node = _node_num - 1; } static void next(Node& node) { --node; } void first(Arc& arc) const { arc = _arc_num - 1; } static void next(Arc& arc) { --arc; } void firstOut(Arc& arc, const Node& node) const { if (node>=_n1) arc = -1; else arc = (node + 1) * _n2 - 1; } void nextOut(Arc& arc) const { if (arc % _n2 == 0) arc = 0; --arc; } void firstIn(Arc& arc, const Node& node) const { if (node<_n1) arc = -1; else arc = _arc_num + node - _node_num; } void nextIn(Arc& arc) const { arc -= _n2; if (arc < 0) arc = -1; } }; /// \ingroup graphs /// /// \brief A directed full graph class. 
/// /// FullBipartiteDigraph is a simple and fast implementation of directed full /// (complete) graphs. It contains an arc from each node to each node /// (including a loop for each node), therefore the number of arcs /// is the square of the number of nodes. /// This class is completely static and it needs constant memory space. /// Thus you can neither add nor delete nodes or arcs, however /// the structure can be resized using resize(). /// /// This type fully conforms to the \ref concepts::Digraph "Digraph concept". /// Most of its member functions and nested classes are documented /// only in the concept class. /// /// This class provides constant time counting for nodes and arcs. /// /// \note FullBipartiteDigraph and FullBipartiteGraph classes are very similar, /// but there are two differences. While this class conforms only /// to the \ref concepts::Digraph "Digraph" concept, FullBipartiteGraph /// conforms to the \ref concepts::Graph "Graph" concept, /// moreover FullBipartiteGraph does not contain a loop for each /// node as this class does. /// /// \sa FullBipartiteGraph class FullBipartiteDigraph : public FullBipartiteDigraphBase { typedef FullBipartiteDigraphBase Parent; public: /// \brief Default constructor. /// /// Default constructor. The number of nodes and arcs will be zero. FullBipartiteDigraph() { construct(0,0); } /// \brief Constructor /// /// Constructor. /// \param n The number of the nodes. FullBipartiteDigraph(int n1, int n2) { construct(n1, n2); } /// \brief Returns the node with the given index. /// /// Returns the node with the given index. Since this structure is /// completely static, the nodes can be indexed with integers from /// the range [0..nodeNum()-1]. /// The index of a node is the same as its ID. /// \sa index() Node operator()(int ix) const { return Parent::operator()(ix); } /// \brief Returns the index of the given node. /// /// Returns the index of the given node. Since this structure is /// completely static, the nodes can be indexed with integers from /// the range [0..nodeNum()-1]. /// The index of a node is the same as its ID. /// \sa operator()() static int index(const Node& node) { return Parent::index(node); } /// \brief Returns the arc connecting the given nodes. /// /// Returns the arc connecting the given nodes. /*Arc arc(Node u, Node v) const { return Parent::arc(u, v); }*/ /// \brief Number of nodes. int nodeNum() const { return Parent::nodeNum(); } /// \brief Number of arcs. int64_t arcNum() const { return Parent::arcNum(); } }; } //namespace lemon_omp python-pot-0.9.3+dfsg/ot/lp/network_simplex_simple.h000066400000000000000000001562531455713015700226160ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; -*- * * * This file has been adapted by Nicolas Bonneel (2013), * from network_simplex.h from LEMON, a generic C++ optimization library, * to implement a lightweight network simplex for mass transport, more * memory efficient than the original file. A previous version of this file * is used as part of the Displacement Interpolation project, * Web: http://www.cs.ubc.ca/labs/imager/tr/2011/DisplacementInterpolation/ * * **** Original file Copyright Notice : * * Copyright (C) 2003-2010 * Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport * (Egervary Research Group on Combinatorial Optimization, EGRES). * * Permission to use, modify and distribute this software is granted * provided that this copyright notice appears in all copies. For * precise terms see the accompanying LICENSE file.
* * This software is provided "AS IS" with no warranty of any kind, * express or implied, and with no claim as to its suitability for any * purpose. * */ #pragma once #undef DEBUG_LVL #define DEBUG_LVL 0 #if DEBUG_LVL>0 #include #endif #undef EPSILON #undef _EPSILON #undef MAX_DEBUG_ITER #define EPSILON 2.2204460492503131e-15 #define _EPSILON 1e-8 #define MAX_DEBUG_ITER 100000 /// \ingroup min_cost_flow_algs /// /// \file /// \brief Network Simplex algorithm for finding a minimum cost flow. // if your compiler has troubles with stdext or hashmaps, just comment the following line to use a slower std::map instead //#define HASHMAP #include #include #include #include #include #ifdef HASHMAP #include #else #include #endif #include //#include "core.h" //#include "lmath.h" //#include "sparse_array_n.h" #include "full_bipartitegraph.h" #undef INVALIDNODE #undef INVALID #define INVALIDNODE -1 #define INVALID (-1) namespace lemon { template class ProxyObject; template class SparseValueVector { public: SparseValueVector(size_t n=0) { } void resize(size_t n=0){}; T operator[](const size_t id) const { #ifdef HASHMAP typename stdext::hash_map::const_iterator it = data.find(id); #else typename std::map::const_iterator it = data.find(id); #endif if (it==data.end()) return 0; else return it->second; } ProxyObject operator[](const size_t id) { return ProxyObject( this, id ); } //private: #ifdef HASHMAP stdext::hash_map data; #else std::map data; #endif }; template class ProxyObject { public: ProxyObject( SparseValueVector *v, size_t idx ){_v=v; _idx=idx;}; ProxyObject & operator=( const T &v ) { // If we get here, we know that operator[] was called to perform a write access, // so we can insert an item in the vector if needed if (v!=0) _v->data[_idx]=v; return *this; } operator T() { // If we get here, we know that operator[] was called to perform a read access, // so we can simply return the existing object #ifdef HASHMAP typename stdext::hash_map::iterator it = _v->data.find(_idx); #else typename std::map::iterator it = _v->data.find(_idx); #endif if (it==_v->data.end()) return 0; else return it->second; } void operator+=(T val) { if (val==0) return; #ifdef HASHMAP typename stdext::hash_map::iterator it = _v->data.find(_idx); #else typename std::map::iterator it = _v->data.find(_idx); #endif if (it==_v->data.end()) _v->data[_idx] = val; else { T sum = it->second + val; if (sum==0) _v->data.erase(it); else it->second = sum; } } void operator-=(T val) { if (val==0) return; #ifdef HASHMAP typename stdext::hash_map::iterator it = _v->data.find(_idx); #else typename std::map::iterator it = _v->data.find(_idx); #endif if (it==_v->data.end()) _v->data[_idx] = -val; else { T sum = it->second - val; if (sum==0) _v->data.erase(it); else it->second = sum; } } SparseValueVector *_v; size_t _idx; }; /// \addtogroup min_cost_flow_algs /// @{ /// \brief Implementation of the primal Network Simplex algorithm /// for finding a \ref min_cost_flow "minimum cost flow". /// /// \ref NetworkSimplexSimple implements the primal Network Simplex algorithm /// for finding a \ref min_cost_flow "minimum cost flow" /// \ref amo93networkflows, \ref dantzig63linearprog, /// \ref kellyoneill91netsimplex. /// This algorithm is a highly efficient specialized version of the /// linear programming simplex method directly for the minimum cost /// flow problem. /// /// In general, %NetworkSimplexSimple is the fastest implementation available /// in LEMON for this problem. 
/// Moreover, it supports both directions of the supply/demand inequality /// constraints. For more information, see \ref SupplyType. /// /// Most of the parameters of the problem (except for the digraph) /// can be given using separate functions, and the algorithm can be /// executed using the \ref run() function. If some parameters are not /// specified, then default values will be used. /// /// \tparam GR The digraph type the algorithm runs on. /// \tparam V The number type used for flow amounts, capacity bounds /// and supply values in the algorithm. By default, it is \c int64_t. /// \tparam C The number type used for costs and potentials in the /// algorithm. By default, it is the same as \c V. /// /// \warning Both number types must be signed and all input data must /// be integer. /// /// \note %NetworkSimplexSimple provides five different pivot rule /// implementations, from which the most efficient one is used /// by default. For more information, see \ref PivotRule. template class NetworkSimplexSimple { public: /// \brief Constructor. /// /// The constructor of the class. /// /// \param graph The digraph the algorithm runs on. /// \param arc_mixing Indicate if the arcs have to be stored in a /// mixed order in the internal data structure. /// In special cases, it could lead to better overall performance, /// but it is usually slower. Therefore it is disabled by default. NetworkSimplexSimple(const GR& graph, bool arc_mixing, int nbnodes, ArcsType nb_arcs, uint64_t maxiters) : _graph(graph), //_arc_id(graph), _arc_mixing(arc_mixing), _init_nb_nodes(nbnodes), _init_nb_arcs(nb_arcs), MAX(std::numeric_limits::max()), INF(std::numeric_limits::has_infinity ? std::numeric_limits::infinity() : MAX) { // Reset data structures reset(); max_iter = maxiters; } /// The type of the flow amounts, capacity bounds and supply values typedef V Value; /// The type of the arc costs typedef C Cost; public: /// \brief Problem type constants for the \c run() function. /// /// Enum type containing the problem type constants that can be /// returned by the \ref run() function of the algorithm. enum ProblemType { /// The problem has no feasible solution (flow). INFEASIBLE, /// The problem has optimal solution (i.e. it is feasible and /// bounded), and the algorithm has found optimal flow and node /// potentials (primal and dual solutions). OPTIMAL, /// The objective function of the problem is unbounded, i.e. /// there is a directed cycle having negative total cost and /// infinite upper bound. UNBOUNDED, /// The maximum number of iteration has been reached MAX_ITER_REACHED }; /// \brief Constants for selecting the type of the supply constraints. /// /// Enum type containing constants for selecting the supply type, /// i.e. the direction of the inequalities in the supply/demand /// constraints of the \ref min_cost_flow "minimum cost flow problem". /// /// The default supply type is \c GEQ, the \c LEQ type can be /// selected using \ref supplyType(). /// The equality form is a special case of both supply types. enum SupplyType { /// This option means that there are "greater or equal" /// supply/demand constraints in the definition of the problem. GEQ, /// This option means that there are "less or equal" /// supply/demand constraints in the definition of the problem. 
LEQ }; private: uint64_t max_iter; TEMPLATE_DIGRAPH_TYPEDEFS(GR); typedef std::vector IntVector; typedef std::vector ArcVector; typedef std::vector ValueVector; typedef std::vector CostVector; // typedef SparseValueVector CostVector; typedef std::vector BoolVector; // Note: vector is used instead of vector for efficiency reasons // State constants for arcs enum ArcState { STATE_UPPER = -1, STATE_TREE = 0, STATE_LOWER = 1 }; typedef std::vector StateVector; // Note: vector is used instead of vector for // efficiency reasons private: // Data related to the underlying digraph const GR &_graph; int _node_num; ArcsType _arc_num; ArcsType _all_arc_num; ArcsType _search_arc_num; // Parameters of the problem SupplyType _stype; Value _sum_supply; inline int _node_id(int n) const {return _node_num-n-1;} ; // IntArcMap _arc_id; IntVector _source; // keep nodes as integers IntVector _target; bool _arc_mixing; public: // Node and arc data CostVector _cost; ValueVector _supply; ValueVector _flow; //SparseValueVector _flow; CostVector _pi; private: // Data for storing the spanning tree structure IntVector _parent; ArcVector _pred; IntVector _thread; IntVector _rev_thread; IntVector _succ_num; IntVector _last_succ; IntVector _dirty_revs; BoolVector _forward; StateVector _state; ArcsType _root; // Temporary data used in the current pivot iteration ArcsType in_arc, join, u_in, v_in, u_out, v_out; ArcsType first, second, right, last; ArcsType stem, par_stem, new_stem; Value delta; const Value MAX; ArcsType mixingCoeff; public: /// \brief Constant for infinite upper bounds (capacities). /// /// Constant for infinite upper bounds (capacities). /// It is \c std::numeric_limits::infinity() if available, /// \c std::numeric_limits::max() otherwise. const Value INF; private: // thank you to DVK and MizardX from StackOverflow for this function! 
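// Helper for the "arc mixing" storage layout: sequence(k) converts a linear
// arc index into its position in the interleaved internal order; getArcID()
// below relies on it when _arc_mixing is enabled.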
inline ArcsType sequence(ArcsType k) const { ArcsType smallv = (k > num_total_big_subsequence_numbers) & 1; k -= num_total_big_subsequence_numbers * smallv; ArcsType subsequence_length2 = subsequence_length- smallv; ArcsType subsequence_num = (k / subsequence_length2) + num_big_subseqiences * smallv; ArcsType subsequence_offset = (k % subsequence_length2) * mixingCoeff; return subsequence_offset + subsequence_num; } ArcsType subsequence_length; ArcsType num_big_subseqiences; ArcsType num_total_big_subsequence_numbers; inline ArcsType getArcID(const Arc &arc) const { //int n = _arc_num-arc._id-1; ArcsType n = _arc_num-GR::id(arc)-1; //ArcsType a = mixingCoeff*(n%mixingCoeff) + n/mixingCoeff; //ArcsType b = _arc_id[arc]; if (_arc_mixing) return sequence(n); else return n; } // finally unused because too slow inline ArcsType getSource(const ArcsType arc) const { //ArcsType a = _source[arc]; //return a; ArcsType n = _arc_num-arc-1; if (_arc_mixing) n = mixingCoeff*(n%mixingCoeff) + n/mixingCoeff; ArcsType b; if (n>=0) b = _node_id(_graph.source(GR::arcFromId( n ) )); else { n = arc+1-_arc_num; if ( n<=_node_num) b = _node_num; else if ( n>=_graph._n1) b = _graph._n1; else b = _graph._n1-n; } return b; } // Implementation of the Block Search pivot rule class BlockSearchPivotRule { private: // References to the NetworkSimplexSimple class const IntVector &_source; const IntVector &_target; const CostVector &_cost; const StateVector &_state; const CostVector &_pi; ArcsType &_in_arc; ArcsType _search_arc_num; // Pivot rule data ArcsType _block_size; ArcsType _next_arc; NetworkSimplexSimple &_ns; public: // Constructor BlockSearchPivotRule(NetworkSimplexSimple &ns) : _source(ns._source), _target(ns._target), _cost(ns._cost), _state(ns._state), _pi(ns._pi), _in_arc(ns.in_arc), _search_arc_num(ns._search_arc_num), _next_arc(0),_ns(ns) { // The main parameters of the pivot rule const double BLOCK_SIZE_FACTOR = 1.0; const ArcsType MIN_BLOCK_SIZE = 10; _block_size = std::max(ArcsType(BLOCK_SIZE_FACTOR * std::sqrt(double(_search_arc_num))), MIN_BLOCK_SIZE); } // Find next entering arc bool findEnteringArc() { Cost c, min = 0; ArcsType e; ArcsType cnt = _block_size; double a; for (e = _next_arc; e != _search_arc_num; ++e) { c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); if (c < min) { min = c; _in_arc = e; } if (--cnt == 0) { a=fabs(_pi[_source[_in_arc]])>fabs(_pi[_target[_in_arc]]) ? fabs(_pi[_source[_in_arc]]):fabs(_pi[_target[_in_arc]]); a=a>fabs(_cost[_in_arc])?a:fabs(_cost[_in_arc]); if (min < -EPSILON*a) goto search_end; cnt = _block_size; } } for (e = 0; e != _next_arc; ++e) { c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); if (c < min) { min = c; _in_arc = e; } if (--cnt == 0) { a=fabs(_pi[_source[_in_arc]])>fabs(_pi[_target[_in_arc]]) ? fabs(_pi[_source[_in_arc]]):fabs(_pi[_target[_in_arc]]); a=a>fabs(_cost[_in_arc])?a:fabs(_cost[_in_arc]); if (min < -EPSILON*a) goto search_end; cnt = _block_size; } } a=fabs(_pi[_source[_in_arc]])>fabs(_pi[_target[_in_arc]]) ? fabs(_pi[_source[_in_arc]]):fabs(_pi[_target[_in_arc]]); a=a>fabs(_cost[_in_arc])?a:fabs(_cost[_in_arc]); if (min >= -EPSILON*a) return false; search_end: _next_arc = e; return true; } }; //class BlockSearchPivotRule public: int _init_nb_nodes; ArcsType _init_nb_arcs; /// \name Parameters /// The parameters of the algorithm can be specified using these /// functions. /// @{ /// \brief Set the costs of the arcs. /// /// This function sets the costs of the arcs. 
/// If it is not used before calling \ref run(), the costs
/// will be set to \c 1 on all arcs.
///
/// \param map An arc map storing the costs.
/// Its \c Value type must be convertible to the \c Cost type
/// of the algorithm.
///
/// \return <tt>(*this)</tt>
template<typename CostMap>
NetworkSimplexSimple& costMap(const CostMap& map) {
    Arc a; _graph.first(a);
    for (; a != INVALID; _graph.next(a)) {
        _cost[getArcID(a)] = map[a];
    }
    return *this;
}

/// \brief Set the cost of one arc.
///
/// This function sets the cost of a single arc.
/// It is provided for memory reasons (costs can be set without
/// building a full cost map).
///
/// \param arc An arc.
/// \param cost The new cost of the arc.
///
/// \return <tt>(*this)</tt>
template<typename Value>
NetworkSimplexSimple& setCost(const Arc& arc, const Value cost) {
    _cost[getArcID(arc)] = cost;
    return *this;
}

/// \brief Set the supply values of the nodes.
///
/// This function sets the supply values of the nodes.
/// If neither this function nor \ref stSupply() is used before
/// calling \ref run(), the supply of each node will be set to zero.
///
/// \param map A node map storing the supply values.
/// Its \c Value type must be convertible to the \c Value type
/// of the algorithm.
///
/// \return <tt>(*this)</tt>
template<typename SupplyMap>
NetworkSimplexSimple& supplyMap(const SupplyMap& map) {
    Node n; _graph.first(n);
    for (; n != INVALIDNODE; _graph.next(n)) {
        _supply[_node_id(n)] = map[n];
    }
    return *this;
}

template<typename SupplyMap>
NetworkSimplexSimple& supplyMap(const SupplyMap* map1, int n1, const SupplyMap* map2, int n2) {
    Node n; _graph.first(n);
    for (; n != INVALIDNODE; _graph.next(n)) {
        if (n < n1)
            _supply[_node_id(n)] = map1[n];
        else
            _supply[_node_id(n)] = map2[n - n1];
    }
    return *this;
}

template<typename SupplyMap>
NetworkSimplexSimple& supplyMapAll(SupplyMap val1, int n1, SupplyMap val2, int n2) {
    Node n; _graph.first(n);
    for (; n != INVALIDNODE; _graph.next(n)) {
        if (n < n1)
            _supply[_node_id(n)] = val1;
        else
            _supply[_node_id(n)] = val2;
    }
    return *this;
}

/// \brief Set single source and target nodes and a supply value.
///
/// This function sets a single source node and a single target node
/// and the required flow value.
/// If neither this function nor \ref supplyMap() is used before
/// calling \ref run(), the supply of each node will be set to zero.
///
/// \param s The source node.
/// \param t The target node.
/// \param k The required amount of flow from node \c s to node \c t
/// (i.e. the supply of \c s and the demand of \c t).
///
/// \return <tt>(*this)</tt>
NetworkSimplexSimple& stSupply(const Node& s, const Node& t, Value k) {
    for (int i = 0; i != _node_num; ++i) {
        _supply[i] = 0;
    }
    _supply[_node_id(s)] = k;
    _supply[_node_id(t)] = -k;
    return *this;
}

/// \brief Set the type of the supply constraints.
///
/// This function sets the type of the supply/demand constraints.
/// If it is not used before calling \ref run(), the \ref GEQ supply
/// type will be used.
///
/// For more information, see \ref SupplyType.
///
/// \return <tt>(*this)</tt>
NetworkSimplexSimple& supplyType(SupplyType supply_type) {
    _stype = supply_type;
    return *this;
}

/// @}

/// \name Execution Control
/// The algorithm can be executed using \ref run().

/// @{

/// \brief Run the algorithm.
///
/// This function runs the algorithm.
/// The parameters can be specified using functions \ref lowerMap(),
/// \ref upperMap(), \ref costMap(), \ref supplyMap(), \ref stSupply(),
/// \ref supplyType().
/// For example,
/// \code
///   NetworkSimplexSimple<ListDigraph> ns(graph);
///   ns.lowerMap(lower).upperMap(upper).costMap(cost)
///     .supplyMap(sup).run();
/// \endcode
///
/// This function can be called more than once. All the given parameters
/// are kept for the next call, unless \ref resetParams() or \ref reset()
/// is used, thus only the modified parameters have to be set again.
/// If the underlying digraph was also modified after the construction
/// of the class (or the last \ref reset() call), then the \ref reset()
/// function must be called.
///
/// \param pivot_rule The pivot rule that will be used during the
/// algorithm. For more information, see \ref PivotRule.
///
/// \return \c INFEASIBLE if no feasible flow exists,
/// \n \c OPTIMAL if the problem has optimal solution
/// (i.e. it is feasible and bounded), and the algorithm has found
/// optimal flow and node potentials (primal and dual solutions),
/// \n \c UNBOUNDED if the objective function of the problem is
/// unbounded, i.e. there is a directed cycle having negative total
/// cost and infinite upper bound.
///
/// \see ProblemType, PivotRule
/// \see resetParams(), reset()
ProblemType run() {
#if DEBUG_LVL>0
    std::cout << "OPTIMAL = " << OPTIMAL << "\nINFEASIBLE = " << INFEASIBLE << "\nUNBOUNDED = " << UNBOUNDED << "\nMAX_ITER_REACHED = " << MAX_ITER_REACHED << "\n";
#endif
    if (!init()) return INFEASIBLE;
#if DEBUG_LVL>0
    std::cout << "Init done, starting iterations\n";
#endif
    return start();
}

/// \brief Reset all the parameters that have been given before.
///
/// This function resets all the parameters that have been given
/// before using functions \ref lowerMap(), \ref upperMap(),
/// \ref costMap(), \ref supplyMap(), \ref stSupply(), \ref supplyType().
///
/// It is useful for multiple \ref run() calls. Basically, all the given
/// parameters are kept for the next \ref run() call, unless
/// \ref resetParams() or \ref reset() is used.
/// If the underlying digraph was also modified after the construction
/// of the class or the last \ref reset() call, then the \ref reset()
/// function must be used, otherwise \ref resetParams() is sufficient.
///
/// For example,
/// \code
///   NetworkSimplexSimple<ListDigraph> ns(graph);
///
///   // First run
///   ns.lowerMap(lower).upperMap(upper).costMap(cost)
///     .supplyMap(sup).run();
///
///   // Run again with modified cost map (resetParams() is not called,
///   // so only the cost map has to be set again)
///   cost[e] += 100;
///   ns.costMap(cost).run();
///
///   // Run again from scratch using resetParams()
///   // (the lower bounds will be set to zero on all arcs)
///   ns.resetParams();
///   ns.upperMap(capacity).costMap(cost)
///     .supplyMap(sup).run();
/// \endcode
///
/// \return <tt>(*this)</tt>
///
/// \see reset(), run()
NetworkSimplexSimple& resetParams() {
    for (int i = 0; i != _node_num; ++i) {
        _supply[i] = 0;
    }
    for (ArcsType i = 0; i != _arc_num; ++i) {
        _cost[i] = 1;
    }
    _stype = GEQ;
    return *this;
}

// integer division helper (equivalent to x / y for integer operands)
int64_t divid(int64_t x, int64_t y) {
    return (x - x % y) / y;
}

/// \brief Reset the internal data structures and all the parameters
/// that have been given before.
///
/// This function resets the internal data structures and all the
/// parameters that have been given before using functions \ref lowerMap(),
/// \ref upperMap(), \ref costMap(), \ref supplyMap(), \ref stSupply(),
/// \ref supplyType().
///
/// It is useful for multiple \ref run() calls. Basically, all the given
/// parameters are kept for the next \ref run() call, unless
/// \ref resetParams() or \ref reset() is used.
/// If the underlying digraph was also modified after the construction
/// of the class or the last \ref reset() call, then the \ref reset()
/// function must be used, otherwise \ref resetParams() is sufficient.
///
/// See \ref resetParams() for examples.
/// /// \return (*this) /// /// \see resetParams(), run() NetworkSimplexSimple& reset() { // Resize vectors _node_num = _init_nb_nodes; _arc_num = _init_nb_arcs; int all_node_num = _node_num + 1; ArcsType max_arc_num = _arc_num + 2 * _node_num; _source.resize(max_arc_num); _target.resize(max_arc_num); _cost.resize(max_arc_num); _supply.resize(all_node_num); _flow.resize(max_arc_num); _pi.resize(all_node_num); _parent.resize(all_node_num); _pred.resize(all_node_num); _forward.resize(all_node_num); _thread.resize(all_node_num); _rev_thread.resize(all_node_num); _succ_num.resize(all_node_num); _last_succ.resize(all_node_num); _state.resize(max_arc_num); //_arc_mixing=false; if (_arc_mixing) { // Store the arcs in a mixed order const ArcsType k = std::max(ArcsType(std::sqrt(double(_arc_num))), ArcsType(10)); mixingCoeff = k; subsequence_length = _arc_num / mixingCoeff + 1; num_big_subseqiences = _arc_num % mixingCoeff; num_total_big_subsequence_numbers = subsequence_length * num_big_subseqiences; ArcsType i = 0, j = 0; Arc a; _graph.first(a); for (; a != INVALID; _graph.next(a)) { _source[i] = _node_id(_graph.source(a)); _target[i] = _node_id(_graph.target(a)); //_arc_id[a] = i; if ((i += k) >= _arc_num) i = ++j; } } else { // Store the arcs in the original order ArcsType i = 0; Arc a; _graph.first(a); for (; a != INVALID; _graph.next(a), ++i) { _source[i] = _node_id(_graph.source(a)); _target[i] = _node_id(_graph.target(a)); //_arc_id[a] = i; } } // Reset parameters resetParams(); return *this; } /// @} /// \name Query Functions /// The results of the algorithm can be obtained using these /// functions.\n /// The \ref run() function must be called before using them. /// @{ /// \brief Return the total cost of the found flow. /// /// This function returns the total cost of the found flow. /// Its complexity is O(e). /// /// \note The return type of the function can be specified as a /// template parameter. For example, /// \code /// ns.totalCost(); /// \endcode /// It is useful if the total cost cannot be stored in the \c Cost /// type of the algorithm, which is the default return type of the /// function. /// /// \pre \ref run() must be called before using this function. /*template Number totalCost() const { Number c = 0; for (ArcIt a(_graph); a != INVALID; ++a) { int64_t i = getArcID(a); c += Number(_flow[i]) * Number(_cost[i]); } return c; }*/ template Number totalCost() const { Number c = 0; /*#ifdef HASHMAP typename stdext::hash_map::const_iterator it; #else typename std::map::const_iterator it; #endif for (it = _flow.data.begin(); it!=_flow.data.end(); ++it) c += Number(it->second) * Number(_cost[it->first]); return c;*/ for (ArcsType i=0; i<_flow.size(); i++) c += _flow[i] * Number(_cost[i]); return c; } #ifndef DOXYGEN Cost totalCost() const { return totalCost(); } #endif /// \brief Return the flow on the given arc. /// /// This function returns the flow on the given arc. /// /// \pre \ref run() must be called before using this function. Value flow(const Arc& a) const { return _flow[getArcID(a)]; } /// \brief Return the flow map (the primal solution). /// /// This function copies the flow value on each arc into the given /// map. The \c Value type of the algorithm must be convertible to /// the \c Value type of the map. /// /// \pre \ref run() must be called before using this function. 
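///
/// Minimal usage sketch (illustrative only; the arc map \c flow_map is an
/// assumed caller-provided type compatible with the digraph):
/// \code
///   if (ns.run() == ns.OPTIMAL) {
///     ns.flowMap(flow_map);  // flow_map[a] now holds the optimal flow on arc a
///   }
/// \endcode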
template void flowMap(FlowMap &map) const { Arc a; _graph.first(a); for (; a != INVALID; _graph.next(a)) { map.set(a, _flow[getArcID(a)]); } } /// \brief Return the potential (dual value) of the given node. /// /// This function returns the potential (dual value) of the /// given node. /// /// \pre \ref run() must be called before using this function. Cost potential(const Node& n) const { return _pi[_node_id(n)]; } /// \brief Return the potential map (the dual solution). /// /// This function copies the potential (dual value) of each node /// into the given map. /// The \c Cost type of the algorithm must be convertible to the /// \c Value type of the map. /// /// \pre \ref run() must be called before using this function. template void potentialMap(PotentialMap &map) const { Node n; _graph.first(n); for (; n != INVALID; _graph.next(n)) { map.set(n, _pi[_node_id(n)]); } } /// @} private: // Initialize internal data structures bool init() { if (_node_num == 0) return false; // Check the sum of supply values _sum_supply = 0; for (int i = 0; i != _node_num; ++i) { _sum_supply += _supply[i]; } if ( fabs(_sum_supply) > _EPSILON ) return false; _sum_supply = 0; // Initialize artifical cost Cost ART_COST; if (std::numeric_limits::is_exact) { ART_COST = std::numeric_limits::max() / 2 + 1; } else { ART_COST = 0; for (ArcsType i = 0; i != _arc_num; ++i) { if (_cost[i] > ART_COST) ART_COST = _cost[i]; } ART_COST = (ART_COST + 1) * _node_num; } // Initialize arc maps for (ArcsType i = 0; i != _arc_num; ++i) { //_flow[i] = 0; //by default, the sparse matrix is empty _state[i] = STATE_LOWER; } // Set data for the artificial root node _root = _node_num; _parent[_root] = -1; _pred[_root] = -1; _thread[_root] = 0; _rev_thread[0] = _root; _succ_num[_root] = _node_num + 1; _last_succ[_root] = _root - 1; _supply[_root] = -_sum_supply; _pi[_root] = 0; // Add artificial arcs and initialize the spanning tree data structure if (_sum_supply == 0) { // EQ supply constraints _search_arc_num = _arc_num; _all_arc_num = _arc_num + _node_num; for (ArcsType u = 0, e = _arc_num; u != _node_num; ++u, ++e) { _parent[u] = _root; _pred[u] = e; _thread[u] = u + 1; _rev_thread[u + 1] = u; _succ_num[u] = 1; _last_succ[u] = u; _state[e] = STATE_TREE; if (_supply[u] >= 0) { _forward[u] = true; _pi[u] = 0; _source[e] = u; _target[e] = _root; _flow[e] = _supply[u]; _cost[e] = 0; } else { _forward[u] = false; _pi[u] = ART_COST; _source[e] = _root; _target[e] = u; _flow[e] = -_supply[u]; _cost[e] = ART_COST; } } } else if (_sum_supply > 0) { // LEQ supply constraints _search_arc_num = _arc_num + _node_num; ArcsType f = _arc_num + _node_num; for (ArcsType u = 0, e = _arc_num; u != _node_num; ++u, ++e) { _parent[u] = _root; _thread[u] = u + 1; _rev_thread[u + 1] = u; _succ_num[u] = 1; _last_succ[u] = u; if (_supply[u] >= 0) { _forward[u] = true; _pi[u] = 0; _pred[u] = e; _source[e] = u; _target[e] = _root; _flow[e] = _supply[u]; _cost[e] = 0; _state[e] = STATE_TREE; } else { _forward[u] = false; _pi[u] = ART_COST; _pred[u] = f; _source[f] = _root; _target[f] = u; _flow[f] = -_supply[u]; _cost[f] = ART_COST; _state[f] = STATE_TREE; _source[e] = u; _target[e] = _root; //_flow[e] = 0; //by default, the sparse matrix is empty _cost[e] = 0; _state[e] = STATE_LOWER; ++f; } } _all_arc_num = f; } else { // GEQ supply constraints _search_arc_num = _arc_num + _node_num; ArcsType f = _arc_num + _node_num; for (ArcsType u = 0, e = _arc_num; u != _node_num; ++u, ++e) { _parent[u] = _root; _thread[u] = u + 1; _rev_thread[u + 1] = u; _succ_num[u] = 1; 
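// In this GEQ branch every node is hooked to the artificial root: a node
// with non-positive supply gets a zero-cost artificial tree arc from the
// root carrying -supply, while a node with positive supply gets an
// ART_COST tree arc towards the root carrying its supply, plus an extra
// zero-cost non-tree arc from the root.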
_last_succ[u] = u; if (_supply[u] <= 0) { _forward[u] = false; _pi[u] = 0; _pred[u] = e; _source[e] = _root; _target[e] = u; _flow[e] = -_supply[u]; _cost[e] = 0; _state[e] = STATE_TREE; } else { _forward[u] = true; _pi[u] = -ART_COST; _pred[u] = f; _source[f] = u; _target[f] = _root; _flow[f] = _supply[u]; _state[f] = STATE_TREE; _cost[f] = ART_COST; _source[e] = _root; _target[e] = u; //_flow[e] = 0; //by default, the sparse matrix is empty _cost[e] = 0; _state[e] = STATE_LOWER; ++f; } } _all_arc_num = f; } return true; } // Find the join node void findJoinNode() { int u = _source[in_arc]; int v = _target[in_arc]; while (u != v) { if (_succ_num[u] < _succ_num[v]) { u = _parent[u]; } else { v = _parent[v]; } } join = u; } // Find the leaving arc of the cycle and returns true if the // leaving arc is not the same as the entering arc bool findLeavingArc() { // Initialize first and second nodes according to the direction // of the cycle if (_state[in_arc] == STATE_LOWER) { first = _source[in_arc]; second = _target[in_arc]; } else { first = _target[in_arc]; second = _source[in_arc]; } delta = INF; char result = 0; Value d; ArcsType e; // Search the cycle along the path form the first node to the root for (int u = first; u != join; u = _parent[u]) { e = _pred[u]; d = _forward[u] ? _flow[e] : INF ; if (d < delta) { delta = d; u_out = u; result = 1; } } // Search the cycle along the path form the second node to the root for (int u = second; u != join; u = _parent[u]) { e = _pred[u]; d = _forward[u] ? INF : _flow[e]; if (d <= delta) { delta = d; u_out = u; result = 2; } } if (result == 1) { u_in = first; v_in = second; } else { u_in = second; v_in = first; } return result != 0; } // Change _flow and _state vectors void changeFlow(bool change) { // Augment along the cycle if (delta > 0) { Value val = _state[in_arc] * delta; _flow[in_arc] += val; for (int u = _source[in_arc]; u != join; u = _parent[u]) { _flow[_pred[u]] += _forward[u] ? -val : val; } for (int u = _target[in_arc]; u != join; u = _parent[u]) { _flow[_pred[u]] += _forward[u] ? val : -val; } } // Update the state of the entering and leaving arcs if (change) { _state[in_arc] = STATE_TREE; _state[_pred[u_out]] = (_flow[_pred[u_out]] == 0) ? STATE_LOWER : STATE_UPPER; } else { _state[in_arc] = -_state[in_arc]; } } // Update the tree structure void updateTreeStructure() { int u, w; int old_rev_thread = _rev_thread[u_out]; int old_succ_num = _succ_num[u_out]; int old_last_succ = _last_succ[u_out]; v_out = _parent[u_out]; u = _last_succ[u_in]; // the last successor of u_in right = _thread[u]; // the node after it // Handle the case when old_rev_thread equals to v_in // (it also means that join and v_out coincide) if (old_rev_thread == v_in) { last = _thread[_last_succ[u_out]]; } else { last = _thread[v_in]; } // Update _thread and _parent along the stem nodes (i.e. the nodes // between u_in and u_out, whose parent have to be changed) _thread[v_in] = stem = u_in; _dirty_revs.clear(); _dirty_revs.push_back(v_in); par_stem = v_in; while (stem != u_out) { // Insert the next stem node into the thread list new_stem = _parent[stem]; _thread[u] = new_stem; _dirty_revs.push_back(u); // Remove the subtree of stem from the thread list w = _rev_thread[stem]; _thread[w] = right; _rev_thread[right] = w; // Change the parent node and shift stem nodes _parent[stem] = par_stem; par_stem = stem; stem = new_stem; // Update u and right u = _last_succ[stem] == _last_succ[par_stem] ? 
_rev_thread[par_stem] : _last_succ[stem]; right = _thread[u]; } _parent[u_out] = par_stem; _thread[u] = last; _rev_thread[last] = u; _last_succ[u_out] = u; // Remove the subtree of u_out from the thread list except for // the case when old_rev_thread equals to v_in // (it also means that join and v_out coincide) if (old_rev_thread != v_in) { _thread[old_rev_thread] = right; _rev_thread[right] = old_rev_thread; } // Update _rev_thread using the new _thread values for (int i = 0; i != int(_dirty_revs.size()); ++i) { int u = _dirty_revs[i]; _rev_thread[_thread[u]] = u; } // Update _pred, _forward, _last_succ and _succ_num for the // stem nodes from u_out to u_in int tmp_sc = 0, tmp_ls = _last_succ[u_out]; u = u_out; while (u != u_in) { w = _parent[u]; _pred[u] = _pred[w]; _forward[u] = !_forward[w]; tmp_sc += _succ_num[u] - _succ_num[w]; _succ_num[u] = tmp_sc; _last_succ[w] = tmp_ls; u = w; } _pred[u_in] = in_arc; _forward[u_in] = (u_in == _source[in_arc]); _succ_num[u_in] = old_succ_num; // Set limits for updating _last_succ form v_in and v_out // towards the root int up_limit_in = -1; int up_limit_out = -1; if (_last_succ[join] == v_in) { up_limit_out = join; } else { up_limit_in = join; } // Update _last_succ from v_in towards the root for (u = v_in; u != up_limit_in && _last_succ[u] == v_in; u = _parent[u]) { _last_succ[u] = _last_succ[u_out]; } // Update _last_succ from v_out towards the root if (join != old_rev_thread && v_in != old_rev_thread) { for (u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ; u = _parent[u]) { _last_succ[u] = old_rev_thread; } } else { for (u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ; u = _parent[u]) { _last_succ[u] = _last_succ[u_out]; } } // Update _succ_num from v_in to join for (u = v_in; u != join; u = _parent[u]) { _succ_num[u] += old_succ_num; } // Update _succ_num from v_out to join for (u = v_out; u != join; u = _parent[u]) { _succ_num[u] -= old_succ_num; } } // Update potentials void updatePotential() { Cost sigma = _forward[u_in] ? _pi[v_in] - _pi[u_in] - _cost[_pred[u_in]] : _pi[v_in] - _pi[u_in] + _cost[_pred[u_in]]; // Update potentials in the subtree, which has been moved int end = _thread[_last_succ[u_in]]; for (int u = u_in; u != end; u = _thread[u]) { _pi[u] += sigma; } } // Heuristic initial pivots bool initialPivots() { Value curr, total = 0; std::vector supply_nodes, demand_nodes; Node u; _graph.first(u); for (; u != INVALIDNODE; _graph.next(u)) { curr = _supply[_node_id(u)]; if (curr > 0) { total += curr; supply_nodes.push_back(u); } else if (curr < 0) { demand_nodes.push_back(u); } } if (_sum_supply > 0) total -= _sum_supply; if (total <= 0) return true; ArcVector arc_vector; if (_sum_supply >= 0) { if (supply_nodes.size() == 1 && demand_nodes.size() == 1) { // Perform a reverse graph search from the sink to the source //typename GR::template NodeMap reached(_graph, false); BoolVector reached(_node_num, false); Node s = supply_nodes[0], t = demand_nodes[0]; std::vector stack; reached[t] = true; stack.push_back(t); while (!stack.empty()) { Node u, v = stack.back(); stack.pop_back(); if (v == s) break; Arc a; _graph.firstIn(a, v); for (; a != INVALID; _graph.nextIn(a)) { if (reached[u = _graph.source(a)]) continue; ArcsType j = getArcID(a); if (INF >= total) { arc_vector.push_back(j); reached[u] = true; stack.push_back(u); } } } } else { // Find the min. 
cost incoming arc for each demand node
                for (int i = 0; i != int(demand_nodes.size()); ++i) {
                    Node v = demand_nodes[i];
                    Cost c, min_cost = std::numeric_limits<Cost>::max();
                    Arc min_arc = INVALID;
                    Arc a; _graph.firstIn(a, v);
                    for (; a != INVALID; _graph.nextIn(a)) {
                        c = _cost[getArcID(a)];
                        if (c < min_cost) {
                            min_cost = c;
                            min_arc = a;
                        }
                    }
                    if (min_arc != INVALID) {
                        arc_vector.push_back(getArcID(min_arc));
                    }
                }
            }
        } else {
            // Find the min. cost outgoing arc for each supply node
            for (int i = 0; i != int(supply_nodes.size()); ++i) {
                Node u = supply_nodes[i];
                Cost c, min_cost = std::numeric_limits<Cost>::max();
                Arc min_arc = INVALID;
                Arc a; _graph.firstOut(a, u);
                for (; a != INVALID; _graph.nextOut(a)) {
                    c = _cost[getArcID(a)];
                    if (c < min_cost) {
                        min_cost = c;
                        min_arc = a;
                    }
                }
                if (min_arc != INVALID) {
                    arc_vector.push_back(getArcID(min_arc));
                }
            }
        }

        // Perform heuristic initial pivots
        for (ArcsType i = 0; i != ArcsType(arc_vector.size()); ++i) {
            in_arc = arc_vector[i];
            // the error is probably here...
            if (_state[in_arc] * (_cost[in_arc] + _pi[_source[in_arc]] - _pi[_target[in_arc]]) >= 0) continue;
            findJoinNode();
            bool change = findLeavingArc();
            if (delta >= MAX) return false;
            changeFlow(change);
            if (change) {
                updateTreeStructure();
                updatePotential();
            }
        }
        return true;
    }

    // Execute the algorithm
    ProblemType start() {
        return start<BlockSearchPivotRule>();
    }

    template <typename PivotRuleImpl>
    ProblemType start() {
        PivotRuleImpl pivot(*this);
        ProblemType retVal = OPTIMAL;

        // Perform heuristic initial pivots
        if (!initialPivots()) return UNBOUNDED;

        uint64_t iter_number = 0;
        //pivot.setDantzig(true);
        // Execute the Network Simplex algorithm
        while (pivot.findEnteringArc()) {
            if (max_iter > 0 && ++iter_number >= max_iter) {
                // max iterations hit
                retVal = MAX_ITER_REACHED;
                break;
            }
#if DEBUG_LVL>0
            if (iter_number > MAX_DEBUG_ITER) break;
            if (iter_number % 1000 == 0 || iter_number % 1000 == 1) {
                double curCost = totalCost<double>();
                double sumFlow = 0;
                double a;
                a = (fabs(_pi[_source[in_arc]]) >= fabs(_pi[_target[in_arc]])) ? fabs(_pi[_source[in_arc]]) : fabs(_pi[_target[in_arc]]);
                a = a >= fabs(_cost[in_arc]) ? a : fabs(_cost[in_arc]);
                for (int64_t i = 0; i < _flow.size(); i++) {
                    sumFlow += _state[i] * _flow[i];
                }
                std::cout << "Sum of the flow " << std::setprecision(20) << sumFlow << "\n" << iter_number << " iterations, current cost=" << curCost << "\nReduced cost=" << _state[in_arc] * (_cost[in_arc] + _pi[_source[in_arc]] - _pi[_target[in_arc]]) << "\nPrecision = " << -EPSILON*(a) << "\n";
                std::cout << "Arc in = (" << _node_id(_source[in_arc]) << ", " << _node_id(_target[in_arc]) << ")\n";
                std::cout << "Supplies = (" << _supply[_source[in_arc]] << ", " << _supply[_target[in_arc]] << ")\n";
                std::cout << _cost[in_arc] << "\n";
                std::cout << _pi[_source[in_arc]] << "\n";
                std::cout << _pi[_target[in_arc]] << "\n";
                std::cout << a << "\n";
            }
#endif
            findJoinNode();
            bool change = findLeavingArc();
            if (delta >= MAX) return UNBOUNDED;
            changeFlow(change);
            if (change) {
                updateTreeStructure();
                updatePotential();
            }
#if DEBUG_LVL>0
            else {
                std::cout << "No change\n";
            }
#endif
#if DEBUG_LVL>1
            std::cout << "Arc in = (" << _source[in_arc] << ", " << _target[in_arc] << ")\n";
#endif
        }

#if DEBUG_LVL>0
        double curCost = totalCost<double>();
        double sumFlow = 0;
        double a;
        a = (fabs(_pi[_source[in_arc]]) >= fabs(_pi[_target[in_arc]])) ? fabs(_pi[_source[in_arc]]) : fabs(_pi[_target[in_arc]]);
        a = a >= fabs(_cost[in_arc]) ? a : fabs(_cost[in_arc]);
        for (int64_t i = 0; i < _flow.size(); i++) {
            sumFlow += _state[i] * _flow[i];
        }
        std::cout << "Sum of the flow " << std::setprecision(20) << sumFlow << "\n" << iter_number << " iterations, current cost=" << curCost << "\nReduced cost=" << _state[in_arc] * (_cost[in_arc] + _pi[_source[in_arc]] - _pi[_target[in_arc]]) << "\nPrecision = " << -EPSILON*(a) << "\n";
        std::cout << "Arc in = (" << _node_id(_source[in_arc]) << ", " << _node_id(_target[in_arc]) << ")\n";
        std::cout << "Supplies = (" << _supply[_source[in_arc]] << ", " << _supply[_target[in_arc]] << ")\n";
#endif

#if DEBUG_LVL>1
        sumFlow = 0;
        for (int i = 0; i < _flow.size(); i++) {
            sumFlow += _state[i] * _flow[i];
            if (_state[i] == STATE_TREE) {
                std::cout << "Non zero value at (" << _node_num+1-_source[i] << ", " << _node_num+1-_target[i] << ")\n";
            }
        }
        std::cout << "Sum of the flow " << sumFlow << "\n" << iter_number << " iterations, current cost=" << totalCost<double>() << "\n";
#endif

        // Check feasibility
        if (retVal == OPTIMAL) {
            for (ArcsType e = _search_arc_num; e != _all_arc_num; ++e) {
                if (_flow[e] != 0) {
                    if (fabs(_flow[e]) > _EPSILON) // change of the original code following issue #126
                        return INFEASIBLE;
                    else
                        _flow[e] = 0;
                }
            }
        }

        // Shift potentials to meet the requirements of the GEQ/LEQ type
        // optimality conditions
        if (_sum_supply == 0) {
            if (_stype == GEQ) {
                Cost max_pot = -std::numeric_limits<Cost>::max();
                for (ArcsType i = 0; i != _node_num; ++i) {
                    if (_pi[i] > max_pot) max_pot = _pi[i];
                }
                if (max_pot > 0) {
                    for (ArcsType i = 0; i != _node_num; ++i)
                        _pi[i] -= max_pot;
                }
            } else {
                Cost min_pot = std::numeric_limits<Cost>::max();
                for (ArcsType i = 0; i != _node_num; ++i) {
                    if (_pi[i] < min_pot) min_pot = _pi[i];
                }
                if (min_pot < 0) {
                    for (ArcsType i = 0; i != _node_num; ++i)
                        _pi[i] -= min_pot;
                }
            }
        }

        return retVal;
    }

}; //class NetworkSimplexSimple

///@}

} //namespace lemon
python-pot-0.9.3+dfsg/ot/lp/network_simplex_simple_omp.h000066400000000000000000001403351455713015700234650ustar00rootroot00000000000000/* -*- mode: C++; indent-tabs-mode: nil; -*-
 *
 * This file has been adapted by Nicolas Bonneel (2013),
 * from network_simplex.h from LEMON, a generic C++ optimization library,
 * to implement a lightweight network simplex for mass transport, more
 * memory efficient than the original file. A previous version of this file
 * is used as part of the Displacement Interpolation project,
 * Web: http://www.cs.ubc.ca/labs/imager/tr/2011/DisplacementInterpolation/
 *
 * Revisions:
 * March 2015: added OpenMP parallelization
 * March 2017: included Antoine Rolet's trick to make it more robust
 * April 2018: IMPORTANT bug fix + uses 64bit integers (slightly slower but fewer risks of overflows), updated to a newer version of the algo by LEMON, sparse flow by default + minor edits.
 *
 * **** Original file Copyright Notice :
 *
 * Copyright (C) 2003-2010
 * Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport
 * (Egervary Research Group on Combinatorial Optimization, EGRES).
 *
 * Permission to use, modify and distribute this software is granted
 * provided that this copyright notice appears in all copies. For
 * precise terms see the accompanying LICENSE file.
 *
 * This software is provided "AS IS" with no warranty of any kind,
 * express or implied, and with no claim as to its suitability for any
 * purpose.
 *
 */

#pragma once
#undef DEBUG_LVL
#define DEBUG_LVL 0

#if DEBUG_LVL>0
#include <iomanip>
#endif

#undef EPSILON
#undef _EPSILON
#undef MAX_DEBUG_ITER
#define EPSILON std::numeric_limits<Cost>::epsilon()
#define _EPSILON 1e-14
#define MAX_DEBUG_ITER 100000

/// \ingroup min_cost_flow_algs
///
/// \file
/// \brief Network Simplex algorithm for finding a minimum cost flow.

// if your compiler has troubles with unordered maps, just comment the following line to use a slower std::map instead
#define HASHMAP // now handled with unordered maps instead of stdext::hash_map. Should be better supported.

#define SPARSE_FLOW // a sparse flow vector will be 10-15% slower for small problems but uses less memory and becomes faster for large problems (40k total nodes)

#include <vector>
#include <limits>
#include <algorithm>
#include <cstdio>
#ifdef HASHMAP
#include <unordered_map>
#else
#include <map>
#endif
//#include "core.h"
//#include "lmath.h"

#ifdef _OPENMP
#include <omp.h>
#endif
#include <cmath>
//#include "sparse_array_n.h"
#include "full_bipartitegraph_omp.h"

#undef INVALIDNODE
#undef INVALID
#define INVALIDNODE -1
#define INVALID (-1)

namespace lemon_omp {

int64_t max_threads = -1;

template <typename T>
class ProxyObject;

template <typename T>
class SparseValueVector {
public:
    SparseValueVector(size_t n = 0) // parameter n for compatibility with standard vectors
    {
    }
    void resize(size_t n = 0) {};
    T operator[](const size_t id) const {
#ifdef HASHMAP
        typename std::unordered_map<size_t, T>::const_iterator it = data.find(id);
#else
        typename std::map<size_t, T>::const_iterator it = data.find(id);
#endif
        if (it == data.end())
            return 0;
        else
            return it->second;
    }

    ProxyObject<T> operator[](const size_t id) {
        return ProxyObject<T>(this, id);
    }

    //private:
#ifdef HASHMAP
    std::unordered_map<size_t, T> data;
#else
    std::map<size_t, T> data;
#endif
};

template <typename T>
class ProxyObject {
public:
    ProxyObject(SparseValueVector<T> *v, size_t idx) { _v = v; _idx = idx; };
    ProxyObject<T> & operator=(const T &v) {
        // If we get here, we know that operator[] was called to perform a write access,
        // so we can insert an item in the vector if needed
        if (v != 0)
            _v->data[_idx] = v;
        return *this;
    }

    operator T() {
        // If we get here, we know that operator[] was called to perform a read access,
        // so we can simply return the existing object
#ifdef HASHMAP
        typename std::unordered_map<size_t, T>::iterator it = _v->data.find(_idx);
#else
        typename std::map<size_t, T>::iterator it = _v->data.find(_idx);
#endif
        if (it == _v->data.end())
            return 0;
        else
            return it->second;
    }

    void operator+=(T val) {
        if (val == 0) return;
#ifdef HASHMAP
        typename std::unordered_map<size_t, T>::iterator it = _v->data.find(_idx);
#else
        typename std::map<size_t, T>::iterator it = _v->data.find(_idx);
#endif
        if (it == _v->data.end())
            _v->data[_idx] = val;
        else {
            T sum = it->second + val;
            if (sum == 0)
                _v->data.erase(it);
            else
                it->second = sum;
        }
    }
    void operator-=(T val) {
        if (val == 0) return;
#ifdef HASHMAP
        typename std::unordered_map<size_t, T>::iterator it = _v->data.find(_idx);
#else
        typename std::map<size_t, T>::iterator it = _v->data.find(_idx);
#endif
        if (it == _v->data.end())
            _v->data[_idx] = -val;
        else {
            T sum = it->second - val;
            if (sum == 0)
                _v->data.erase(it);
            else
                it->second = sum;
        }
    }

    SparseValueVector<T> *_v;
    size_t _idx;
};

/// \addtogroup min_cost_flow_algs
/// @{

/// \brief Implementation of the primal Network Simplex algorithm
/// for finding a \ref min_cost_flow "minimum cost flow".
///
/// \ref NetworkSimplexSimple implements the primal Network Simplex algorithm
/// for finding a \ref min_cost_flow "minimum cost flow"
/// \ref amo93networkflows, \ref dantzig63linearprog,
/// \ref kellyoneill91netsimplex.
/// This algorithm is a highly efficient specialized version of the /// linear programming simplex method directly for the minimum cost /// flow problem. /// /// In general, %NetworkSimplexSimple is the fastest implementation available /// in LEMON for this problem. /// Moreover, it supports both directions of the supply/demand inequality /// constraints. For more information, see \ref SupplyType. /// /// Most of the parameters of the problem (except for the digraph) /// can be given using separate functions, and the algorithm can be /// executed using the \ref run() function. If some parameters are not /// specified, then default values will be used. /// /// \tparam GR The digraph type the algorithm runs on. /// \tparam V The number type used for flow amounts, capacity bounds /// and supply values in the algorithm. By default, it is \c int. /// \tparam C The number type used for costs and potentials in the /// algorithm. By default, it is the same as \c V. /// /// \warning Both number types must be signed and all input data must /// be integer. /// /// \note %NetworkSimplexSimple provides five different pivot rule /// implementations, from which the most efficient one is used /// by default. For more information, see \ref PivotRule. template class NetworkSimplexSimple { public: /// \brief Constructor. /// /// The constructor of the class. /// /// \param graph The digraph the algorithm runs on. /// \param arc_mixing Indicate if the arcs have to be stored in a /// mixed order in the internal data structure. /// In special cases, it could lead to better overall performance, /// but it is usually slower. Therefore it is disabled by default. NetworkSimplexSimple(const GR& graph, bool arc_mixing, int nbnodes, ArcsType nb_arcs, uint64_t maxiters = 0, int numThreads=-1) : _graph(graph), //_arc_id(graph), _arc_mixing(arc_mixing), _init_nb_nodes(nbnodes), _init_nb_arcs(nb_arcs), MAX(std::numeric_limits::max()), INF(std::numeric_limits::has_infinity ? std::numeric_limits::infinity() : MAX) { // Reset data structures reset(); max_iter = maxiters; #ifdef _OPENMP if (max_threads < 0) { max_threads = omp_get_max_threads(); } if (numThreads > 0 && numThreads<=max_threads){ num_threads = numThreads; } else if (numThreads == -1 || numThreads>max_threads) { num_threads = max_threads; } else { num_threads = 1; } omp_set_num_threads(num_threads); #else num_threads = 1; #endif } /// The type of the flow amounts, capacity bounds and supply values typedef V Value; /// The type of the arc costs typedef C Cost; public: /// \brief Problem type constants for the \c run() function. /// /// Enum type containing the problem type constants that can be /// returned by the \ref run() function of the algorithm. enum ProblemType { /// The problem has no feasible solution (flow). INFEASIBLE, /// The problem has optimal solution (i.e. it is feasible and /// bounded), and the algorithm has found optimal flow and node /// potentials (primal and dual solutions). OPTIMAL, /// The objective function of the problem is unbounded, i.e. /// there is a directed cycle having negative total cost and /// infinite upper bound. UNBOUNDED, // The maximum number of iteration has been reached MAX_ITER_REACHED }; /// \brief Constants for selecting the type of the supply constraints. /// /// Enum type containing constants for selecting the supply type, /// i.e. the direction of the inequalities in the supply/demand /// constraints of the \ref min_cost_flow "minimum cost flow problem". 
/// /// The default supply type is \c GEQ, the \c LEQ type can be /// selected using \ref supplyType(). /// The equality form is a special case of both supply types. enum SupplyType { /// This option means that there are "greater or equal" /// supply/demand constraints in the definition of the problem. GEQ, /// This option means that there are "less or equal" /// supply/demand constraints in the definition of the problem. LEQ }; private: uint64_t max_iter; int num_threads; TEMPLATE_DIGRAPH_TYPEDEFS(GR); typedef std::vector IntVector; typedef std::vector ArcVector; typedef std::vector ValueVector; typedef std::vector CostVector; // typedef SparseValueVector CostVector; typedef std::vector BoolVector; // Note: vector is used instead of vector for efficiency reasons // State constants for arcs enum ArcState { STATE_UPPER = -1, STATE_TREE = 0, STATE_LOWER = 1 }; typedef std::vector StateVector; // Note: vector is used instead of vector for // efficiency reasons private: // Data related to the underlying digraph const GR &_graph; int _node_num; ArcsType _arc_num; ArcsType _all_arc_num; ArcsType _search_arc_num; // Parameters of the problem SupplyType _stype; Value _sum_supply; inline int _node_id(int n) const { return _node_num - n - 1; }; //IntArcMap _arc_id; IntVector _source; // keep nodes as integers IntVector _target; bool _arc_mixing; // Node and arc data CostVector _cost; ValueVector _supply; #ifdef SPARSE_FLOW SparseValueVector _flow; #else ValueVector _flow; #endif CostVector _pi; // Data for storing the spanning tree structure IntVector _parent; ArcVector _pred; IntVector _thread; IntVector _rev_thread; IntVector _succ_num; IntVector _last_succ; IntVector _dirty_revs; BoolVector _forward; StateVector _state; ArcsType _root; // Temporary data used in the current pivot iteration ArcsType in_arc, join, u_in, v_in, u_out, v_out; ArcsType first, second, right, last; ArcsType stem, par_stem, new_stem; Value delta; const Value MAX; ArcsType mixingCoeff; public: /// \brief Constant for infinite upper bounds (capacities). /// /// Constant for infinite upper bounds (capacities). /// It is \c std::numeric_limits::infinity() if available, /// \c std::numeric_limits::max() otherwise. const Value INF; private: // thank you to DVK and MizardX from StackOverflow for this function! 
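// In this OpenMP variant, sequence() is also used by the parallel loop in
// reset() to scatter each arc's _source/_target entries directly into
// their mixed positions (the sequential file instead fills them by
// stepping an index in mixed order).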
inline ArcsType sequence(ArcsType k) const { ArcsType smallv = (k > num_total_big_subsequence_numbers) & 1; k -= num_total_big_subsequence_numbers * smallv; ArcsType subsequence_length2 = subsequence_length - smallv; ArcsType subsequence_num = (k / subsequence_length2) + num_big_subsequences * smallv; ArcsType subsequence_offset = (k % subsequence_length2) * mixingCoeff; return subsequence_offset + subsequence_num; } ArcsType subsequence_length; ArcsType num_big_subsequences; ArcsType num_total_big_subsequence_numbers; inline ArcsType getArcID(const Arc &arc) const { //int n = _arc_num-arc._id-1; ArcsType n = _arc_num - GR::id(arc) - 1; //ArcsType a = mixingCoeff*(n%mixingCoeff) + n/mixingCoeff; //ArcsType b = _arc_id[arc]; if (_arc_mixing) return sequence(n); else return n; } // finally unused because too slow inline ArcsType getSource(const ArcsType arc) const { //ArcsType a = _source[arc]; //return a; ArcsType n = _arc_num - arc - 1; if (_arc_mixing) n = mixingCoeff*(n%mixingCoeff) + n / mixingCoeff; ArcsType b; if (n >= 0) b = _node_id(_graph.source(GR::arcFromId(n))); else { n = arc + 1 - _arc_num; if (n <= _node_num) b = _node_num; else if (n >= _graph._n1) b = _graph._n1; else b = _graph._n1 - n; } return b; } // Implementation of the Block Search pivot rule class BlockSearchPivotRule { private: // References to the NetworkSimplexSimple class const IntVector &_source; const IntVector &_target; const CostVector &_cost; const StateVector &_state; const CostVector &_pi; ArcsType &_in_arc; ArcsType _search_arc_num; // Pivot rule data ArcsType _block_size; ArcsType _next_arc; NetworkSimplexSimple &_ns; public: // Constructor BlockSearchPivotRule(NetworkSimplexSimple &ns) : _source(ns._source), _target(ns._target), _cost(ns._cost), _state(ns._state), _pi(ns._pi), _in_arc(ns.in_arc), _search_arc_num(ns._search_arc_num), _next_arc(0), _ns(ns) { // The main parameters of the pivot rule const double BLOCK_SIZE_FACTOR = 1; const ArcsType MIN_BLOCK_SIZE = 10; _block_size = std::max(ArcsType(BLOCK_SIZE_FACTOR * std::sqrt(double(_search_arc_num))), MIN_BLOCK_SIZE); } // Find next entering arc bool findEnteringArc() { Cost min_val = 0; ArcsType N = _ns.num_threads; std::vector minArray(N, 0); std::vector arcId(N); ArcsType bs = (ArcsType)ceil(_block_size / (double)N); for (ArcsType i = 0; i < _search_arc_num; i += _block_size) { ArcsType e; int j; #pragma omp parallel { #ifdef _OPENMP int t = omp_get_thread_num(); #else int t = 0; #endif #pragma omp for schedule(static, bs) lastprivate(e) for (j = 0; j < std::min(i + _block_size, _search_arc_num) - i; j++) { e = (_next_arc + i + j); if (e >= _search_arc_num) e -= _search_arc_num; Cost c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); if (c < minArray[t]) { minArray[t] = c; arcId[t] = e; } } } for (int j = 0; j < N; j++) { if (minArray[j] < min_val) { min_val = minArray[j]; _in_arc = arcId[j]; } } Cost a = std::abs(_pi[_source[_in_arc]]) > std::abs(_pi[_target[_in_arc]]) ? std::abs(_pi[_source[_in_arc]]) : std::abs(_pi[_target[_in_arc]]); a = a > std::abs(_cost[_in_arc]) ? a : std::abs(_cost[_in_arc]); if (min_val < -EPSILON*a) { _next_arc = e; return true; } } Cost a = fabs(_pi[_source[_in_arc]]) > fabs(_pi[_target[_in_arc]]) ? fabs(_pi[_source[_in_arc]]) : fabs(_pi[_target[_in_arc]]); a = a > fabs(_cost[_in_arc]) ? 
a : fabs(_cost[_in_arc]); if (min_val >= -EPSILON*a) return false; return true; } // Find next entering arc /*bool findEnteringArc() { Cost min_val = 0; int N = omp_get_max_threads(); std::vector minArray(N); std::vector arcId(N); ArcsType bs = (ArcsType)ceil(_block_size / (double)N); for (ArcsType i = 0; i < _search_arc_num; i += _block_size) { ArcsType maxJ = std::min(i + _block_size, _search_arc_num) - i; ArcsType j; #pragma omp parallel { int t = omp_get_thread_num(); Cost minV = 0; ArcsType arcStart = _next_arc + i; ArcsType arc = -1; #pragma omp for schedule(static, bs) for (j = 0; j < maxJ; j++) { ArcsType e = arcStart + j; if (e >= _search_arc_num) e -= _search_arc_num; Cost c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); if (c < minV) { minV = c; arc = e; } } minArray[t] = minV; arcId[t] = arc; } for (int j = 0; j < N; j++) { if (minArray[j] < min_val) { min_val = minArray[j]; _in_arc = arcId[j]; } } //FIX by Antoine Rolet to avoid precision issues Cost a = std::max(std::abs(_cost[_in_arc]), std::max(std::abs(_pi[_source[_in_arc]]), std::abs(_pi[_target[_in_arc]]))); if (min_val <-std::numeric_limits::epsilon()*a) { _next_arc = _next_arc + i + maxJ - 1; if (_next_arc >= _search_arc_num) _next_arc -= _search_arc_num; return true; } } if (min_val >= 0) { return false; } return true; }*/ /*bool findEnteringArc() { Cost c, min = 0; int cnt = _block_size; int e, min_arc = _next_arc; for (e = _next_arc; e < _search_arc_num; ++e) { c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); if (c < min) { min = c; min_arc = e; } if (--cnt == 0) { if (min < 0) break; cnt = _block_size; } } if (min == 0 || cnt > 0) { for (e = 0; e < _next_arc; ++e) { c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); if (c < min) { min = c; min_arc = e; } if (--cnt == 0) { if (min < 0) break; cnt = _block_size; } } } if (min >= 0) return false; _in_arc = min_arc; _next_arc = e; return true; }*/ }; //class BlockSearchPivotRule public: int _init_nb_nodes; ArcsType _init_nb_arcs; /// \name Parameters /// The parameters of the algorithm can be specified using these /// functions. /// @{ /// \brief Set the costs of the arcs. /// /// This function sets the costs of the arcs. /// If it is not used before calling \ref run(), the costs /// will be set to \c 1 on all arcs. /// /// \param map An arc map storing the costs. /// Its \c Value type must be convertible to the \c Cost type /// of the algorithm. /// /// \return (*this) template NetworkSimplexSimple& costMap(const CostMap& map) { Arc a; _graph.first(a); for (; a != INVALID; _graph.next(a)) { _cost[getArcID(a)] = map[a]; } return *this; } /// \brief Set the costs of one arc. /// /// This function sets the costs of one arcs. /// Done for memory reasons /// /// \param arc An arc. /// \param arc A cost /// /// \return (*this) template NetworkSimplexSimple& setCost(const Arc& arc, const Value cost) { _cost[getArcID(arc)] = cost; return *this; } /// \brief Set the supply values of the nodes. /// /// This function sets the supply values of the nodes. /// If neither this function nor \ref stSupply() is used before /// calling \ref run(), the supply of each node will be set to zero. /// /// \param map A node map storing the supply values. /// Its \c Value type must be convertible to the \c Value type /// of the algorithm. 
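///
/// Illustrative usage sketch (the weight arrays \c wa and \c wb and their
/// lengths \c n1 and \c n2 are assumptions of this example; demand entries
/// are expected to be negative):
/// \code
///   ns.supplyMap(wa, n1, wb, n2);  // pointer-based overload defined below
/// \endcode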
///
/// \return <tt>(*this)</tt>
template<typename SupplyMap>
NetworkSimplexSimple& supplyMap(const SupplyMap& map) {
    Node n; _graph.first(n);
    for (; n != INVALIDNODE; _graph.next(n)) {
        _supply[_node_id(n)] = map[n];
    }
    return *this;
}

template<typename SupplyMap>
NetworkSimplexSimple& supplyMap(const SupplyMap* map1, int n1, const SupplyMap* map2, int n2) {
    Node n; _graph.first(n);
    for (; n != INVALIDNODE; _graph.next(n)) {
        if (n < n1)
            _supply[_node_id(n)] = map1[n];
        else
            _supply[_node_id(n)] = map2[n - n1];
    }
    return *this;
}

template<typename SupplyMap>
NetworkSimplexSimple& supplyMapAll(SupplyMap val1, int n1, SupplyMap val2, int n2) {
    Node n; _graph.first(n);
    for (; n != INVALIDNODE; _graph.next(n)) {
        if (n < n1)
            _supply[_node_id(n)] = val1;
        else
            _supply[_node_id(n)] = val2;
    }
    return *this;
}

/// \brief Set single source and target nodes and a supply value.
///
/// This function sets a single source node and a single target node
/// and the required flow value.
/// If neither this function nor \ref supplyMap() is used before
/// calling \ref run(), the supply of each node will be set to zero.
///
/// \param s The source node.
/// \param t The target node.
/// \param k The required amount of flow from node \c s to node \c t
/// (i.e. the supply of \c s and the demand of \c t).
///
/// \return <tt>(*this)</tt>
NetworkSimplexSimple& stSupply(const Node& s, const Node& t, Value k) {
    for (int i = 0; i != _node_num; ++i) {
        _supply[i] = 0;
    }
    _supply[_node_id(s)] = k;
    _supply[_node_id(t)] = -k;
    return *this;
}

/// \brief Set the type of the supply constraints.
///
/// This function sets the type of the supply/demand constraints.
/// If it is not used before calling \ref run(), the \ref GEQ supply
/// type will be used.
///
/// For more information, see \ref SupplyType.
///
/// \return <tt>(*this)</tt>
NetworkSimplexSimple& supplyType(SupplyType supply_type) {
    _stype = supply_type;
    return *this;
}

/// @}

/// \name Execution Control
/// The algorithm can be executed using \ref run().

/// @{

/// \brief Run the algorithm.
///
/// This function runs the algorithm.
/// The parameters can be specified using functions \ref lowerMap(),
/// \ref upperMap(), \ref costMap(), \ref supplyMap(), \ref stSupply(),
/// \ref supplyType().
/// For example,
/// \code
///   NetworkSimplexSimple<ListDigraph> ns(graph);
///   ns.lowerMap(lower).upperMap(upper).costMap(cost)
///     .supplyMap(sup).run();
/// \endcode
///
/// This function can be called more than once. All the given parameters
/// are kept for the next call, unless \ref resetParams() or \ref reset()
/// is used, thus only the modified parameters have to be set again.
/// If the underlying digraph was also modified after the construction
/// of the class (or the last \ref reset() call), then the \ref reset()
/// function must be called.
///
/// \param pivot_rule The pivot rule that will be used during the
/// algorithm. For more information, see \ref PivotRule.
///
/// \return \c INFEASIBLE if no feasible flow exists,
/// \n \c OPTIMAL if the problem has optimal solution
/// (i.e. it is feasible and bounded), and the algorithm has found
/// optimal flow and node potentials (primal and dual solutions),
/// \n \c UNBOUNDED if the objective function of the problem is
/// unbounded, i.e. there is a directed cycle having negative total
/// cost and infinite upper bound.
///
/// \see ProblemType, PivotRule
/// \see resetParams(), reset()
ProblemType run() {
#if DEBUG_LVL>0
    std::cout << "OPTIMAL = " << OPTIMAL << "\nINFEASIBLE = " << INFEASIBLE << "\nUNBOUNDED = " << UNBOUNDED << "\nMAX_ITER_REACHED = " << MAX_ITER_REACHED << "\n";
#endif
    if (!init()) return INFEASIBLE;
#if DEBUG_LVL>0
    std::cout << "Init done, starting iterations\n";
#endif
    return start();
}

/// \brief Reset all the parameters that have been given before.
///
/// This function resets all the parameters that have been given
/// before using functions \ref lowerMap(), \ref upperMap(),
/// \ref costMap(), \ref supplyMap(), \ref stSupply(), \ref supplyType().
///
/// It is useful for multiple \ref run() calls. Basically, all the given
/// parameters are kept for the next \ref run() call, unless
/// \ref resetParams() or \ref reset() is used.
/// If the underlying digraph was also modified after the construction
/// of the class or the last \ref reset() call, then the \ref reset()
/// function must be used, otherwise \ref resetParams() is sufficient.
///
/// For example,
/// \code
///   NetworkSimplexSimple<ListDigraph> ns(graph);
///
///   // First run
///   ns.lowerMap(lower).upperMap(upper).costMap(cost)
///     .supplyMap(sup).run();
///
///   // Run again with modified cost map (resetParams() is not called,
///   // so only the cost map has to be set again)
///   cost[e] += 100;
///   ns.costMap(cost).run();
///
///   // Run again from scratch using resetParams()
///   // (the lower bounds will be set to zero on all arcs)
///   ns.resetParams();
///   ns.upperMap(capacity).costMap(cost)
///     .supplyMap(sup).run();
/// \endcode
///
/// \return <tt>(*this)</tt>
///
/// \see reset(), run()
NetworkSimplexSimple& resetParams() {
    for (int i = 0; i != _node_num; ++i) {
        _supply[i] = 0;
    }
    for (ArcsType i = 0; i != _arc_num; ++i) {
        _cost[i] = 1;
    }
    _stype = GEQ;
    return *this;
}

/// \brief Reset the internal data structures and all the parameters
/// that have been given before.
///
/// This function resets the internal data structures and all the
/// parameters that have been given before using functions \ref lowerMap(),
/// \ref upperMap(), \ref costMap(), \ref supplyMap(), \ref stSupply(),
/// \ref supplyType().
///
/// It is useful for multiple \ref run() calls. Basically, all the given
/// parameters are kept for the next \ref run() call, unless
/// \ref resetParams() or \ref reset() is used.
/// If the underlying digraph was also modified after the construction
/// of the class or the last \ref reset() call, then the \ref reset()
/// function must be used, otherwise \ref resetParams() is sufficient.
///
/// See \ref resetParams() for examples.
/// /// \return (*this) /// /// \see resetParams(), run() NetworkSimplexSimple& reset() { // Resize vectors _node_num = _init_nb_nodes; _arc_num = _init_nb_arcs; int all_node_num = _node_num + 1; ArcsType max_arc_num = _arc_num + 2 * _node_num; _source.resize(max_arc_num); _target.resize(max_arc_num); _cost.resize(max_arc_num); _supply.resize(all_node_num); _flow.resize(max_arc_num); _pi.resize(all_node_num); _parent.resize(all_node_num); _pred.resize(all_node_num); _forward.resize(all_node_num); _thread.resize(all_node_num); _rev_thread.resize(all_node_num); _succ_num.resize(all_node_num); _last_succ.resize(all_node_num); _state.resize(max_arc_num); //_arc_mixing=false; if (_arc_mixing && _node_num > 1) { // Store the arcs in a mixed order //ArcsType k = std::max(ArcsType(std::sqrt(double(_arc_num))), ArcsType(10)); const ArcsType k = std::max(ArcsType(_arc_num / _node_num), ArcsType(3)); mixingCoeff = k; subsequence_length = _arc_num / mixingCoeff + 1; num_big_subsequences = _arc_num % mixingCoeff; num_total_big_subsequence_numbers = subsequence_length * num_big_subsequences; #pragma omp parallel for schedule(static) for (Arc a = 0; a <= _graph.maxArcId(); a++) { // --a <=> _graph.next(a) , -1 == INVALID ArcsType i = sequence(_graph.maxArcId()-a); _source[i] = _node_id(_graph.source(a)); _target[i] = _node_id(_graph.target(a)); } } else { // Store the arcs in the original order ArcsType i = 0; Arc a; _graph.first(a); for (; a != INVALID; _graph.next(a), ++i) { _source[i] = _node_id(_graph.source(a)); _target[i] = _node_id(_graph.target(a)); //_arc_id[a] = i; } } // Reset parameters resetParams(); return *this; } /// @} /// \name Query Functions /// The results of the algorithm can be obtained using these /// functions.\n /// The \ref run() function must be called before using them. /// @{ /// \brief Return the total cost of the found flow. /// /// This function returns the total cost of the found flow. /// Its complexity is O(e). /// /// \note The return type of the function can be specified as a /// template parameter. For example, /// \code /// ns.totalCost(); /// \endcode /// It is useful if the total cost cannot be stored in the \c Cost /// type of the algorithm, which is the default return type of the /// function. /// /// \pre \ref run() must be called before using this function. /*template Number totalCost() const { Number c = 0; for (ArcIt a(_graph); a != INVALID; ++a) { int i = getArcID(a); c += Number(_flow[i]) * Number(_cost[i]); } return c; }*/ template Number totalCost() const { Number c = 0; #ifdef SPARSE_FLOW #ifdef HASHMAP typename std::unordered_map::const_iterator it; #else typename std::map::const_iterator it; #endif for (it = _flow.data.begin(); it!=_flow.data.end(); ++it) c += Number(it->second) * Number(_cost[it->first]); return c; #else for (ArcsType i = 0; i<_flow.size(); i++) c += _flow[i] * Number(_cost[i]); return c; #endif } #ifndef DOXYGEN Cost totalCost() const { return totalCost(); } #endif /// \brief Return the flow on the given arc. /// /// This function returns the flow on the given arc. /// /// \pre \ref run() must be called before using this function. Value flow(const Arc& a) const { return _flow[getArcID(a)]; } /// \brief Return the flow map (the primal solution). /// /// This function copies the flow value on each arc into the given /// map. The \c Value type of the algorithm must be convertible to /// the \c Value type of the map. /// /// \pre \ref run() must be called before using this function. 
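///
/// Minimal usage sketch (illustrative; \c flow_map is an assumed
/// caller-provided arc map):
/// \code
///   if (ns.run() == ns.OPTIMAL) {
///     ns.flowMap(flow_map);  // with SPARSE_FLOW, arcs carrying no flow
///                            // simply read as 0 from the sparse vector
///   }
/// \endcode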
template void flowMap(FlowMap &map) const { Arc a; _graph.first(a); for (; a != INVALID; _graph.next(a)) { map.set(a, _flow[getArcID(a)]); } } /// \brief Return the potential (dual value) of the given node. /// /// This function returns the potential (dual value) of the /// given node. /// /// \pre \ref run() must be called before using this function. Cost potential(const Node& n) const { return _pi[_node_id(n)]; } /// \brief Return the potential map (the dual solution). /// /// This function copies the potential (dual value) of each node /// into the given map. /// The \c Cost type of the algorithm must be convertible to the /// \c Value type of the map. /// /// \pre \ref run() must be called before using this function. template void potentialMap(PotentialMap &map) const { Node n; _graph.first(n); for (; n != INVALID; _graph.next(n)) { map.set(n, _pi[_node_id(n)]); } } /// @} private: // Initialize internal data structures bool init() { if (_node_num == 0) return false; // Check the sum of supply values _sum_supply = 0; for (int i = 0; i != _node_num; ++i) { _sum_supply += _supply[i]; } /*if (!((_stype == GEQ && _sum_supply <= 0) || (_stype == LEQ && _sum_supply >= 0))) return false;*/ // Initialize artifical cost Cost ART_COST; if (std::numeric_limits::is_exact) { ART_COST = std::numeric_limits::max() / 2 + 1; } else { ART_COST = 0; for (ArcsType i = 0; i != _arc_num; ++i) { if (_cost[i] > ART_COST) ART_COST = _cost[i]; } ART_COST = (ART_COST + 1) * _node_num; } // Initialize arc maps for (ArcsType i = 0; i != _arc_num; ++i) { #ifndef SPARSE_FLOW _flow[i] = 0; //by default, the sparse matrix is empty #endif _state[i] = STATE_LOWER; } #ifdef SPARSE_FLOW _flow = SparseValueVector(); #endif // Set data for the artificial root node _root = _node_num; _parent[_root] = -1; _pred[_root] = -1; _thread[_root] = 0; _rev_thread[0] = _root; _succ_num[_root] = _node_num + 1; _last_succ[_root] = _root - 1; _supply[_root] = -_sum_supply; _pi[_root] = 0; // Add artificial arcs and initialize the spanning tree data structure if (_sum_supply == 0) { // EQ supply constraints _search_arc_num = _arc_num; _all_arc_num = _arc_num + _node_num; for (ArcsType u = 0, e = _arc_num; u != _node_num; ++u, ++e) { _parent[u] = _root; _pred[u] = e; _thread[u] = u + 1; _rev_thread[u + 1] = u; _succ_num[u] = 1; _last_succ[u] = u; _state[e] = STATE_TREE; if (_supply[u] >= 0) { _forward[u] = true; _pi[u] = 0; _source[e] = u; _target[e] = _root; _flow[e] = _supply[u]; _cost[e] = 0; } else { _forward[u] = false; _pi[u] = ART_COST; _source[e] = _root; _target[e] = u; _flow[e] = -_supply[u]; _cost[e] = ART_COST; } } } else if (_sum_supply > 0) { // LEQ supply constraints _search_arc_num = _arc_num + _node_num; ArcsType f = _arc_num + _node_num; for (ArcsType u = 0, e = _arc_num; u != _node_num; ++u, ++e) { _parent[u] = _root; _thread[u] = u + 1; _rev_thread[u + 1] = u; _succ_num[u] = 1; _last_succ[u] = u; if (_supply[u] >= 0) { _forward[u] = true; _pi[u] = 0; _pred[u] = e; _source[e] = u; _target[e] = _root; _flow[e] = _supply[u]; _cost[e] = 0; _state[e] = STATE_TREE; } else { _forward[u] = false; _pi[u] = ART_COST; _pred[u] = f; _source[f] = _root; _target[f] = u; _flow[f] = -_supply[u]; _cost[f] = ART_COST; _state[f] = STATE_TREE; _source[e] = u; _target[e] = _root; //_flow[e] = 0; //by default, the sparse matrix is empty _cost[e] = 0; _state[e] = STATE_LOWER; ++f; } } _all_arc_num = f; } else { // GEQ supply constraints _search_arc_num = _arc_num + _node_num; ArcsType f = _arc_num + _node_num; for (ArcsType u = 0, e = 
_arc_num; u != _node_num; ++u, ++e) { _parent[u] = _root; _thread[u] = u + 1; _rev_thread[u + 1] = u; _succ_num[u] = 1; _last_succ[u] = u; if (_supply[u] <= 0) { _forward[u] = false; _pi[u] = 0; _pred[u] = e; _source[e] = _root; _target[e] = u; _flow[e] = -_supply[u]; _cost[e] = 0; _state[e] = STATE_TREE; } else { _forward[u] = true; _pi[u] = -ART_COST; _pred[u] = f; _source[f] = u; _target[f] = _root; _flow[f] = _supply[u]; _state[f] = STATE_TREE; _cost[f] = ART_COST; _source[e] = _root; _target[e] = u; //_flow[e] = 0; //by default, the sparse matrix is empty _cost[e] = 0; _state[e] = STATE_LOWER; ++f; } } _all_arc_num = f; } return true; } // Find the join node void findJoinNode() { int u = _source[in_arc]; int v = _target[in_arc]; while (u != v) { if (_succ_num[u] < _succ_num[v]) { u = _parent[u]; } else { v = _parent[v]; } } join = u; } // Find the leaving arc of the cycle and returns true if the // leaving arc is not the same as the entering arc bool findLeavingArc() { // Initialize first and second nodes according to the direction // of the cycle if (_state[in_arc] == STATE_LOWER) { first = _source[in_arc]; second = _target[in_arc]; } else { first = _target[in_arc]; second = _source[in_arc]; } delta = INF; char result = 0; Value d; ArcsType e; // Search the cycle along the path form the first node to the root for (int u = first; u != join; u = _parent[u]) { e = _pred[u]; d = _forward[u] ? _flow[e] : INF; if (d < delta) { delta = d; u_out = u; result = 1; } } // Search the cycle along the path form the second node to the root for (int u = second; u != join; u = _parent[u]) { e = _pred[u]; d = _forward[u] ? INF : _flow[e]; if (d <= delta) { delta = d; u_out = u; result = 2; } } if (result == 1) { u_in = first; v_in = second; } else { u_in = second; v_in = first; } return result != 0; } // Change _flow and _state vectors void changeFlow(bool change) { // Augment along the cycle if (delta > 0) { Value val = _state[in_arc] * delta; _flow[in_arc] += val; for (int u = _source[in_arc]; u != join; u = _parent[u]) { _flow[_pred[u]] += _forward[u] ? -val : val; } for (int u = _target[in_arc]; u != join; u = _parent[u]) { _flow[_pred[u]] += _forward[u] ? val : -val; } } // Update the state of the entering and leaving arcs if (change) { _state[in_arc] = STATE_TREE; _state[_pred[u_out]] = (_flow[_pred[u_out]] == 0) ? STATE_LOWER : STATE_UPPER; } else { _state[in_arc] = -_state[in_arc]; } } // Update the tree structure void updateTreeStructure() { int old_rev_thread = _rev_thread[u_out]; int old_succ_num = _succ_num[u_out]; int old_last_succ = _last_succ[u_out]; v_out = _parent[u_out]; // Check if u_in and u_out coincide if (u_in == u_out) { // Update _parent, _pred, _pred_dir _parent[u_in] = v_in; _pred[u_in] = in_arc; _forward[u_in] = (u_in == _source[in_arc]); // Update _thread and _rev_thread if (_thread[v_in] != u_out) { ArcsType after = _thread[old_last_succ]; _thread[old_rev_thread] = after; _rev_thread[after] = old_rev_thread; after = _thread[v_in]; _thread[v_in] = u_out; _rev_thread[u_out] = v_in; _thread[old_last_succ] = after; _rev_thread[after] = old_last_succ; } } else { // Handle the case when old_rev_thread equals to v_in // (it also means that join and v_out coincide) int thread_continue = old_rev_thread == v_in ? _thread[old_last_succ] : _thread[v_in]; // Update _thread and _parent along the stem nodes (i.e. 
the nodes // between u_in and u_out, whose parent have to be changed) int stem = u_in; // the current stem node int par_stem = v_in; // the new parent of stem int next_stem; // the next stem node int last = _last_succ[u_in]; // the last successor of stem int before, after = _thread[last]; _thread[v_in] = u_in; _dirty_revs.clear(); _dirty_revs.push_back(v_in); while (stem != u_out) { // Insert the next stem node into the thread list next_stem = _parent[stem]; _thread[last] = next_stem; _dirty_revs.push_back(last); // Remove the subtree of stem from the thread list before = _rev_thread[stem]; _thread[before] = after; _rev_thread[after] = before; // Change the parent node and shift stem nodes _parent[stem] = par_stem; par_stem = stem; stem = next_stem; // Update last and after last = _last_succ[stem] == _last_succ[par_stem] ? _rev_thread[par_stem] : _last_succ[stem]; after = _thread[last]; } _parent[u_out] = par_stem; _thread[last] = thread_continue; _rev_thread[thread_continue] = last; _last_succ[u_out] = last; // Remove the subtree of u_out from the thread list except for // the case when old_rev_thread equals to v_in if (old_rev_thread != v_in) { _thread[old_rev_thread] = after; _rev_thread[after] = old_rev_thread; } // Update _rev_thread using the new _thread values for (int i = 0; i != int(_dirty_revs.size()); ++i) { int u = _dirty_revs[i]; _rev_thread[_thread[u]] = u; } // Update _pred, _pred_dir, _last_succ and _succ_num for the // stem nodes from u_out to u_in int tmp_sc = 0, tmp_ls = _last_succ[u_out]; for (int u = u_out, p = _parent[u]; u != u_in; u = p, p = _parent[u]) { _pred[u] = _pred[p]; _forward[u] = !_forward[p]; tmp_sc += _succ_num[u] - _succ_num[p]; _succ_num[u] = tmp_sc; _last_succ[p] = tmp_ls; } _pred[u_in] = in_arc; _forward[u_in] = (u_in == _source[in_arc]); _succ_num[u_in] = old_succ_num; } // Update _last_succ from v_in towards the root int up_limit_out = _last_succ[join] == v_in ? 
join : -1; int last_succ_out = _last_succ[u_out]; for (int u = v_in; u != -1 && _last_succ[u] == v_in; u = _parent[u]) { _last_succ[u] = last_succ_out; } // Update _last_succ from v_out towards the root if (join != old_rev_thread && v_in != old_rev_thread) { for (int u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ; u = _parent[u]) { _last_succ[u] = old_rev_thread; } } else if (last_succ_out != old_last_succ) { for (int u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ; u = _parent[u]) { _last_succ[u] = last_succ_out; } } // Update _succ_num from v_in to join for (int u = v_in; u != join; u = _parent[u]) { _succ_num[u] += old_succ_num; } // Update _succ_num from v_out to join for (int u = v_out; u != join; u = _parent[u]) { _succ_num[u] -= old_succ_num; } } void updatePotential() { Cost sigma = _pi[v_in] - _pi[u_in] - ((_forward[u_in])?_cost[in_arc]:(-_cost[in_arc])); int end = _thread[_last_succ[u_in]]; for (int u = u_in; u != end; u = _thread[u]) { _pi[u] += sigma; } } // Heuristic initial pivots bool initialPivots() { Value curr, total = 0; std::vector supply_nodes, demand_nodes; Node u; _graph.first(u); for (; u != INVALIDNODE; _graph.next(u)) { curr = _supply[_node_id(u)]; if (curr > 0) { total += curr; supply_nodes.push_back(u); } else if (curr < 0) { demand_nodes.push_back(u); } } if (_sum_supply > 0) total -= _sum_supply; if (total <= 0) return true; ArcVector arc_vector; if (_sum_supply >= 0) { if (supply_nodes.size() == 1 && demand_nodes.size() == 1) { // Perform a reverse graph search from the sink to the source //typename GR::template NodeMap reached(_graph, false); BoolVector reached(_node_num, false); Node s = supply_nodes[0], t = demand_nodes[0]; std::vector stack; reached[t] = true; stack.push_back(t); while (!stack.empty()) { Node u, v = stack.back(); stack.pop_back(); if (v == s) break; Arc a; _graph.firstIn(a, v); for (; a != INVALID; _graph.nextIn(a)) { if (reached[u = _graph.source(a)]) continue; ArcsType j = getArcID(a); arc_vector.push_back(j); reached[u] = true; stack.push_back(u); } } } else { arc_vector.resize(demand_nodes.size()); // Find the min. cost incomming arc for each demand node #pragma omp parallel for for (int i = 0; i < demand_nodes.size(); ++i) { Node v = demand_nodes[i]; Cost min_cost = std::numeric_limits::max(); Arc min_arc = INVALID; Arc a; _graph.firstIn(a, v); for (; a != INVALID; _graph.nextIn(a)) { Cost c = _cost[getArcID(a)]; if (c < min_cost) { min_cost = c; min_arc = a; } } arc_vector[i] = getArcID(min_arc); } arc_vector.erase(std::remove(arc_vector.begin(), arc_vector.end(), INVALID), arc_vector.end()); } } else { arc_vector.resize(supply_nodes.size()); // Find the min. 
cost outgoing arc for each supply node #pragma omp parallel for for (int i = 0; i < int(supply_nodes.size()); ++i) { Node u = supply_nodes[i]; Cost min_cost = std::numeric_limits::max(); Arc min_arc = INVALID; Arc a; _graph.firstOut(a, u); for (; a != INVALID; _graph.nextOut(a)) { Cost c = _cost[getArcID(a)]; if (c < min_cost) { min_cost = c; min_arc = a; } } arc_vector[i] = getArcID(min_arc); } arc_vector.erase(std::remove(arc_vector.begin(), arc_vector.end(), INVALID), arc_vector.end()); } // Perform heuristic initial pivots for (ArcsType i = 0; i != ArcsType(arc_vector.size()); ++i) { in_arc = arc_vector[i]; if (_state[in_arc] * (_cost[in_arc] + _pi[_source[in_arc]] - _pi[_target[in_arc]]) >= 0) continue; findJoinNode(); bool change = findLeavingArc(); if (delta >= MAX) return false; changeFlow(change); if (change) { updateTreeStructure(); updatePotential(); } } return true; } // Execute the algorithm ProblemType start() { return start(); } template ProblemType start() { PivotRuleImpl pivot(*this); ProblemType retVal = OPTIMAL; // Perform heuristic initial pivots if (!initialPivots()) return UNBOUNDED; uint64_t iter_number = 0; // Execute the Network Simplex algorithm while (pivot.findEnteringArc()) { if ((++iter_number <= max_iter&&max_iter > 0) || max_iter<=0) { #if DEBUG_LVL>0 if(iter_number>MAX_DEBUG_ITER) break; if(iter_number%1000==0||iter_number%1000==1){ Cost curCost=totalCost(); Value sumFlow=0; Cost a; a= (fabs(_pi[_source[in_arc]])>=fabs(_pi[_target[in_arc]])) ? fabs(_pi[_source[in_arc]]) : fabs(_pi[_target[in_arc]]); a=a>=fabs(_cost[in_arc])?a:fabs(_cost[in_arc]); for (int i=0; i<_flow.size(); i++) { sumFlow+=_state[i]*_flow[i]; } std::cout << "Sum of the flow " << std::setprecision(20) << sumFlow << "\n" << iter_number << " iterations, current cost=" << curCost << "\nReduced cost=" << _state[in_arc] * (_cost[in_arc] + _pi[_source[in_arc]] -_pi[_target[in_arc]]) << "\nPrecision = "<< -EPSILON*(a) << "\n"; std::cout << "Arc in = (" << _node_id(_source[in_arc]) << ", " << _node_id(_target[in_arc]) <<")\n"; std::cout << "Supplies = (" << _supply[_source[in_arc]] << ", " << _supply[_target[in_arc]] << ")\n"; std::cout << _cost[in_arc] << "\n"; std::cout << _pi[_source[in_arc]] << "\n"; std::cout << _pi[_target[in_arc]] << "\n"; std::cout << a << "\n"; } #endif findJoinNode(); bool change = findLeavingArc(); if (delta >= MAX) return UNBOUNDED; changeFlow(change); if (change) { updateTreeStructure(); updatePotential(); } #if DEBUG_LVL>0 else{ std::cout << "No change\n"; } #endif #if DEBUG_LVL>1 std::cout << "Arc in = (" << _source[in_arc] << ", " << _target[in_arc] << ")\n"; #endif } else { // max iters retVal = MAX_ITER_REACHED; break; } } #if DEBUG_LVL>0 Cost curCost=totalCost(); Value sumFlow=0; Cost a; a= (fabs(_pi[_source[in_arc]])>=fabs(_pi[_target[in_arc]])) ? 
fabs(_pi[_source[in_arc]]) : fabs(_pi[_target[in_arc]]); a=a>=fabs(_cost[in_arc])?a:fabs(_cost[in_arc]); for (int i=0; i<_flow.size(); i++) { sumFlow+=_state[i]*_flow[i]; } std::cout << "Sum of the flow " << std::setprecision(20) << sumFlow << "\n" << niter << " iterations, current cost=" << curCost << "\nReduced cost=" << _state[in_arc] * (_cost[in_arc] + _pi[_source[in_arc]] -_pi[_target[in_arc]]) << "\nPrecision = "<< -EPSILON*(a) << "\n"; std::cout << "Arc in = (" << _node_id(_source[in_arc]) << ", " << _node_id(_target[in_arc]) <<")\n"; std::cout << "Supplies = (" << _supply[_source[in_arc]] << ", " << _supply[_target[in_arc]] << ")\n"; #endif #if DEBUG_LVL>1 sumFlow=0; for (int i=0; i<_flow.size(); i++) { sumFlow+=_state[i]*_flow[i]; if (_state[i]==STATE_TREE) { std::cout << "Non zero value at (" << _node_num+1-_source[i] << ", " << _node_num+1-_target[i] << ")\n"; } } std::cout << "Sum of the flow " << sumFlow << "\n"<< niter <<" iterations, current cost=" << totalCost() << "\n"; #endif //Check feasibility if(retVal == OPTIMAL){ for (ArcsType e = _search_arc_num; e != _all_arc_num; ++e) { if (_flow[e] != 0){ if (fabs(_flow[e]) > _EPSILON) // change of the original code following issue #126 return INFEASIBLE; else _flow[e]=0; } } } // Shift potentials to meet the requirements of the GEQ/LEQ type // optimality conditions if (_sum_supply == 0) { if (_stype == GEQ) { Cost max_pot = -std::numeric_limits::max(); for (ArcsType i = 0; i != _node_num; ++i) { if (_pi[i] > max_pot) max_pot = _pi[i]; } if (max_pot > 0) { for (ArcsType i = 0; i != _node_num; ++i) _pi[i] -= max_pot; } } else { Cost min_pot = std::numeric_limits::max(); for (ArcsType i = 0; i != _node_num; ++i) { if (_pi[i] < min_pot) min_pot = _pi[i]; } if (min_pot < 0) { for (ArcsType i = 0; i != _node_num; ++i) _pi[i] -= min_pot; } } } return retVal; } }; //class NetworkSimplexSimple ///@} } //namespace lemon_omp python-pot-0.9.3+dfsg/ot/lp/solver_1d.py000066400000000000000000001053671455713015700201140ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Exact solvers for the 1D Wasserstein distance using cvxopt """ # Author: Remi Flamary # Author: Nicolas Courty # # License: MIT License import numpy as np import warnings from .emd_wrap import emd_1d_sorted from ..backend import get_backend from ..utils import list_to_array def quantile_function(qs, cws, xs): r""" Computes the quantile function of an empirical distribution Parameters ---------- qs: array-like, shape (n,) Quantiles at which the quantile function is evaluated cws: array-like, shape (m, ...) cumulative weights of the 1D empirical distribution, if batched, must be similar to xs xs: array-like, shape (n, ...) locations of the 1D empirical distribution, batched against the `xs.ndim - 1` first dimensions Returns ------- q: array-like, shape (..., n) The quantiles of the distribution """ nx = get_backend(qs, cws) n = xs.shape[0] if nx.__name__ == 'torch': # this is to ensure the best performance for torch searchsorted # and avoid a warning related to non-contiguous arrays cws = cws.T.contiguous() qs = qs.T.contiguous() else: cws = cws.T qs = qs.T idx = nx.searchsorted(cws, qs).T return nx.take_along_axis(xs, nx.clip(idx, 0, n - 1), axis=0) def wasserstein_1d(u_values, v_values, u_weights=None, v_weights=None, p=1, require_sort=True): r""" Computes the 1 dimensional OT loss [15] between two (batched) empirical distributions .. math: OT_{loss} = \int_0^1 |cdf_u^{-1}(q) - cdf_v^{-1}(q)|^p dq It is formally the p-Wasserstein distance raised to the power p. 
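For instance, a minimal doctest-style sketch on toy data (assuming a NumPy backend; weights default to uniform):

>>> import numpy as np
>>> x = np.array([2., 0.])
>>> y = np.array([0., 3.])
>>> float(wasserstein_1d(x, y, p=1))
0.5
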
We do so in a vectorized way by first building the individual quantile functions then integrating them. This function should be preferred to `emd_1d` whenever the backend is different to numpy, and when gradients over either sample positions or weights are required. Parameters ---------- u_values: array-like, shape (n, ...) locations of the first empirical distribution v_values: array-like, shape (m, ...) locations of the second empirical distribution u_weights: array-like, shape (n, ...), optional weights of the first empirical distribution, if None then uniform weights are used v_weights: array-like, shape (m, ...), optional weights of the second empirical distribution, if None then uniform weights are used p: int, optional order of the ground metric used, should be at least 1 (see [2, Chap. 2], default is 1 require_sort: bool, optional sort the distributions atoms locations, if False we will consider they have been sorted prior to being passed to the function, default is True Returns ------- cost: float/array-like, shape (...) the batched EMD References ---------- .. [15] PeyrĂ©, G., & Cuturi, M. (2018). Computational Optimal Transport. """ assert p >= 1, "The OT loss is only valid for p>=1, {p} was given".format(p=p) if u_weights is not None and v_weights is not None: nx = get_backend(u_values, v_values, u_weights, v_weights) else: nx = get_backend(u_values, v_values) n = u_values.shape[0] m = v_values.shape[0] if u_weights is None: u_weights = nx.full(u_values.shape, 1. / n, type_as=u_values) elif u_weights.ndim != u_values.ndim: u_weights = nx.repeat(u_weights[..., None], u_values.shape[-1], -1) if v_weights is None: v_weights = nx.full(v_values.shape, 1. / m, type_as=v_values) elif v_weights.ndim != v_values.ndim: v_weights = nx.repeat(v_weights[..., None], v_values.shape[-1], -1) if require_sort: u_sorter = nx.argsort(u_values, 0) u_values = nx.take_along_axis(u_values, u_sorter, 0) v_sorter = nx.argsort(v_values, 0) v_values = nx.take_along_axis(v_values, v_sorter, 0) u_weights = nx.take_along_axis(u_weights, u_sorter, 0) v_weights = nx.take_along_axis(v_weights, v_sorter, 0) u_cumweights = nx.cumsum(u_weights, 0) v_cumweights = nx.cumsum(v_weights, 0) qs = nx.sort(nx.concatenate((u_cumweights, v_cumweights), 0), 0) u_quantiles = quantile_function(qs, u_cumweights, u_values) v_quantiles = quantile_function(qs, v_cumweights, v_values) qs = nx.zero_pad(qs, pad_width=[(1, 0)] + (qs.ndim - 1) * [(0, 0)]) delta = qs[1:, ...] - qs[:-1, ...] diff_quantiles = nx.abs(u_quantiles - v_quantiles) if p == 1: return nx.sum(delta * diff_quantiles, axis=0) return nx.sum(delta * nx.power(diff_quantiles, p), axis=0) def emd_1d(x_a, x_b, a=None, b=None, metric='sqeuclidean', p=1., dense=True, log=False, check_marginals=True): r"""Solves the Earth Movers distance problem between 1d measures and returns the OT matrix .. math:: \gamma = arg\min_\gamma \sum_i \sum_j \gamma_{ij} d(x_a[i], x_b[j]) s.t. \gamma 1 = a, \gamma^T 1= b, \gamma\geq 0 where : - d is the metric - x_a and x_b are the samples - a and b are the sample weights When 'minkowski' is used as a metric, :math:`d(x, y) = |x - y|^p`. 
Uses the algorithm detailed in [1]_ Parameters ---------- x_a : (ns,) or (ns, 1) ndarray, float64 Source dirac locations (on the real line) x_b : (nt,) or (ns, 1) ndarray, float64 Target dirac locations (on the real line) a : (ns,) ndarray, float64, optional Source histogram (default is uniform weight) b : (nt,) ndarray, float64, optional Target histogram (default is uniform weight) metric: str, optional (default='sqeuclidean') Metric to be used. Only strings listed in :func:`ot.dist` are accepted. Due to implementation details, this function runs faster when `'sqeuclidean'`, `'cityblock'`, or `'euclidean'` metrics are used. p: float, optional (default=1.0) The p-norm to apply for if metric='minkowski' dense: boolean, optional (default=True) If True, returns math:`\gamma` as a dense ndarray of shape (ns, nt). Otherwise returns a sparse representation using scipy's `coo_matrix` format. Due to implementation details, this function runs faster when `'sqeuclidean'`, `'minkowski'`, `'cityblock'`, or `'euclidean'` metrics are used. log: boolean, optional (default=False) If True, returns a dictionary containing the cost. Otherwise returns only the optimal transportation matrix. check_marginals: bool, optional (default=True) If True, checks that the marginals mass are equal. If False, skips the check. Returns ------- gamma: (ns, nt) ndarray Optimal transportation matrix for the given parameters log: dict If input log is True, a dictionary containing the cost Examples -------- Simple example with obvious solution. The function emd_1d accepts lists and performs automatic conversion to numpy arrays >>> import ot >>> a=[.5, .5] >>> b=[.5, .5] >>> x_a = [2., 0.] >>> x_b = [0., 3.] >>> ot.emd_1d(x_a, x_b, a, b) array([[0. , 0.5], [0.5, 0. ]]) >>> ot.emd_1d(x_a, x_b) array([[0. , 0.5], [0.5, 0. ]]) References ---------- .. [1] PeyrĂ©, G., & Cuturi, M. (2017). "Computational Optimal Transport", 2018. 
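With ``log=True`` the function also returns a dictionary containing the cost (a minimal sketch continuing the example above):

>>> G, log = ot.emd_1d(x_a, x_b, a, b, log=True)
>>> float(log['cost'])
0.5
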
See Also -------- ot.lp.emd : EMD for multidimensional distributions ot.lp.emd2_1d : EMD for 1d distributions (returns cost instead of the transportation matrix) """ a, b, x_a, x_b = list_to_array(a, b, x_a, x_b) nx = get_backend(x_a, x_b) assert (x_a.ndim == 1 or x_a.ndim == 2 and x_a.shape[1] == 1), \ "emd_1d should only be used with monodimensional data" assert (x_b.ndim == 1 or x_b.ndim == 2 and x_b.shape[1] == 1), \ "emd_1d should only be used with monodimensional data" # if empty array given then use uniform distributions if a is None or a.ndim == 0 or len(a) == 0: a = nx.ones((x_a.shape[0],), type_as=x_a) / x_a.shape[0] if b is None or b.ndim == 0 or len(b) == 0: b = nx.ones((x_b.shape[0],), type_as=x_b) / x_b.shape[0] # ensure that same mass if check_marginals: np.testing.assert_almost_equal( nx.to_numpy(nx.sum(a, axis=0)), nx.to_numpy(nx.sum(b, axis=0)), err_msg='a and b vector must have the same sum', decimal=6 ) b = b * nx.sum(a) / nx.sum(b) x_a_1d = nx.reshape(x_a, (-1,)) x_b_1d = nx.reshape(x_b, (-1,)) perm_a = nx.argsort(x_a_1d) perm_b = nx.argsort(x_b_1d) G_sorted, indices, cost = emd_1d_sorted( nx.to_numpy(a[perm_a]).astype(np.float64), nx.to_numpy(b[perm_b]).astype(np.float64), nx.to_numpy(x_a_1d[perm_a]).astype(np.float64), nx.to_numpy(x_b_1d[perm_b]).astype(np.float64), metric=metric, p=p ) G = nx.coo_matrix( G_sorted, perm_a[indices[:, 0]], perm_b[indices[:, 1]], shape=(a.shape[0], b.shape[0]), type_as=x_a ) if dense: G = nx.todense(G) elif str(nx) == "jax": warnings.warn("JAX does not support sparse matrices, converting to dense") if log: log = {'cost': nx.from_numpy(cost, type_as=x_a)} return G, log return G def emd2_1d(x_a, x_b, a=None, b=None, metric='sqeuclidean', p=1., dense=True, log=False): r"""Solves the Earth Movers distance problem between 1d measures and returns the loss .. math:: \gamma = arg\min_\gamma \sum_i \sum_j \gamma_{ij} d(x_a[i], x_b[j]) s.t. \gamma 1 = a, \gamma^T 1= b, \gamma\geq 0 where : - d is the metric - x_a and x_b are the samples - a and b are the sample weights When 'minkowski' is used as a metric, :math:`d(x, y) = |x - y|^p`. Uses the algorithm detailed in [1]_ Parameters ---------- x_a : (ns,) or (ns, 1) ndarray, float64 Source dirac locations (on the real line) x_b : (nt,) or (ns, 1) ndarray, float64 Target dirac locations (on the real line) a : (ns,) ndarray, float64, optional Source histogram (default is uniform weight) b : (nt,) ndarray, float64, optional Target histogram (default is uniform weight) metric: str, optional (default='sqeuclidean') Metric to be used. Only strings listed in :func:`ot.dist` are accepted. Due to implementation details, this function runs faster when `'sqeuclidean'`, `'minkowski'`, `'cityblock'`, or `'euclidean'` metrics are used. p: float, optional (default=1.0) The p-norm to apply for if metric='minkowski' dense: boolean, optional (default=True) If True, returns math:`\gamma` as a dense ndarray of shape (ns, nt). Otherwise returns a sparse representation using scipy's `coo_matrix` format. Only used if log is set to True. Due to implementation details, this function runs faster when dense is set to False. log: boolean, optional (default=False) If True, returns a dictionary containing the transportation matrix. Otherwise returns only the loss. Returns ------- loss: float Cost associated to the optimal transportation log: dict If input log is True, a dictionary containing the Optimal transportation matrix for the given parameters Examples -------- Simple example with obvious solution. 
The function emd2_1d accepts lists and performs automatic conversion to numpy arrays >>> import ot >>> a=[.5, .5] >>> b=[.5, .5] >>> x_a = [2., 0.] >>> x_b = [0., 3.] >>> ot.emd2_1d(x_a, x_b, a, b) 0.5 >>> ot.emd2_1d(x_a, x_b) 0.5 References ---------- .. [1] Peyré, G., & Cuturi, M. (2017). "Computational Optimal Transport", 2018. See Also -------- ot.lp.emd2 : EMD for multidimensional distributions ot.lp.emd_1d : EMD for 1d distributions (returns the transportation matrix instead of the cost) """ # If we do not return G (log==False), then we should not cast it to dense # (useless overhead) G, log_emd = emd_1d(x_a=x_a, x_b=x_b, a=a, b=b, metric=metric, p=p, dense=dense and log, log=True) cost = log_emd['cost'] if log: log_emd = {'G': G} return cost, log_emd return cost def roll_cols(M, shifts): r""" Utility function that shifts each row of a 2d matrix, with a possibly different offset per row Parameters ---------- M : (nr, nc) ndarray Matrix to shift shifts: int or (nr,) ndarray Offset(s) by which each row is rolled Returns ------- Shifted array Examples -------- >>> M = np.array([[1,2,3],[4,5,6],[7,8,9]]) >>> roll_cols(M, 2) array([[2, 3, 1], [5, 6, 4], [8, 9, 7]]) >>> roll_cols(M, np.array([[1],[2],[1]])) array([[3, 1, 2], [5, 6, 4], [9, 7, 8]]) References ---------- https://stackoverflow.com/questions/66596699/how-to-shift-columns-or-rows-in-a-tensor-with-different-offsets-in-pytorch """ nx = get_backend(M) n_rows, n_cols = M.shape arange1 = nx.tile(nx.reshape(nx.arange(n_cols, type_as=shifts), (1, n_cols)), (n_rows, 1)) arange2 = (arange1 - shifts) % n_cols return nx.take_along_axis(M, arange2, 1) def derivative_cost_on_circle(theta, u_values, v_values, u_cdf, v_cdf, p=2): r""" Computes the left and right derivatives of the cost (Equations (6.3) and (6.4) of [44]) Parameters ---------- theta: array-like, shape (n_batch, n) Cuts on the circle u_values: array-like, shape (n_batch, n) locations of the first empirical distribution v_values: array-like, shape (n_batch, n) locations of the second empirical distribution u_cdf: array-like, shape (n_batch, n) cdf of the first empirical distribution v_cdf: array-like, shape (n_batch, n) cdf of the second empirical distribution p: float, optional (default=2) Power p used for computing the Wasserstein distance Returns ------- dCp: array-like, shape (n_batch, 1) The batched right derivative dCm: array-like, shape (n_batch, 1) The batched left derivative References --------- .. [44] Delon, Julie, Julien Salomon, and Andrei Sobolevski. "Fast transport optimization for Monge costs on the circle." SIAM Journal on Applied Mathematics 70.7 (2010): 2239-2258. 
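Examples
--------
A minimal shape-level sketch on toy data (assuming a NumPy backend; locations must be sorted and the cdfs precomputed from the weights):

>>> import numpy as np
>>> u = np.array([[0.2, 0.5, 0.8]])
>>> v = np.array([[0.4, 0.5, 0.7]])
>>> cdf = np.cumsum(np.full((1, 3), 1 / 3), axis=-1)
>>> dCp, dCm = derivative_cost_on_circle(np.zeros((1, 3)), u, v, cdf, cdf)
>>> dCp.shape, dCm.shape
((1, 1), (1, 1))
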
""" nx = get_backend(theta, u_values, v_values, u_cdf, v_cdf) v_values = nx.copy(v_values) n = u_values.shape[-1] m_batch, m = v_values.shape v_cdf_theta = v_cdf - (theta - nx.floor(theta)) mask_p = v_cdf_theta >= 0 mask_n = v_cdf_theta < 0 v_values[mask_n] += nx.floor(theta)[mask_n] + 1 v_values[mask_p] += nx.floor(theta)[mask_p] if nx.any(mask_n) and nx.any(mask_p): v_cdf_theta[mask_n] += 1 v_cdf_theta2 = nx.copy(v_cdf_theta) v_cdf_theta2[mask_n] = np.inf shift = (-nx.argmin(v_cdf_theta2, axis=-1)) v_cdf_theta = roll_cols(v_cdf_theta, nx.reshape(shift, (-1, 1))) v_values = roll_cols(v_values, nx.reshape(shift, (-1, 1))) v_values = nx.concatenate([v_values, nx.reshape(v_values[:, 0], (-1, 1)) + 1], axis=1) if nx.__name__ == 'torch': # this is to ensure the best performance for torch searchsorted # and avoid a warning related to non-contiguous arrays u_cdf = u_cdf.contiguous() v_cdf_theta = v_cdf_theta.contiguous() # quantiles of F_u evaluated in F_v^\theta u_index = nx.searchsorted(u_cdf, v_cdf_theta) u_icdf_theta = nx.take_along_axis(u_values, nx.clip(u_index, 0, n - 1), -1) # Deal with 1 u_cdfm = nx.concatenate([u_cdf, nx.reshape(u_cdf[:, 0], (-1, 1)) + 1], axis=1) u_valuesm = nx.concatenate([u_values, nx.reshape(u_values[:, 0], (-1, 1)) + 1], axis=1) if nx.__name__ == 'torch': # this is to ensure the best performance for torch searchsorted # and avoid a warning related to non-contiguous arrays u_cdfm = u_cdfm.contiguous() v_cdf_theta = v_cdf_theta.contiguous() u_indexm = nx.searchsorted(u_cdfm, v_cdf_theta, side="right") u_icdfm_theta = nx.take_along_axis(u_valuesm, nx.clip(u_indexm, 0, n), -1) dCp = nx.sum(nx.power(nx.abs(u_icdf_theta - v_values[:, 1:]), p) - nx.power(nx.abs(u_icdf_theta - v_values[:, :-1]), p), axis=-1) dCm = nx.sum(nx.power(nx.abs(u_icdfm_theta - v_values[:, 1:]), p) - nx.power(nx.abs(u_icdfm_theta - v_values[:, :-1]), p), axis=-1) return dCp.reshape(-1, 1), dCm.reshape(-1, 1) def ot_cost_on_circle(theta, u_values, v_values, u_cdf, v_cdf, p): r""" Computes the the cost (Equation (6.2) of [1]) Parameters ---------- theta: array-like, shape (n_batch, n) Cuts on the circle u_values: array-like, shape (n_batch, n) locations of the first empirical distribution v_values: array-like, shape (n_batch, n) locations of the second empirical distribution u_cdf: array-like, shape (n_batch, n) cdf of the first empirical distribution v_cdf: array-like, shape (n_batch, n) cdf of the second empirical distribution p: float, optional = 2 Power p used for computing the Wasserstein distance Returns ------- ot_cost: array-like, shape (n_batch,) OT cost evaluated at theta References --------- .. [44] Delon, Julie, Julien Salomon, and Andrei Sobolevski. "Fast transport optimization for Monge costs on the circle." SIAM Journal on Applied Mathematics 70.7 (2010): 2239-2258. 
""" nx = get_backend(theta, u_values, v_values, u_cdf, v_cdf) v_values = nx.copy(v_values) m_batch, m = v_values.shape n_batch, n = u_values.shape v_cdf_theta = v_cdf - (theta - nx.floor(theta)) mask_p = v_cdf_theta >= 0 mask_n = v_cdf_theta < 0 v_values[mask_n] += nx.floor(theta)[mask_n] + 1 v_values[mask_p] += nx.floor(theta)[mask_p] if nx.any(mask_n) and nx.any(mask_p): v_cdf_theta[mask_n] += 1 # Put negative values at the end v_cdf_theta2 = nx.copy(v_cdf_theta) v_cdf_theta2[mask_n] = np.inf shift = (-nx.argmin(v_cdf_theta2, axis=-1)) v_cdf_theta = roll_cols(v_cdf_theta, nx.reshape(shift, (-1, 1))) v_values = roll_cols(v_values, nx.reshape(shift, (-1, 1))) v_values = nx.concatenate([v_values, nx.reshape(v_values[:, 0], (-1, 1)) + 1], axis=1) # Compute absciss cdf_axis = nx.sort(nx.concatenate((u_cdf, v_cdf_theta), -1), -1) cdf_axis_pad = nx.zero_pad(cdf_axis, pad_width=[(0, 0), (1, 0)]) delta = cdf_axis_pad[..., 1:] - cdf_axis_pad[..., :-1] if nx.__name__ == 'torch': # this is to ensure the best performance for torch searchsorted # and avoid a warninng related to non-contiguous arrays u_cdf = u_cdf.contiguous() v_cdf_theta = v_cdf_theta.contiguous() cdf_axis = cdf_axis.contiguous() # Compute icdf u_index = nx.searchsorted(u_cdf, cdf_axis) u_icdf = nx.take_along_axis(u_values, u_index.clip(0, n - 1), -1) v_values = nx.concatenate([v_values, nx.reshape(v_values[:, 0], (-1, 1)) + 1], axis=1) v_index = nx.searchsorted(v_cdf_theta, cdf_axis) v_icdf = nx.take_along_axis(v_values, v_index.clip(0, m), -1) if p == 1: ot_cost = nx.sum(delta * nx.abs(u_icdf - v_icdf), axis=-1) else: ot_cost = nx.sum(delta * nx.power(nx.abs(u_icdf - v_icdf), p), axis=-1) return ot_cost def binary_search_circle(u_values, v_values, u_weights=None, v_weights=None, p=1, Lm=10, Lp=10, tm=-1, tp=1, eps=1e-6, require_sort=True, log=False): r"""Computes the Wasserstein distance on the circle using the Binary search algorithm proposed in [44]. Samples need to be in :math:`S^1\cong [0,1[`. If they are on :math:`\mathbb{R}`, takes the value modulo 1. If the values are on :math:`S^1\subset\mathbb{R}^2`, it is required to first find the coordinates using e.g. the atan2 function. .. math:: W_p^p(u,v) = \inf_{\theta\in\mathbb{R}}\int_0^1 |F_u^{-1}(q) - (F_v-\theta)^{-1}(q)|^p\ \mathrm{d}q where: - :math:`F_u` and :math:`F_v` are respectively the cdfs of :math:`u` and :math:`v` For values :math:`x=(x_1,x_2)\in S^1`, it is required to first get their coordinates with .. math:: u = \frac{\pi + \mathrm{atan2}(-x_2,-x_1)}{2\pi} using e.g. ot.utils.get_coordinate_circle(x) The function runs on backend but tensorflow and jax are not supported. Parameters ---------- u_values : ndarray, shape (n, ...) samples in the source domain (coordinates on [0,1[) v_values : ndarray, shape (n, ...) samples in the target domain (coordinates on [0,1[) u_weights : ndarray, shape (n, ...), optional samples weights in the source domain v_weights : ndarray, shape (n, ...), optional samples weights in the target domain p : float, optional (default=1) Power p used for computing the Wasserstein distance Lm : int, optional Lower bound dC Lp : int, optional Upper bound dC tm: float, optional Lower bound theta tp: float, optional Upper bound theta eps: float, optional Stopping condition require_sort: bool, optional If True, sort the values. 
log: bool, optional If True, returns also the optimal theta Returns ------- loss: float Cost associated to the optimal transportation log: dict, optional log dictionary returned only if log==True in parameters Examples -------- >>> u = np.array([[0.2,0.5,0.8]])%1 >>> v = np.array([[0.4,0.5,0.7]])%1 >>> binary_search_circle(u.T, v.T, p=1) array([0.1]) References ---------- .. [44] Delon, Julie, Julien Salomon, and Andrei Sobolevski. "Fast transport optimization for Monge costs on the circle." SIAM Journal on Applied Mathematics 70.7 (2010): 2239-2258. .. Matlab Code: https://users.mccme.ru/ansobol/otarie/software.html """ assert p >= 1, "The OT loss is only valid for p>=1, {p} was given".format(p=p) if u_weights is not None and v_weights is not None: nx = get_backend(u_values, v_values, u_weights, v_weights) else: nx = get_backend(u_values, v_values) n = u_values.shape[0] m = v_values.shape[0] if len(u_values.shape) == 1: u_values = nx.reshape(u_values, (n, 1)) if len(v_values.shape) == 1: v_values = nx.reshape(v_values, (m, 1)) if u_values.shape[1] != v_values.shape[1]: raise ValueError( "u and v must have the same number of batches {} and {} respectively given".format(u_values.shape[1], v_values.shape[1])) u_values = u_values % 1 v_values = v_values % 1 if u_weights is None: u_weights = nx.full(u_values.shape, 1. / n, type_as=u_values) elif u_weights.ndim != u_values.ndim: u_weights = nx.repeat(u_weights[..., None], u_values.shape[-1], -1) if v_weights is None: v_weights = nx.full(v_values.shape, 1. / m, type_as=v_values) elif v_weights.ndim != v_values.ndim: v_weights = nx.repeat(v_weights[..., None], v_values.shape[-1], -1) if require_sort: u_sorter = nx.argsort(u_values, 0) u_values = nx.take_along_axis(u_values, u_sorter, 0) v_sorter = nx.argsort(v_values, 0) v_values = nx.take_along_axis(v_values, v_sorter, 0) u_weights = nx.take_along_axis(u_weights, u_sorter, 0) v_weights = nx.take_along_axis(v_weights, v_sorter, 0) u_cdf = nx.cumsum(u_weights, 0).T v_cdf = nx.cumsum(v_weights, 0).T u_values = u_values.T v_values = v_values.T L = max(Lm, Lp) tm = tm * nx.reshape(nx.ones((u_values.shape[0],), type_as=u_values), (-1, 1)) tm = nx.tile(tm, (1, m)) tp = tp * nx.reshape(nx.ones((u_values.shape[0],), type_as=u_values), (-1, 1)) tp = nx.tile(tp, (1, m)) tc = (tm + tp) / 2 done = nx.zeros((u_values.shape[0], m)) cpt = 0 while nx.any(1 - done): cpt += 1 dCp, dCm = derivative_cost_on_circle(tc, u_values, v_values, u_cdf, v_cdf, p) done = ((dCp * dCm) <= 0) * 1 mask = ((tp - tm) < eps / L) * (1 - done) if nx.any(mask): # can probably be improved by computing only relevant values dCptp, dCmtp = derivative_cost_on_circle(tp, u_values, v_values, u_cdf, v_cdf, p) dCptm, dCmtm = derivative_cost_on_circle(tm, u_values, v_values, u_cdf, v_cdf, p) Ctm = ot_cost_on_circle(tm, u_values, v_values, u_cdf, v_cdf, p).reshape(-1, 1) Ctp = ot_cost_on_circle(tp, u_values, v_values, u_cdf, v_cdf, p).reshape(-1, 1) mask_end = mask * (nx.abs(dCptm - dCmtp) > 0.001) tc[mask_end > 0] = ((Ctp - Ctm + tm * dCptm - tp * dCmtp) / (dCptm - dCmtp))[mask_end > 0] done[nx.prod(mask, axis=-1) > 0] = 1 elif nx.any(1 - done): tm[((1 - mask) * (dCp < 0)) > 0] = tc[((1 - mask) * (dCp < 0)) > 0] tp[((1 - mask) * (dCp >= 0)) > 0] = tc[((1 - mask) * (dCp >= 0)) > 0] tc[((1 - mask) * (1 - done)) > 0] = (tm[((1 - mask) * (1 - done)) > 0] + tp[((1 - mask) * (1 - done)) > 0]) / 2 w = ot_cost_on_circle(nx.detach(tc), u_values, v_values, u_cdf, v_cdf, p) if log: return w, {"optimal_theta": tc[:, 0]} return w def 
wasserstein1_circle(u_values, v_values, u_weights=None, v_weights=None, require_sort=True): r"""Computes the 1-Wasserstein distance on the circle using the level median [45]. Samples need to be in :math:`S^1\cong [0,1[`. If they are on :math:`\mathbb{R}`, takes the value modulo 1. If the values are on :math:`S^1\subset\mathbb{R}^2`, it is required to first find the coordinates using e.g. the atan2 function. The function supports all backends except tensorflow and jax. .. math:: W_1(u,v) = \int_0^1 |F_u(t)-F_v(t)-LevMed(F_u-F_v)|\ \mathrm{d}t Parameters ---------- u_values : ndarray, shape (n, ...) samples in the source domain (coordinates on [0,1[) v_values : ndarray, shape (n, ...) samples in the target domain (coordinates on [0,1[) u_weights : ndarray, shape (n, ...), optional samples weights in the source domain v_weights : ndarray, shape (n, ...), optional samples weights in the target domain require_sort: bool, optional If True, sort the values. Returns ------- loss: float Cost associated to the optimal transportation Examples -------- >>> u = np.array([[0.2,0.5,0.8]])%1 >>> v = np.array([[0.4,0.5,0.7]])%1 >>> wasserstein1_circle(u.T, v.T) array([0.1]) References ---------- .. [45] Hundrieser, Shayan, Marcel Klatt, and Axel Munk. "The statistics of circular optimal transport." Directional Statistics for Innovative Applications: A Bicentennial Tribute to Florence Nightingale. Singapore: Springer Nature Singapore, 2022. 57-82. .. R code: https://gitlab.gwdg.de/shundri/circularOT/-/tree/master/ """ if u_weights is not None and v_weights is not None: nx = get_backend(u_values, v_values, u_weights, v_weights) else: nx = get_backend(u_values, v_values) n = u_values.shape[0] m = v_values.shape[0] if len(u_values.shape) == 1: u_values = nx.reshape(u_values, (n, 1)) if len(v_values.shape) == 1: v_values = nx.reshape(v_values, (m, 1)) if u_values.shape[1] != v_values.shape[1]: raise ValueError( "u and v must have the same number of batches {} and {} respectively given".format(u_values.shape[1], v_values.shape[1])) u_values = u_values % 1 v_values = v_values % 1 if u_weights is None: u_weights = nx.full(u_values.shape, 1. / n, type_as=u_values) elif u_weights.ndim != u_values.ndim: u_weights = nx.repeat(u_weights[..., None], u_values.shape[-1], -1) if v_weights is None: v_weights = nx.full(v_values.shape, 1. / m, type_as=v_values) elif v_weights.ndim != v_values.ndim: v_weights = nx.repeat(v_weights[..., None], v_values.shape[-1], -1) if require_sort: u_sorter = nx.argsort(u_values, 0) u_values = nx.take_along_axis(u_values, u_sorter, 0) v_sorter = nx.argsort(v_values, 0) v_values = nx.take_along_axis(v_values, v_sorter, 0) u_weights = nx.take_along_axis(u_weights, u_sorter, 0) v_weights = nx.take_along_axis(v_weights, v_sorter, 0) # Code inspired by https://gitlab.gwdg.de/shundri/circularOT/-/tree/master/ values_sorted, values_sorter = nx.sort2(nx.concatenate((u_values, v_values), 0), 0) cdf_diff = nx.cumsum(nx.take_along_axis(nx.concatenate((u_weights, -v_weights), 0), values_sorter, 0), 0) cdf_diff_sorted, cdf_diff_sorter = nx.sort2(cdf_diff, axis=0) values_sorted = nx.zero_pad(values_sorted, pad_width=[(0, 1), (0, 0)], value=1) delta = values_sorted[1:, ...] - values_sorted[:-1, ...] weight_sorted = nx.take_along_axis(delta, cdf_diff_sorter, 0) sum_weights = nx.cumsum(weight_sorted, axis=0) - 0.5 sum_weights[sum_weights < 0] = np.inf inds = nx.argmin(sum_weights, axis=0) levMed = nx.take_along_axis(cdf_diff_sorted, nx.reshape(inds, (1, -1)), 0) return nx.sum(delta * nx.abs(cdf_diff - levMed), axis=0) def wasserstein_circle(u_values, v_values, u_weights=None, v_weights=None, p=1, Lm=10, Lp=10, tm=-1, tp=1, eps=1e-6, require_sort=True): r"""Computes the Wasserstein distance on the circle using either the level median [45] for p=1 or the binary search algorithm proposed in [44] otherwise. Samples need to be in :math:`S^1\cong [0,1[`. If they are on :math:`\mathbb{R}`, takes the value modulo 1. If the values are on :math:`S^1\subset\mathbb{R}^2`, it is required to first find the coordinates using e.g. the atan2 function. General loss returned: .. math:: OT_{loss} = \inf_{\theta\in\mathbb{R}}\int_0^1 |cdf_u^{-1}(q) - (cdf_v-\theta)^{-1}(q)|^p\ \mathrm{d}q For p=1, [45] .. math:: W_1(u,v) = \int_0^1 |F_u(t)-F_v(t)-LevMed(F_u-F_v)|\ \mathrm{d}t For values :math:`x=(x_1,x_2)\in S^1`, it is required to first get their coordinates with .. math:: u = \frac{\pi + \mathrm{atan2}(-x_2,-x_1)}{2\pi} using e.g. ot.utils.get_coordinate_circle(x) The function supports all backends except tensorflow and jax. Parameters ---------- u_values : ndarray, shape (n, ...) samples in the source domain (coordinates on [0,1[) v_values : ndarray, shape (n, ...) samples in the target domain (coordinates on [0,1[) u_weights : ndarray, shape (n, ...), optional samples weights in the source domain v_weights : ndarray, shape (n, ...), optional samples weights in the target domain p : float, optional (default=1) Power p used for computing the Wasserstein distance Lm : int, optional Lower bound dC. For p>1. Lp : int, optional Upper bound dC. For p>1. tm: float, optional Lower bound theta. For p>1. tp: float, optional Upper bound theta. For p>1. eps: float, optional Stopping condition. For p>1. require_sort: bool, optional If True, sort the values. Returns ------- loss: float Cost associated to the optimal transportation Examples -------- >>> u = np.array([[0.2,0.5,0.8]])%1 >>> v = np.array([[0.4,0.5,0.7]])%1 >>> wasserstein_circle(u.T, v.T) array([0.1]) References ---------- .. [44] Delon, Julie, Julien Salomon, and Andrei Sobolevski. "Fast transport optimization for Monge costs on the circle." SIAM Journal on Applied Mathematics 70.7 (2010): 2239-2258. .. [45] Hundrieser, Shayan, Marcel Klatt, and Axel Munk. "The statistics of circular optimal transport." Directional Statistics for Innovative Applications: A Bicentennial Tribute to Florence Nightingale. Singapore: Springer Nature Singapore, 2022. 57-82. """ assert p >= 1, "The OT loss is only valid for p>=1, {p} was given".format(p=p) if p == 1: return wasserstein1_circle(u_values, v_values, u_weights, v_weights, require_sort) return binary_search_circle(u_values, v_values, u_weights, v_weights, p=p, Lm=Lm, Lp=Lp, tm=tm, tp=tp, eps=eps, require_sort=require_sort) def semidiscrete_wasserstein2_unif_circle(u_values, u_weights=None): r"""Computes the closed-form expression of the 2-Wasserstein distance between samples and a uniform distribution on :math:`S^1` Samples need to be in :math:`S^1\cong [0,1[`. If they are on :math:`\mathbb{R}`, takes the value modulo 1. If the values are on :math:`S^1\subset\mathbb{R}^2`, it is required to first find the coordinates using e.g. the atan2 function. ..
math:: W_2^2(\mu_n, \nu) = \sum_{i=1}^n \alpha_i x_i^2 - \left(\sum_{i=1}^n \alpha_i x_i\right)^2 + \sum_{i=1}^n \alpha_i x_i \left(1-\alpha_i-2\sum_{k=1}^{i-1}\alpha_k\right) + \frac{1}{12} where: - :math:`\nu=\mathrm{Unif}(S^1)` and :math:`\mu_n = \sum_{i=1}^n \alpha_i \delta_{x_i}` For values :math:`x=(x_1,x_2)\in S^1`, it is required to first get their coordinates with .. math:: u = \frac{\pi + \mathrm{atan2}(-x_2,-x_1)}{2\pi}, using e.g. ot.utils.get_coordinate_circle(x) Parameters ---------- u_values: ndarray, shape (n, ...) Samples u_weights : ndarray, shape (n, ...), optional samples weights in the source domain Returns ------- loss: float Cost associated to the optimal transportation Examples -------- >>> x0 = np.array([[0], [0.2], [0.4]]) >>> semidiscrete_wasserstein2_unif_circle(x0) array([0.02111111]) References ---------- .. [46] Bonet, C., Berg, P., Courty, N., Septier, F., Drumetz, L., & Pham, M. T. (2023). Spherical sliced-wasserstein. International Conference on Learning Representations. """ if u_weights is not None: nx = get_backend(u_values, u_weights) else: nx = get_backend(u_values) n = u_values.shape[0] u_values = u_values % 1 if len(u_values.shape) == 1: u_values = nx.reshape(u_values, (n, 1)) if u_weights is None: u_weights = nx.full(u_values.shape, 1. / n, type_as=u_values) elif u_weights.ndim != u_values.ndim: u_weights = nx.repeat(u_weights[..., None], u_values.shape[-1], -1) u_values = nx.sort(u_values, 0) u_cdf = nx.cumsum(u_weights, 0) u_cdf = nx.zero_pad(u_cdf, [(1, 0), (0, 0)]) cpt1 = nx.sum(u_weights * u_values**2, axis=0) u_mean = nx.sum(u_weights * u_values, axis=0) ns = 1 - u_weights - 2 * u_cdf[:-1] cpt2 = nx.sum(u_values * u_weights * ns, axis=0) return cpt1 - u_mean**2 + cpt2 + 1 / 12 python-pot-0.9.3+dfsg/ot/mapping.py000066400000000000000000000663231455713015700172340ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Optimal Transport maps and variants .. warning:: Note that by default the module is not imported in :mod:`ot`. In order to use it you need to explicitly import :mod:`ot.mapping` """ # Author: Eloi Tanguy # Remi Flamary # # License: MIT License from .backend import get_backend, to_numpy from .lp import emd import numpy as np from .optim import cg from .utils import dist, unif, list_to_array, kernel, dots def nearest_brenier_potential_fit(X, V, X_classes=None, a=None, b=None, strongly_convex_constant=.6, gradient_lipschitz_constant=1.4, its=100, log=False, init_method='barycentric'): r""" Computes optimal values and gradients at X for a strongly convex potential :math:`\varphi` with Lipschitz gradients on the partitions defined by `X_classes`, where :math:`\varphi` is optimal such that :math:`\nabla \varphi \#\mu \approx \nu`, given samples :math:`X = x_1, \cdots, x_n \sim \mu` and :math:`V = v_1, \cdots, v_n \sim \nu`. Finding such a potential that has the desired regularity on the partition :math:`(E_k)_{k \in [K]}` (given by the classes `X_classes`) is equivalent to finding optimal values `phi` for the :math:`\varphi(x_i)` and its gradients :math:`\nabla \varphi(x_i)` (variable`G`). In practice, these optimal values are found by solving the following problem .. 
math:: :nowrap: \begin{gather*} \text{min} \sum_{i,j}\pi_{i,j}\|g_i - v_j\|_2^2 \\ g_1,\cdots, g_n \in \mathbb{R}^d,\; \varphi_1, \cdots, \varphi_n \in \mathbb{R},\; \pi \in \Pi(a, b) \\ \text{s.t.}\ \forall k \in [K],\; \forall i,j \in I_k: \\ \varphi_i-\varphi_j-\langle g_j, x_i-x_j\rangle \geq c_1\|g_i - g_j\|_2^2 + c_2\|x_i-x_j\|_2^2 - c_3\langle g_j-g_i, x_j -x_i \rangle. \end{gather*} The constants :math:`c_1, c_2, c_3` only depend on `strongly_convex_constant` and `gradient_lipschitz_constant`. The constraint :math:`\pi \in \Pi(a, b)` denotes the fact that the matrix :math:`\pi` belong to the OT polytope of marginals a and b. :math:`I_k` is the subset of :math:`[n]` of the i such that :math:`x_i` is in the partition (or class) :math:`E_k`, i.e. `X_classes[i] == k`. This problem is solved by alternating over the variable :math:`\pi` and the variables :math:`\varphi_i, g_i`. For :math:`\pi`, the problem is the standard discrete OT problem, and for :math:`\varphi_i, g_i`, the problem is a convex QCQP solved using :code:`cvxpy` (ECOS solver). Accepts any compatible backend, but will perform the QCQP optimisation on Numpy arrays, and convert back at the end. .. warning:: This function requires the CVXPY library .. warning:: Accepts any backend but will convert to Numpy then back to the backend. Parameters ---------- X : array-like (n, d) reference points used to compute the optimal values phi and G V : array-like (n, d) values of the gradients at the reference points X X_classes : array-like (n,), optional classes of the reference points, defaults to a single class a : array-like (n,), optional weights for the reference points X, defaults to uniform b : array-like (n,), optional weights for the target points V, defaults to uniform strongly_convex_constant : float, optional constant for the strong convexity of the input potential phi, defaults to 0.6 gradient_lipschitz_constant : float, optional constant for the Lipschitz property of the input gradient G, defaults to 1.4 its: int, optional number of iterations, defaults to 100 log : bool, optional record log if true init_method : str, optional 'target' initialises G=V, 'barycentric' initialises at the image of X by the barycentric projection Returns ------- phi : array-like (n,) optimal values of the potential at the points X G : array-like (n, d) optimal values of the gradients at the points X log : dict, optional If input log is true, a dictionary containing the values of the variables at each iteration, as well as solver information References ---------- .. [58] François-Pierre Paty, Alexandre d’Aspremont, and Marco Cuturi. Regularity as regularization: Smooth and strongly convex brenier potentials in optimal transport. In International Conference on Artificial Intelligence and Statistics, pages 1222–1232. PMLR, 2020. 
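Examples
--------
A minimal usage sketch on hypothetical toy data (requires the optional CVXPY dependency, hence the skipped doctest; few iterations for speed):

>>> import numpy as np
>>> X = np.random.randn(10, 2)
>>> V = X + 0.1 * np.random.randn(10, 2)
>>> phi, G = nearest_brenier_potential_fit(X, V, its=3)  # doctest: +SKIP
>>> G.shape  # doctest: +SKIP
(10, 2)
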
See Also -------- ot.mapping.nearest_brenier_potential_predict_bounds : Predicting SSNB images on new source data ot.da.NearestBrenierPotential : BaseTransport wrapper for SSNB """ try: import cvxpy as cvx except ImportError: print('Please install CVXPY to use this function') return assert X.shape == V.shape, f"point shape should be the same as value shape, yet {X.shape} != {V.shape}" nx = get_backend(X, V, X_classes, a, b) X, V = to_numpy(X), to_numpy(V) n, d = X.shape if X_classes is not None: X_classes = to_numpy(X_classes) assert X_classes.size == n, "incorrect number of class items" else: X_classes = np.zeros(n) a = unif(n) if a is None else nx.to_numpy(a) b = unif(n) if b is None else nx.to_numpy(b) assert a.shape[-1] == b.shape[-1] == n, 'incorrect measure weight sizes' assert init_method in ['target', 'barycentric'], f"Unsupported initialization method '{init_method}'" if init_method == 'target': G_val = V else: # Init G_val with barycentric projection G_val = emd(a, b, dist(X, V)) @ V / a.reshape(n, 1) phi_val = None log_dict = { 'G_list': [], 'phi_list': [], 'its': [] } for _ in range(its): # alternate optimisation iterations cost_matrix = dist(G_val, V) # optimise the plan plan = emd(a, b, cost_matrix) # optimise the values phi and the gradients G phi = cvx.Variable(n) G = cvx.Variable((n, d)) constraints = [] cost = 0 for i in range(n): for j in range(n): cost += cvx.sum_squares(G[i, :] - V[j, :]) * plan[i, j] objective = cvx.Minimize(cost) # OT cost c1, c2, c3 = _ssnb_qcqp_constants(strongly_convex_constant, gradient_lipschitz_constant) for k in np.unique(X_classes): # constraints for the convex interpolation for i in np.where(X_classes == k)[0]: for j in np.where(X_classes == k)[0]: constraints += [ phi[i] >= phi[j] + G[j].T @ (X[i] - X[j]) + c1 * cvx.sum_squares(G[i] - G[j]) + c2 * cvx.sum_squares(X[i] - X[j]) - c3 * (G[j] - G[i]).T @ (X[j] - X[i]) ] problem = cvx.Problem(objective, constraints) problem.solve(solver=cvx.ECOS) phi_val, G_val = phi.value, G.value it_log_dict = { 'solve_time': problem.solver_stats.solve_time, 'setup_time': problem.solver_stats.setup_time, 'num_iters': problem.solver_stats.num_iters, 'status': problem.status, 'value': problem.value } if log: log_dict['its'].append(it_log_dict) log_dict['G_list'].append(G_val) log_dict['phi_list'].append(phi_val) # convert back to backend phi_val = nx.from_numpy(phi_val) G_val = nx.from_numpy(G_val) if not log: return phi_val, G_val return phi_val, G_val, log_dict def _ssnb_qcqp_constants(strongly_convex_constant, gradient_lipschitz_constant): r""" Handy function computing the constants for the Nearest Brenier Potential QCQP problems Parameters ---------- strongly_convex_constant : float gradient_lipschitz_constant : float Returns ------- c1 : float c2 : float c3 : float """ assert 0 < strongly_convex_constant < gradient_lipschitz_constant, "incompatible regularity assumption" c = 1 / (2 * (1 - strongly_convex_constant / gradient_lipschitz_constant)) c1 = c / gradient_lipschitz_constant c2 = strongly_convex_constant * c c3 = 2 * strongly_convex_constant * c / gradient_lipschitz_constant return c1, c2, c3 def nearest_brenier_potential_predict_bounds(X, phi, G, Y, X_classes=None, Y_classes=None, strongly_convex_constant=0.6, gradient_lipschitz_constant=1.4, log=False): r""" Compute the values of the lower and upper bounding potentials at the input points Y, using the potential optimal values phi at X and their gradients G at X. 
The 'lower' potential corresponds to the method from :ref:`[58]`, Equation 2, while the bounding property and 'upper' potential come from :ref:`[59]`, Theorem 3.14 (taking into account the fact that this theorem's statement has a min instead of a max, which is a typo). Both potentials are optimal for the SSNB problem. If :math:`I_k` is the subset of :math:`[n]` of the i such that :math:`x_i` is in the partition (or class) :math:`E_k`, for each :math:`y \in E_k`, this function solves the convex QCQP problems, respectively for l: 'lower' and u: 'upper': .. math:: :nowrap: \begin{gather*} (\varphi_{l}(x), \nabla \varphi_l(x)) = \text{argmin}\ t, \\ t\in \mathbb{R},\; g\in \mathbb{R}^d, \\ \text{s.t.} \forall j \in I_k,\; t-\varphi_j - \langle g_j, y-x_j \rangle \geq c_1\|g - g_j\|_2^2 + c_2\|y-x_j\|_2^2 - c_3\langle g_j-g, x_j -y \rangle. \end{gather*} .. math:: :nowrap: \begin{gather*} (\varphi_{u}(x), \nabla \varphi_u(x)) = \text{argmax}\ t, \\ t\in \mathbb{R},\; g\in \mathbb{R}^d, \\ \text{s.t.} \forall i \in I_k,\; \varphi_i^* -t - \langle g, x_i-y \rangle \geq c_1\|g_i - g\|_2^2 + c_2\|x_i-y\|_2^2 - c_3\langle g-g_i, y -x_i \rangle. \end{gather*} The constants :math:`c_1, c_2, c_3` only depend on `strongly_convex_constant` and `gradient_lipschitz_constant`. .. warning:: This function requires the CVXPY library .. warning:: Accepts any backend but will convert to Numpy then back to the backend. Parameters ---------- X : array-like (n, d) reference points used to compute the optimal values phi and G X_classes : array-like (n,) classes of the reference points phi : array-like (n,) optimal values of the potential at the points X G : array-like (n, d) optimal values of the gradients at the points X Y : array-like (m, d) input points X_classes : array-like (n,), optional classes of the reference points, defaults to a single class Y_classes : array_like (m,), optional classes of the input points, defaults to a single class strongly_convex_constant : float, optional constant for the strong convexity of the input potential phi, defaults to 0.6 gradient_lipschitz_constant : float, optional constant for the Lipschitz property of the input gradient G, defaults to 1.4 log : bool, optional record log if true Returns ------- phi_lu: array-like (2, m) values of the lower and upper bounding potentials at Y G_lu: array-like (2, m, d) gradients of the lower and upper bounding potentials at Y log : dict, optional If input log is true, a dictionary containing solver information References ---------- .. [58] François-Pierre Paty, Alexandre d’Aspremont, and Marco Cuturi. Regularity as regularization: Smooth and strongly convex brenier potentials in optimal transport. In International Conference on Artificial Intelligence and Statistics, pages 1222–1232. PMLR, 2020. .. [59] Adrien B Taylor. Convex interpolation and performance estimation of first-order methods for convex optimization. PhD thesis, Catholic University of Louvain, Louvain-la-Neuve, Belgium, 2017. 
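Examples
--------
A minimal usage sketch reusing ``X``, ``phi`` and ``G`` from a previous call to ``nearest_brenier_potential_fit`` (hypothetical toy data; requires the optional CVXPY dependency, hence the skipped doctest):

>>> import numpy as np
>>> Y = np.random.randn(5, 2)  # doctest: +SKIP
>>> phi_lu, G_lu = nearest_brenier_potential_predict_bounds(X, phi, G, Y)  # doctest: +SKIP
>>> phi_lu.shape, G_lu.shape  # doctest: +SKIP
((2, 5), (2, 5, 2))
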
See Also -------- ot.mapping.nearest_brenier_potential_fit : Fitting the SSNB on source and target data ot.da.NearestBrenierPotential : BaseTransport wrapper for SSNB """ try: import cvxpy as cvx except ImportError: print('Please install CVXPY to use this function') return nx = get_backend(X, phi, G, Y) X = to_numpy(X) phi = to_numpy(phi) G = to_numpy(G) Y = to_numpy(Y) m, d = Y.shape if Y_classes is not None: Y_classes = to_numpy(Y_classes) assert Y_classes.size == m, 'wrong number of class items for Y' else: Y_classes = np.zeros(m) assert X.shape[1] == d, f'incompatible dimensions between X: {X.shape} and Y: {Y.shape}' n, _ = X.shape if X_classes is not None: X_classes = to_numpy(X_classes) assert X_classes.size == n, "incorrect number of class items" else: X_classes = np.zeros(n) assert X_classes.size == n, 'wrong number of class items for X' c1, c2, c3 = _ssnb_qcqp_constants(strongly_convex_constant, gradient_lipschitz_constant) phi_lu = np.zeros((2, m)) G_lu = np.zeros((2, m, d)) log_dict = {} for y_idx in range(m): log_item = {} # lower bound phi_l_y = cvx.Variable(1) G_l_y = cvx.Variable(d) objective = cvx.Minimize(phi_l_y) constraints = [] k = Y_classes[y_idx] for j in np.where(X_classes == k)[0]: constraints += [ phi_l_y >= phi[j] + G[j].T @ (Y[y_idx] - X[j]) + c1 * cvx.sum_squares(G_l_y - G[j]) + c2 * cvx.sum_squares(Y[y_idx] - X[j]) - c3 * (G[j] - G_l_y).T @ (X[j] - Y[y_idx]) ] problem = cvx.Problem(objective, constraints) problem.solve(solver=cvx.ECOS) phi_lu[0, y_idx] = phi_l_y.value G_lu[0, y_idx] = G_l_y.value if log: log_item['l'] = { 'solve_time': problem.solver_stats.solve_time, 'setup_time': problem.solver_stats.setup_time, 'num_iters': problem.solver_stats.num_iters, 'status': problem.status, 'value': problem.value } # upper bound phi_u_y = cvx.Variable(1) G_u_y = cvx.Variable(d) objective = cvx.Maximize(phi_u_y) constraints = [] for i in np.where(X_classes == k)[0]: constraints += [ phi[i] >= phi_u_y + G_u_y.T @ (X[i] - Y[y_idx]) + c1 * cvx.sum_squares(G[i] - G_u_y) + c2 * cvx.sum_squares(X[i] - Y[y_idx]) - c3 * (G_u_y - G[i]).T @ (Y[y_idx] - X[i]) ] problem = cvx.Problem(objective, constraints) problem.solve(solver=cvx.ECOS) phi_lu[1, y_idx] = phi_u_y.value G_lu[1, y_idx] = G_u_y.value if log: log_item['u'] = { 'solve_time': problem.solver_stats.solve_time, 'setup_time': problem.solver_stats.setup_time, 'num_iters': problem.solver_stats.num_iters, 'status': problem.status, 'value': problem.value } log_dict[y_idx] = log_item phi_lu, G_lu = nx.from_numpy(phi_lu), nx.from_numpy(G_lu) if not log: return phi_lu, G_lu return phi_lu, G_lu, log_dict def joint_OT_mapping_linear(xs, xt, mu=1, eta=0.001, bias=False, verbose=False, verbose2=False, numItermax=100, numInnerItermax=10, stopInnerThr=1e-6, stopThr=1e-5, log=False, **kwargs): r"""Joint OT and linear mapping estimation as proposed in :ref:`[8] `. The function solves the following optimization problem: .. math:: \min_{\gamma,L}\quad \|L(\mathbf{X_s}) - n_s\gamma \mathbf{X_t} \|^2_F + \mu \langle \gamma, \mathbf{M} \rangle_F + \eta \|L - \mathbf{I}\|^2_F s.t. 
\ \gamma \mathbf{1} = \mathbf{a} \gamma^T \mathbf{1} = \mathbf{b} \gamma \geq 0 where : - :math:`\mathbf{M}` is the (`ns`, `nt`) squared euclidean cost matrix between samples in :math:`\mathbf{X_s}` and :math:`\mathbf{X_t}` (scaled by :math:`n_s`) - :math:`L` is a :math:`d\times d` linear operator that approximates the barycentric mapping - :math:`\mathbf{I}` is the identity matrix (neutral linear mapping) - :math:`\mathbf{a}` and :math:`\mathbf{b}` are uniform source and target weights The problem consist in solving jointly an optimal transport matrix :math:`\gamma` and a linear mapping that fits the barycentric mapping :math:`n_s\gamma \mathbf{X_t}`. One can also estimate a mapping with constant bias (see supplementary material of :ref:`[8] `) using the bias optional argument. The algorithm used for solving the problem is the block coordinate descent that alternates between updates of :math:`\mathbf{G}` (using conditional gradient) and the update of :math:`\mathbf{L}` using a classical least square solver. Parameters ---------- xs : array-like (ns,d) samples in the source domain xt : array-like (nt,d) samples in the target domain mu : float,optional Weight for the linear OT loss (>0) eta : float, optional Regularization term for the linear mapping L (>0) bias : bool,optional Estimate linear mapping with constant bias numItermax : int, optional Max number of BCD iterations stopThr : float, optional Stop threshold on relative loss decrease (>0) numInnerItermax : int, optional Max number of iterations (inner CG solver) stopInnerThr : float, optional Stop threshold on error (inner CG solver) (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True Returns ------- gamma : (ns, nt) array-like Optimal transportation matrix for the given parameters L : (d, d) array-like Linear mapping matrix ((:math:`d+1`, `d`) if bias) log : dict log dictionary return only if log==True in parameters .. _references-joint-OT-mapping-linear: References ---------- .. [8] M. Perrot, N. Courty, R. Flamary, A. Habrard, "Mapping estimation for discrete optimal transport", Neural Information Processing Systems (NIPS), 2016. 
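Examples
--------
A minimal usage sketch on hypothetical toy data (a linear map between point clouds; ``numItermax`` kept small for speed):

>>> import numpy as np
>>> xs = np.random.randn(20, 2)
>>> xt = xs @ np.array([[1.5, 0.], [0., 0.5]])
>>> G, L = joint_OT_mapping_linear(xs, xt, mu=1, eta=1e-3, numItermax=2)
>>> G.shape, L.shape
((20, 20), (2, 2))
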
See Also -------- ot.lp.emd : Unregularized OT ot.optim.cg : General regularized OT """ xs, xt = list_to_array(xs, xt) nx = get_backend(xs, xt) ns, nt, d = xs.shape[0], xt.shape[0], xt.shape[1] if bias: xs1 = nx.concatenate((xs, nx.ones((ns, 1), type_as=xs)), axis=1) xstxs = nx.dot(xs1.T, xs1) Id = nx.eye(d + 1, type_as=xs) Id[-1] = 0 I0 = Id[:, :-1] def sel(x): return x[:-1, :] else: xs1 = xs xstxs = nx.dot(xs1.T, xs1) Id = nx.eye(d, type_as=xs) I0 = Id def sel(x): return x if log: log = {'err': []} a = unif(ns, type_as=xs) b = unif(nt, type_as=xt) M = dist(xs, xt) * ns G = emd(a, b, M) vloss = [] def loss(L, G): """Compute full loss""" return ( nx.sum((nx.dot(xs1, L) - ns * nx.dot(G, xt)) ** 2) + mu * nx.sum(G * M) + eta * nx.sum(sel(L - I0) ** 2) ) def solve_L(G): """ solve L problem with fixed G (least square)""" xst = ns * nx.dot(G, xt) return nx.solve(xstxs + eta * Id, nx.dot(xs1.T, xst) + eta * I0) def solve_G(L, G0): """Update G with CG algorithm""" xsi = nx.dot(xs1, L) def f(G): return nx.sum((xsi - ns * nx.dot(G, xt)) ** 2) def df(G): return -2 * ns * nx.dot(xsi - ns * nx.dot(G, xt), xt.T) G = cg(a, b, M, 1.0 / mu, f, df, G0=G0, numItermax=numInnerItermax, stopThr=stopInnerThr) return G L = solve_L(G) vloss.append(loss(L, G)) if verbose: print('{:5s}|{:12s}|{:8s}'.format( 'It.', 'Loss', 'Delta loss') + '\n' + '-' * 32) print('{:5d}|{:8e}|{:8e}'.format(0, vloss[-1], 0)) # init loop if numItermax > 0: loop = 1 else: loop = 0 it = 0 while loop: it += 1 # update G G = solve_G(L, G) # update L L = solve_L(G) vloss.append(loss(L, G)) if it >= numItermax: loop = 0 if abs(vloss[-1] - vloss[-2]) / abs(vloss[-2]) < stopThr: loop = 0 if verbose: if it % 20 == 0: print('{:5s}|{:12s}|{:8s}'.format( 'It.', 'Loss', 'Delta loss') + '\n' + '-' * 32) print('{:5d}|{:8e}|{:8e}'.format( it, vloss[-1], (vloss[-1] - vloss[-2]) / abs(vloss[-2]))) if log: log['loss'] = vloss return G, L, log else: return G, L def joint_OT_mapping_kernel(xs, xt, mu=1, eta=0.001, kerneltype='gaussian', sigma=1, bias=False, verbose=False, verbose2=False, numItermax=100, numInnerItermax=10, stopInnerThr=1e-6, stopThr=1e-5, log=False, **kwargs): r"""Joint OT and nonlinear mapping estimation with kernels as proposed in :ref:`[8] `. The function solves the following optimization problem: .. math:: \min_{\gamma, L\in\mathcal{H}}\quad \|L(\mathbf{X_s}) - n_s\gamma \mathbf{X_t}\|^2_F + \mu \langle \gamma, \mathbf{M} \rangle_F + \eta \|L\|^2_\mathcal{H} s.t. \ \gamma \mathbf{1} = \mathbf{a} \gamma^T \mathbf{1} = \mathbf{b} \gamma \geq 0 where : - :math:`\mathbf{M}` is the (`ns`, `nt`) squared euclidean cost matrix between samples in :math:`\mathbf{X_s}` and :math:`\mathbf{X_t}` (scaled by :math:`n_s`) - :math:`L` is a :math:`n_s \times d` linear operator on a kernel matrix that approximates the barycentric mapping - :math:`\mathbf{a}` and :math:`\mathbf{b}` are uniform source and target weights The problem consist in solving jointly an optimal transport matrix :math:`\gamma` and the nonlinear mapping that fits the barycentric mapping :math:`n_s\gamma \mathbf{X_t}`. One can also estimate a mapping with constant bias (see supplementary material of :ref:`[8] `) using the bias optional argument. The algorithm used for solving the problem is the block coordinate descent that alternates between updates of :math:`\mathbf{G}` (using conditional gradient) and the update of :math:`\mathbf{L}` using a classical kernel least square solver. 
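A minimal usage sketch on hypothetical toy data (mirroring the linear variant above; with ``bias=True`` the returned mapping matrix has an extra row):

>>> import numpy as np
>>> xs = np.random.randn(20, 2)
>>> xt = xs + 0.1 * np.random.randn(20, 2)
>>> G, L = joint_OT_mapping_kernel(xs, xt, sigma=1, bias=True, numItermax=2)
>>> G.shape, L.shape
((20, 20), (21, 2))
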
Parameters ---------- xs : array-like (ns,d) samples in the source domain xt : array-like (nt,d) samples in the target domain mu : float,optional Weight for the linear OT loss (>0) eta : float, optional Regularization term for the linear mapping L (>0) kerneltype : str,optional kernel used by calling function :py:func:`ot.utils.kernel` (gaussian by default) sigma : float, optional Gaussian kernel bandwidth. bias : bool,optional Estimate linear mapping with constant bias verbose : bool, optional Print information along iterations verbose2 : bool, optional Print information along iterations numItermax : int, optional Max number of BCD iterations numInnerItermax : int, optional Max number of iterations (inner CG solver) stopInnerThr : float, optional Stop threshold on error (inner CG solver) (>0) stopThr : float, optional Stop threshold on relative loss decrease (>0) log : bool, optional record log if True Returns ------- gamma : (ns, nt) array-like Optimal transportation matrix for the given parameters L : (ns, d) array-like Nonlinear mapping matrix ((:math:`n_s+1`, `d`) if bias) log : dict log dictionary return only if log==True in parameters .. _references-joint-OT-mapping-kernel: References ---------- .. [8] M. Perrot, N. Courty, R. Flamary, A. Habrard, "Mapping estimation for discrete optimal transport", Neural Information Processing Systems (NIPS), 2016. See Also -------- ot.lp.emd : Unregularized OT ot.optim.cg : General regularized OT """ xs, xt = list_to_array(xs, xt) nx = get_backend(xs, xt) ns, nt = xs.shape[0], xt.shape[0] K = kernel(xs, xs, method=kerneltype, sigma=sigma) if bias: K1 = nx.concatenate((K, nx.ones((ns, 1), type_as=xs)), axis=1) Id = nx.eye(ns + 1, type_as=xs) Id[-1] = 0 Kp = nx.eye(ns + 1, type_as=xs) Kp[:ns, :ns] = K # ls regu # K0 = K1.T.dot(K1)+eta*I # Kreg=I # RKHS regul K0 = nx.dot(K1.T, K1) + eta * Kp Kreg = Kp else: K1 = K Id = nx.eye(ns, type_as=xs) # ls regul # K0 = K1.T.dot(K1)+eta*I # Kreg=I # proper kernel ridge K0 = K + eta * Id Kreg = K if log: log = {'err': []} a = unif(ns, type_as=xs) b = unif(nt, type_as=xt) M = dist(xs, xt) * ns G = emd(a, b, M) vloss = [] def loss(L, G): """Compute full loss""" return ( nx.sum((nx.dot(K1, L) - ns * nx.dot(G, xt)) ** 2) + mu * nx.sum(G * M) + eta * nx.trace(dots(L.T, Kreg, L)) ) def solve_L_nobias(G): """ solve L problem with fixed G (least square)""" xst = ns * nx.dot(G, xt) return nx.solve(K0, xst) def solve_L_bias(G): """ solve L problem with fixed G (least square)""" xst = ns * nx.dot(G, xt) return nx.solve(K0, nx.dot(K1.T, xst)) def solve_G(L, G0): """Update G with CG algorithm""" xsi = nx.dot(K1, L) def f(G): return nx.sum((xsi - ns * nx.dot(G, xt)) ** 2) def df(G): return -2 * ns * nx.dot(xsi - ns * nx.dot(G, xt), xt.T) G = cg(a, b, M, 1.0 / mu, f, df, G0=G0, numItermax=numInnerItermax, stopThr=stopInnerThr) return G if bias: solve_L = solve_L_bias else: solve_L = solve_L_nobias L = solve_L(G) vloss.append(loss(L, G)) if verbose: print('{:5s}|{:12s}|{:8s}'.format( 'It.', 'Loss', 'Delta loss') + '\n' + '-' * 32) print('{:5d}|{:8e}|{:8e}'.format(0, vloss[-1], 0)) # init loop if numItermax > 0: loop = 1 else: loop = 0 it = 0 while loop: it += 1 # update G G = solve_G(L, G) # update L L = solve_L(G) vloss.append(loss(L, G)) if it >= numItermax: loop = 0 if abs(vloss[-1] - vloss[-2]) / abs(vloss[-2]) < stopThr: loop = 0 if verbose: if it % 20 == 0: print('{:5s}|{:12s}|{:8s}'.format( 'It.', 'Loss', 'Delta loss') + '\n' + '-' * 32) print('{:5d}|{:8e}|{:8e}'.format( it, vloss[-1], (vloss[-1] - vloss[-2]) / 
abs(vloss[-2]))) if log: log['loss'] = vloss return G, L, log else: return G, L python-pot-0.9.3+dfsg/ot/optim.py000066400000000000000000000474241455713015700167320ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Generic solvers for regularized OT or its semi-relaxed version. """ # Author: Remi Flamary # Titouan Vayer # CĂ©dric Vincent-Cuaz # License: MIT License import numpy as np import warnings from .lp import emd from .bregman import sinkhorn from .utils import list_to_array from .backend import get_backend with warnings.catch_warnings(): warnings.simplefilter("ignore") try: from scipy.optimize import scalar_search_armijo except ImportError: from scipy.optimize.linesearch import scalar_search_armijo # The corresponding scipy function does not work for matrices def line_search_armijo( f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=0.99, alpha_min=0., alpha_max=None, nx=None, **kwargs ): r""" Armijo linesearch function that works with matrices Find an approximate minimum of :math:`f(x_k + \alpha \cdot p_k)` that satisfies the armijo conditions. .. note:: If the loss function f returns a float (resp. a 1d array) then the returned alpha and fa are float (resp. 1d arrays). Parameters ---------- f : callable loss function xk : array-like initial position pk : array-like descent direction gfk : array-like gradient of `f` at :math:`x_k` old_fval : float or 1d array loss value at :math:`x_k` args : tuple, optional arguments given to `f` c1 : float, optional :math:`c_1` const in armijo rule (>0) alpha0 : float, optional initial step (>0) alpha_min : float, default=0. minimum value for alpha alpha_max : float, optional maximum value for alpha nx : backend, optional If let to its default value None, a backend test will be conducted. Returns ------- alpha : float or 1d array step that satisfy armijo conditions fc : int nb of function call fa : float or 1d array loss value at step alpha """ if nx is None: xk, pk, gfk = list_to_array(xk, pk, gfk) xk0, pk0 = xk, pk nx = get_backend(xk0, pk0) else: xk0, pk0 = xk, pk if len(xk.shape) == 0: xk = nx.reshape(xk, (-1,)) xk = nx.to_numpy(xk) pk = nx.to_numpy(pk) gfk = nx.to_numpy(gfk) fc = [0] def phi(alpha1): # it's necessary to check boundary condition here for the coefficient # as the callback could be evaluated for negative value of alpha by # `scalar_search_armijo` function here: # # https://github.com/scipy/scipy/blob/11509c4a98edded6c59423ac44ca1b7f28fba1fd/scipy/optimize/linesearch.py#L686 # # see more details https://github.com/PythonOT/POT/issues/502 alpha1 = np.clip(alpha1, alpha_min, alpha_max) # The callable function operates on nx backend fc[0] += 1 alpha10 = nx.from_numpy(alpha1) fval = f(xk0 + alpha10 * pk0, *args) if isinstance(fval, float): # prevent bug from nx.to_numpy that can look for .cpu or .gpu return fval else: return nx.to_numpy(fval) if old_fval is None: phi0 = phi(0.) 
elif isinstance(old_fval, float): # prevent bug from nx.to_numpy that can look for .cpu or .gpu phi0 = old_fval else: phi0 = nx.to_numpy(old_fval) derphi0 = np.sum(pk * gfk) # Quickfix for matrices alpha, phi1 = scalar_search_armijo( phi, phi0, derphi0, c1=c1, alpha0=alpha0, amin=alpha_min) if alpha is None: return 0., fc[0], nx.from_numpy(phi0, type_as=xk0) else: alpha = np.clip(alpha, alpha_min, alpha_max) return nx.from_numpy(alpha, type_as=xk0), fc[0], nx.from_numpy(phi1, type_as=xk0) def generic_conditional_gradient(a, b, M, f, df, reg1, reg2, lp_solver, line_search, G0=None, numItermax=200, stopThr=1e-9, stopThr2=1e-9, verbose=False, log=False, **kwargs): r""" Solve the general regularized OT problem or its semi-relaxed version with conditional gradient or generalized conditional gradient depending on the provided linear program solver. The function solves the following optimization problem if set as a conditional gradient: .. math:: \gamma = \mathop{\arg \min}_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F + \mathrm{reg_1} \cdot f(\gamma) s.t. \ \gamma \mathbf{1} &= \mathbf{a} \gamma^T \mathbf{1} &= \mathbf{b} (optional constraint) \gamma &\geq 0 where : - :math:`\mathbf{M}` is the (`ns`, `nt`) metric cost matrix - :math:`f` is the regularization term (and `df` is its gradient) - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target weights (sum to 1) The algorithm used for solving the problem is conditional gradient as discussed in :ref:`[1] ` The function solves the following optimization problem if set a generalized conditional gradient: .. math:: \gamma = \mathop{\arg \min}_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F + \mathrm{reg_1}\cdot f(\gamma) + \mathrm{reg_2}\cdot\Omega(\gamma) s.t. \ \gamma \mathbf{1} &= \mathbf{a} \gamma^T \mathbf{1} &= \mathbf{b} \gamma &\geq 0 where : - :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` The algorithm used for solving the problem is the generalized conditional gradient as discussed in :ref:`[5, 7] ` Parameters ---------- a : array-like, shape (ns,) samples weights in the source domain b : array-like, shape (nt,) samples weights in the target domain M : array-like, shape (ns, nt) loss matrix f : function Regularization function taking a transportation matrix as argument df: function Gradient of the regularization function taking a transportation matrix as argument reg1 : float Regularization term >0 reg2 : float, Entropic Regularization term >0. Ignored if set to None. lp_solver: function, linear program solver for direction finding of the (generalized) conditional gradient. If set to emd will solve the general regularized OT problem using cg. If set to lp_semi_relaxed_OT will solve the general regularized semi-relaxed OT problem using cg. If set to sinkhorn will solve the general regularized OT problem using generalized cg. line_search: function, Function to find the optimal step. Currently used instances are: line_search_armijo (generic solver). solve_gromov_linesearch for (F)GW problem. solve_semirelaxed_gromov_linesearch for sr(F)GW problem. gcg_linesearch for the Generalized cg. 
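        A custom line_search is expected to follow the call pattern used in
        the main loop of this solver (a sketch inferred from the code below,
        not a formal API)::

            alpha, fc, cost_G = line_search(cost, G, deltaG, Mi, cost_G, **kwargs)

        where `alpha` is the retained step size, `fc` the number of cost
        evaluations and `cost_G` the cost at the new iterate.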
G0 : array-like, shape (ns,nt), optional initial guess (default is indep joint density) numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on the relative variation (>0) stopThr2 : float, optional Stop threshold on the absolute variation (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True **kwargs : dict Parameters for linesearch Returns ------- gamma : (ns x nt) ndarray Optimal transportation matrix for the given parameters log : dict log dictionary return only if log==True in parameters .. _references-cg: .. _references_gcg: References ---------- .. [1] Ferradans, S., Papadakis, N., PeyrĂ©, G., & Aujol, J. F. (2014). Regularized discrete optimal transport. SIAM Journal on Imaging Sciences, 7(3), 1853-1882. .. [5] N. Courty; R. Flamary; D. Tuia; A. Rakotomamonjy, "Optimal Transport for Domain Adaptation," in IEEE Transactions on Pattern Analysis and Machine Intelligence , vol.PP, no.99, pp.1-1 .. [7] Rakotomamonjy, A., Flamary, R., & Courty, N. (2015). Generalized conditional gradient: analysis of convergence and applications. arXiv preprint arXiv:1510.06567. See Also -------- ot.lp.emd : Unregularized optimal transport ot.bregman.sinkhorn : Entropic regularized optimal transport """ a, b, M, G0 = list_to_array(a, b, M, G0) if isinstance(M, int) or isinstance(M, float): nx = get_backend(a, b) else: nx = get_backend(a, b, M) loop = 1 if log: log = {'loss': []} if G0 is None: G = nx.outer(a, b) else: # to not change G0 in place. G = nx.copy(G0) if reg2 is None: def cost(G): return nx.sum(M * G) + reg1 * f(G) else: def cost(G): return nx.sum(M * G) + reg1 * f(G) + reg2 * nx.sum(G * nx.log(G)) cost_G = cost(G) if log: log['loss'].append(cost_G) it = 0 if verbose: print('{:5s}|{:12s}|{:8s}|{:8s}'.format( 'It.', 'Loss', 'Relative loss', 'Absolute loss') + '\n' + '-' * 48) print('{:5d}|{:8e}|{:8e}|{:8e}'.format(it, cost_G, 0, 0)) while loop: it += 1 old_cost_G = cost_G # problem linearization Mi = M + reg1 * df(G) if not (reg2 is None): Mi = Mi + reg2 * (1 + nx.log(G)) # set M positive Mi = Mi + nx.min(Mi) # solve linear program Gc, innerlog_ = lp_solver(a, b, Mi, **kwargs) # line search deltaG = Gc - G alpha, fc, cost_G = line_search(cost, G, deltaG, Mi, cost_G, **kwargs) G = G + alpha * deltaG # test convergence if it >= numItermax: loop = 0 abs_delta_cost_G = abs(cost_G - old_cost_G) relative_delta_cost_G = abs_delta_cost_G / abs(cost_G) if cost_G != 0. else np.nan if relative_delta_cost_G < stopThr or abs_delta_cost_G < stopThr2: loop = 0 if log: log['loss'].append(cost_G) if verbose: if it % 20 == 0: print('{:5s}|{:12s}|{:8s}|{:8s}'.format( 'It.', 'Loss', 'Relative loss', 'Absolute loss') + '\n' + '-' * 48) print('{:5d}|{:8e}|{:8e}|{:8e}'.format(it, cost_G, relative_delta_cost_G, abs_delta_cost_G)) if log: log.update(innerlog_) return G, log else: return G def cg(a, b, M, reg, f, df, G0=None, line_search=line_search_armijo, numItermax=200, numItermaxEmd=100000, stopThr=1e-9, stopThr2=1e-9, verbose=False, log=False, **kwargs): r""" Solve the general regularized OT problem with conditional gradient The function solves the following optimization problem: .. math:: \gamma = \mathop{\arg \min}_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F + \mathrm{reg} \cdot f(\gamma) s.t. 
\ \gamma \mathbf{1} &= \mathbf{a} \gamma^T \mathbf{1} &= \mathbf{b} \gamma &\geq 0 where : - :math:`\mathbf{M}` is the (`ns`, `nt`) metric cost matrix - :math:`f` is the regularization term (and `df` is its gradient) - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target weights (sum to 1) The algorithm used for solving the problem is conditional gradient as discussed in :ref:`[1] ` Parameters ---------- a : array-like, shape (ns,) samples weights in the source domain b : array-like, shape (nt,) samples in the target domain M : array-like, shape (ns, nt) loss matrix reg : float Regularization term >0 G0 : array-like, shape (ns,nt), optional initial guess (default is indep joint density) line_search: function, Function to find the optimal step. Default is line_search_armijo. numItermax : int, optional Max number of iterations numItermaxEmd : int, optional Max number of iterations for emd stopThr : float, optional Stop threshold on the relative variation (>0) stopThr2 : float, optional Stop threshold on the absolute variation (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True **kwargs : dict Parameters for linesearch Returns ------- gamma : (ns x nt) ndarray Optimal transportation matrix for the given parameters log : dict log dictionary return only if log==True in parameters .. _references-cg: References ---------- .. [1] Ferradans, S., Papadakis, N., PeyrĂ©, G., & Aujol, J. F. (2014). Regularized discrete optimal transport. SIAM Journal on Imaging Sciences, 7(3), 1853-1882. See Also -------- ot.lp.emd : Unregularized optimal transport ot.bregman.sinkhorn : Entropic regularized optimal transport """ def lp_solver(a, b, M, **kwargs): return emd(a, b, M, numItermaxEmd, log=True) return generic_conditional_gradient(a, b, M, f, df, reg, None, lp_solver, line_search, G0=G0, numItermax=numItermax, stopThr=stopThr, stopThr2=stopThr2, verbose=verbose, log=log, **kwargs) def semirelaxed_cg(a, b, M, reg, f, df, G0=None, line_search=line_search_armijo, numItermax=200, stopThr=1e-9, stopThr2=1e-9, verbose=False, log=False, **kwargs): r""" Solve the general regularized and semi-relaxed OT problem with conditional gradient The function solves the following optimization problem: .. math:: \gamma = \mathop{\arg \min}_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F + \mathrm{reg} \cdot f(\gamma) s.t. \ \gamma \mathbf{1} &= \mathbf{a} \gamma &\geq 0 where : - :math:`\mathbf{M}` is the (`ns`, `nt`) metric cost matrix - :math:`f` is the regularization term (and `df` is its gradient) - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target weights (sum to 1) The algorithm used for solving the problem is conditional gradient as discussed in :ref:`[1] ` Parameters ---------- a : array-like, shape (ns,) samples weights in the source domain b : array-like, shape (nt,) currently estimated samples weights in the target domain M : array-like, shape (ns, nt) loss matrix reg : float Regularization term >0 G0 : array-like, shape (ns,nt), optional initial guess (default is indep joint density) line_search: function, Function to find the optimal step. Default is the armijo line-search. 
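        A custom line_search must follow the same call pattern as in
        :any:`generic_conditional_gradient` (see the signature sketch given
        there).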
numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on the relative variation (>0) stopThr2 : float, optional Stop threshold on the absolute variation (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True **kwargs : dict Parameters for linesearch Returns ------- gamma : (ns x nt) ndarray Optimal transportation matrix for the given parameters log : dict log dictionary return only if log==True in parameters .. _references-cg: References ---------- .. [48] CĂ©dric Vincent-Cuaz, RĂ©mi Flamary, Marco Corneli, Titouan Vayer, Nicolas Courty. "Semi-relaxed Gromov-Wasserstein divergence and applications on graphs" International Conference on Learning Representations (ICLR), 2021. """ nx = get_backend(a, b) def lp_solver(a, b, Mi, **kwargs): # get minimum by rows as binary mask Gc = nx.ones(1, type_as=a) * (Mi == nx.reshape(nx.min(Mi, axis=1), (-1, 1))) Gc *= nx.reshape((a / nx.sum(Gc, axis=1)), (-1, 1)) # return by default an empty inner_log return Gc, {} return generic_conditional_gradient(a, b, M, f, df, reg, None, lp_solver, line_search, G0=G0, numItermax=numItermax, stopThr=stopThr, stopThr2=stopThr2, verbose=verbose, log=log, **kwargs) def gcg(a, b, M, reg1, reg2, f, df, G0=None, numItermax=10, numInnerItermax=200, stopThr=1e-9, stopThr2=1e-9, verbose=False, log=False, **kwargs): r""" Solve the general regularized OT problem with the generalized conditional gradient The function solves the following optimization problem: .. math:: \gamma = \mathop{\arg \min}_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F + \mathrm{reg_1}\cdot\Omega(\gamma) + \mathrm{reg_2}\cdot f(\gamma) s.t. \ \gamma \mathbf{1} &= \mathbf{a} \gamma^T \mathbf{1} &= \mathbf{b} \gamma &\geq 0 where : - :math:`\mathbf{M}` is the (`ns`, `nt`) metric cost matrix - :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - :math:`f` is the regularization term (and `df` is its gradient) - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target weights (sum to 1) The algorithm used for solving the problem is the generalized conditional gradient as discussed in :ref:`[5, 7] ` Parameters ---------- a : array-like, shape (ns,) samples weights in the source domain b : array-like, (nt,) samples in the target domain M : array-like, shape (ns, nt) loss matrix reg1 : float Entropic Regularization term >0 reg2 : float Second Regularization term >0 G0 : array-like, shape (ns, nt), optional initial guess (default is indep joint density) numItermax : int, optional Max number of iterations numInnerItermax : int, optional Max number of iterations of Sinkhorn stopThr : float, optional Stop threshold on the relative variation (>0) stopThr2 : float, optional Stop threshold on the absolute variation (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True Returns ------- gamma : ndarray, shape (ns, nt) Optimal transportation matrix for the given parameters log : dict log dictionary return only if log==True in parameters .. _references-gcg: References ---------- .. [5] N. Courty; R. Flamary; D. Tuia; A. Rakotomamonjy, "Optimal Transport for Domain Adaptation," in IEEE Transactions on Pattern Analysis and Machine Intelligence , vol.PP, no.99, pp.1-1 .. [7] Rakotomamonjy, A., Flamary, R., & Courty, N. (2015). Generalized conditional gradient: analysis of convergence and applications. arXiv preprint arXiv:1510.06567. 
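    Examples
    --------
    A minimal sketch with a simple quadratic regularizer (illustrative only:
    uniform weights, a random cost matrix and `gcg` available in the current
    namespace are assumed):

    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> a = np.ones(5) / 5
    >>> b = np.ones(5) / 5
    >>> M = rng.rand(5, 5)
    >>> def f(G):
    ...     return 0.5 * np.sum(G ** 2)
    >>> def df(G):
    ...     return G
    >>> G = gcg(a, b, M, reg1=1e-1, reg2=1e-1, f=f, df=df)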
See Also -------- ot.optim.cg : conditional gradient """ def lp_solver(a, b, Mi, **kwargs): return sinkhorn(a, b, Mi, reg1, numItermax=numInnerItermax, log=True, **kwargs) def line_search(cost, G, deltaG, Mi, cost_G, **kwargs): return line_search_armijo(cost, G, deltaG, Mi, cost_G, **kwargs) return generic_conditional_gradient(a, b, M, f, df, reg2, reg1, lp_solver, line_search, G0=G0, numItermax=numItermax, stopThr=stopThr, stopThr2=stopThr2, verbose=verbose, log=log, **kwargs) def solve_1d_linesearch_quad(a, b): r""" For any convex or non-convex 1d quadratic function `f`, solve the following problem: .. math:: \mathop{\arg \min}_{0 \leq x \leq 1} \quad f(x) = ax^{2} + bx + c Parameters ---------- a,b : float or tensors (1,) The coefficients of the quadratic function Returns ------- x : float The optimal value which leads to the minimal cost """ if a > 0: # convex minimum = min(1., max(0., -b / (2.0 * a))) return minimum else: # non convex if a + b < 0: return 1. else: return 0. python-pot-0.9.3+dfsg/ot/partial.py000077500000000000000000001127401455713015700172330ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Partial OT solvers """ # Author: Laetitia Chapel # License: MIT License import numpy as np from .lp import emd from .backend import get_backend from .utils import list_to_array def partial_wasserstein_lagrange(a, b, M, reg_m=None, nb_dummies=1, log=False, **kwargs): r""" Solves the partial optimal transport problem for the quadratic cost and returns the OT plan The function considers the following problem: .. math:: \gamma = \mathop{\arg \min}_\gamma \quad \langle \gamma, (\mathbf{M} - \lambda) \rangle_F .. math:: s.t. \ \gamma \mathbf{1} &\leq \mathbf{a} \gamma^T \mathbf{1} &\leq \mathbf{b} \gamma &\geq 0 \mathbf{1}^T \gamma^T \mathbf{1} = m & \leq \min\{\|\mathbf{a}\|_1, \|\mathbf{b}\|_1\} or equivalently (see Chizat, L., PeyrĂ©, G., Schmitzer, B., & Vialard, F. X. (2018). An interpolating distance between optimal transport and Fisher–Rao metrics. Foundations of Computational Mathematics, 18(1), 1-44.) .. math:: \gamma = \mathop{\arg \min}_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F + \sqrt{\frac{\lambda}{2} (\|\gamma \mathbf{1} - \mathbf{a}\|_1 + \|\gamma^T \mathbf{1} - \mathbf{b}\|_1)} s.t. \ \gamma \geq 0 where : - :math:`\mathbf{M}` is the metric cost matrix - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target unbalanced distributions - :math:`\lambda` is the lagrangian cost. Tuning its value allows attaining a given mass to be transported `m` The formulation of the problem has been proposed in :ref:`[28] ` Parameters ---------- a : np.ndarray (dim_a,) Unnormalized histogram of dimension `dim_a` b : np.ndarray (dim_b,) Unnormalized histograms of dimension `dim_b` M : np.ndarray (dim_a, dim_b) cost matrix for the quadratic cost reg_m : float, optional Lagrangian cost nb_dummies : int, optional, default:1 number of reservoir points to be added (to avoid numerical instabilities, increase its value if an error is raised) log : bool, optional record log if True **kwargs : dict parameters can be directly passed to the emd solver .. warning:: When dealing with a large number of points, the EMD solver may face some instabilities, especially when the mass associated to the dummy point is large. To avoid them, increase the number of dummy points (allows a smoother repartition of the mass over the points). 
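    .. note:: As implemented below, if `reg_m` is not provided it defaults to
        `max(M) + 1`, for which the modified cost :math:`\mathbf{M} - \lambda`
        is negative everywhere and all the mass is transported, while any
        `reg_m` below `-max(M)` yields the all-zero plan.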
Returns ------- gamma : (dim_a, dim_b) ndarray Optimal transportation matrix for the given parameters log : dict log dictionary returned only if `log` is `True` Examples -------- >>> import ot >>> a = [.1, .2] >>> b = [.1, .1] >>> M = [[0., 1.], [2., 3.]] >>> np.round(partial_wasserstein_lagrange(a,b,M), 2) array([[0.1, 0. ], [0. , 0.1]]) >>> np.round(partial_wasserstein_lagrange(a,b,M,reg_m=2), 2) array([[0.1, 0. ], [0. , 0. ]]) .. _references-partial-wasserstein-lagrange: References ---------- .. [28] Caffarelli, L. A., & McCann, R. J. (2010) Free boundaries in optimal transport and Monge-Ampere obstacle problems. Annals of mathematics, 673-730. See Also -------- ot.partial.partial_wasserstein : Partial Wasserstein with fixed mass """ a, b, M = list_to_array(a, b, M) nx = get_backend(a, b, M) if nx.sum(a) > 1 + 1e-15 or nx.sum(b) > 1 + 1e-15: # 1e-15 for numerical errors raise ValueError("Problem infeasible. Check that a and b are in the " "simplex") if reg_m is None: reg_m = float(nx.max(M)) + 1 if reg_m < -nx.max(M): return nx.zeros((len(a), len(b)), type_as=M) a0, b0, M0 = a, b, M # convert to humpy a, b, M = nx.to_numpy(a, b, M) eps = 1e-20 M = np.asarray(M, dtype=np.float64) b = np.asarray(b, dtype=np.float64) a = np.asarray(a, dtype=np.float64) M_star = M - reg_m # modified cost matrix # trick to fasten the computation: select only the subset of columns/lines # that can have marginals greater than 0 (that is to say M < 0) idx_x = np.where(np.min(M_star, axis=1) < eps)[0] idx_y = np.where(np.min(M_star, axis=0) < eps)[0] # extend a, b, M with "reservoir" or "dummy" points M_extended = np.zeros((len(idx_x) + nb_dummies, len(idx_y) + nb_dummies)) M_extended[:len(idx_x), :len(idx_y)] = M_star[np.ix_(idx_x, idx_y)] a_extended = np.append(a[idx_x], [(np.sum(a) - np.sum(a[idx_x]) + np.sum(b)) / nb_dummies] * nb_dummies) b_extended = np.append(b[idx_y], [(np.sum(b) - np.sum(b[idx_y]) + np.sum(a)) / nb_dummies] * nb_dummies) gamma_extended, log_emd = emd(a_extended, b_extended, M_extended, log=True, **kwargs) gamma = np.zeros((len(a), len(b))) gamma[np.ix_(idx_x, idx_y)] = gamma_extended[:-nb_dummies, :-nb_dummies] # convert back to backend gamma = nx.from_numpy(gamma, type_as=M0) if log_emd['warning'] is not None: raise ValueError("Error in the EMD resolution: try to increase the" " number of dummy points") log_emd['cost'] = nx.sum(gamma * M0) log_emd['u'] = nx.from_numpy(log_emd['u'], type_as=a0) log_emd['v'] = nx.from_numpy(log_emd['v'], type_as=b0) if log: return gamma, log_emd else: return gamma def partial_wasserstein(a, b, M, m=None, nb_dummies=1, log=False, **kwargs): r""" Solves the partial optimal transport problem for the quadratic cost and returns the OT plan The function considers the following problem: .. math:: \gamma = \mathop{\arg \min}_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F .. math:: s.t. 
\ \gamma \mathbf{1} &\leq \mathbf{a} \gamma^T \mathbf{1} &\leq \mathbf{b} \gamma &\geq 0 \mathbf{1}^T \gamma^T \mathbf{1} = m &\leq \min\{\|\mathbf{a}\|_1, \|\mathbf{b}\|_1\} where : - :math:`\mathbf{M}` is the metric cost matrix - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target unbalanced distributions - `m` is the amount of mass to be transported Parameters ---------- a : np.ndarray (dim_a,) Unnormalized histogram of dimension `dim_a` b : np.ndarray (dim_b,) Unnormalized histograms of dimension `dim_b` M : np.ndarray (dim_a, dim_b) cost matrix for the quadratic cost m : float, optional amount of mass to be transported nb_dummies : int, optional, default:1 number of reservoir points to be added (to avoid numerical instabilities, increase its value if an error is raised) log : bool, optional record log if True **kwargs : dict parameters can be directly passed to the emd solver .. warning:: When dealing with a large number of points, the EMD solver may face some instabilities, especially when the mass associated to the dummy point is large. To avoid them, increase the number of dummy points (allows a smoother repartition of the mass over the points). Returns ------- gamma : (dim_a, dim_b) ndarray Optimal transportation matrix for the given parameters log : dict log dictionary returned only if `log` is `True` Examples -------- >>> import ot >>> a = [.1, .2] >>> b = [.1, .1] >>> M = [[0., 1.], [2., 3.]] >>> np.round(partial_wasserstein(a,b,M), 2) array([[0.1, 0. ], [0. , 0.1]]) >>> np.round(partial_wasserstein(a,b,M,m=0.1), 2) array([[0.1, 0. ], [0. , 0. ]]) References ---------- .. [28] Caffarelli, L. A., & McCann, R. J. (2010) Free boundaries in optimal transport and Monge-Ampere obstacle problems. Annals of mathematics, 673-730. .. [29] Chapel, L., Alaya, M., Gasso, G. (2020). "Partial Optimal Transport with Applications on Positive-Unlabeled Learning". NeurIPS. See Also -------- ot.partial.partial_wasserstein_lagrange: Partial Wasserstein with regularization on the marginals ot.partial.entropic_partial_wasserstein: Partial Wasserstein with a entropic regularization parameter """ a, b, M = list_to_array(a, b, M) nx = get_backend(a, b, M) dim_a, dim_b = M.shape if len(a) == 0: a = nx.ones(dim_a, type_as=a) / dim_a if len(b) == 0: b = nx.ones(dim_b, type_as=b) / dim_b if m is None: return partial_wasserstein_lagrange(a, b, M, log=log, **kwargs) elif m < 0: raise ValueError("Problem infeasible. Parameter m should be greater" " than 0.") elif m > nx.min(nx.stack((nx.sum(a), nx.sum(b)))): raise ValueError("Problem infeasible. 
Parameter m should lower or" " equal than min(|a|_1, |b|_1).") b_extension = nx.ones(nb_dummies, type_as=b) * (nx.sum(a) - m) / nb_dummies b_extended = nx.concatenate((b, b_extension)) a_extension = nx.ones(nb_dummies, type_as=a) * (nx.sum(b) - m) / nb_dummies a_extended = nx.concatenate((a, a_extension)) M_extension = nx.ones((nb_dummies, nb_dummies), type_as=M) * nx.max(M) * 2 M_extended = nx.concatenate( (nx.concatenate((M, nx.zeros((M.shape[0], M_extension.shape[1]))), axis=1), nx.concatenate((nx.zeros((M_extension.shape[0], M.shape[1])), M_extension), axis=1)), axis=0 ) gamma, log_emd = emd(a_extended, b_extended, M_extended, log=True, **kwargs) gamma = gamma[:len(a), :len(b)] if log_emd['warning'] is not None: raise ValueError("Error in the EMD resolution: try to increase the" " number of dummy points") log_emd['partial_w_dist'] = nx.sum(M * gamma) log_emd['u'] = log_emd['u'][:len(a)] log_emd['v'] = log_emd['v'][:len(b)] if log: return gamma, log_emd else: return gamma def partial_wasserstein2(a, b, M, m=None, nb_dummies=1, log=False, **kwargs): r""" Solves the partial optimal transport problem for the quadratic cost and returns the partial GW discrepancy The function considers the following problem: .. math:: \gamma = \min_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F .. math:: s.t. \ \gamma \mathbf{1} &\leq \mathbf{a} \gamma^T \mathbf{1} &\leq \mathbf{b} \gamma &\geq 0 \mathbf{1}^T \gamma^T \mathbf{1} = m &\leq \min\{\|\mathbf{a}\|_1, \|\mathbf{b}\|_1\} where : - :math:`\mathbf{M}` is the metric cost matrix - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target unbalanced distributions - `m` is the amount of mass to be transported Parameters ---------- a : np.ndarray (dim_a,) Unnormalized histogram of dimension `dim_a` b : np.ndarray (dim_b,) Unnormalized histograms of dimension `dim_b` M : np.ndarray (dim_a, dim_b) cost matrix for the quadratic cost m : float, optional amount of mass to be transported nb_dummies : int, optional, default:1 number of reservoir points to be added (to avoid numerical instabilities, increase its value if an error is raised) log : bool, optional record log if True **kwargs : dict parameters can be directly passed to the emd solver .. warning:: When dealing with a large number of points, the EMD solver may face some instabilities, especially when the mass associated to the dummy point is large. To avoid them, increase the number of dummy points (allows a smoother repartition of the mass over the points). Returns ------- GW: float partial GW discrepancy log : dict log dictionary returned only if `log` is `True` Examples -------- >>> import ot >>> a=[.1, .2] >>> b=[.1, .1] >>> M=[[0., 1.], [2., 3.]] >>> np.round(partial_wasserstein2(a, b, M), 1) 0.3 >>> np.round(partial_wasserstein2(a,b,M,m=0.1), 1) 0.0 References ---------- .. [28] Caffarelli, L. A., & McCann, R. J. (2010) Free boundaries in optimal transport and Monge-Ampere obstacle problems. Annals of mathematics, 673-730. .. [29] Chapel, L., Alaya, M., Gasso, G. (2020). "Partial Optimal Transport with Applications on Positive-Unlabeled Learning". NeurIPS. """ a, b, M = list_to_array(a, b, M) nx = get_backend(a, b, M) partial_gw, log_w = partial_wasserstein(a, b, M, m, nb_dummies, log=True, **kwargs) log_w['T'] = partial_gw if log: return nx.sum(partial_gw * M), log_w else: return nx.sum(partial_gw * M) def gwgrad_partial(C1, C2, T): """Compute the GW gradient. Note: we can not use the trick in :ref:`[12] ` as the marginals may not sum to 1. 
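    Concretely, the tensor returned below is (a direct transcription of the
    code, with entry-wise squares and :math:`\mathbf{1}` denoting all-ones
    vectors of matching sizes):

    .. math::
        \nabla_T = 2 \left( \frac{\mathbf{C_1}^{2}}{2} T \mathbf{1} \mathbf{1}^T
        + \mathbf{1} \mathbf{1}^T T \frac{\mathbf{C_2}^{2}}{2}
        - \mathbf{C_1} T \mathbf{C_2}^T \right)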
Parameters ---------- C1: array of shape (n_p,n_p) intra-source (P) cost matrix C2: array of shape (n_u,n_u) intra-target (U) cost matrix T : array of shape(n_p+nb_dummies, n_u) (default: None) Transport matrix Returns ------- numpy.array of shape (n_p+nb_dummies, n_u) gradient .. _references-gwgrad-partial: References ---------- .. [12] PeyrĂ©, Gabriel, Marco Cuturi, and Justin Solomon, "Gromov-Wasserstein averaging of kernel and distance matrices." International Conference on Machine Learning (ICML). 2016. """ cC1 = np.dot(C1 ** 2 / 2, np.dot(T, np.ones(C2.shape[0]).reshape(-1, 1))) cC2 = np.dot(np.dot(np.ones(C1.shape[0]).reshape(1, -1), T), C2 ** 2 / 2) constC = cC1 + cC2 A = -np.dot(C1, T).dot(C2.T) tens = constC + A return tens * 2 def gwloss_partial(C1, C2, T): """Compute the GW loss. Parameters ---------- C1: array of shape (n_p,n_p) intra-source (P) cost matrix C2: array of shape (n_u,n_u) intra-target (U) cost matrix T : array of shape(n_p+nb_dummies, n_u) (default: None) Transport matrix Returns ------- GW loss """ g = gwgrad_partial(C1, C2, T) * 0.5 return np.sum(g * T) def partial_gromov_wasserstein(C1, C2, p, q, m=None, nb_dummies=1, G0=None, thres=1, numItermax=1000, tol=1e-7, log=False, verbose=False, **kwargs): r""" Solves the partial optimal transport problem and returns the OT plan The function considers the following problem: .. math:: \gamma = \mathop{\arg \min}_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F .. math:: s.t. \ \gamma \mathbf{1} &\leq \mathbf{a} \gamma^T \mathbf{1} &\leq \mathbf{b} \gamma &\geq 0 \mathbf{1}^T \gamma^T \mathbf{1} = m &\leq \min\{\|\mathbf{a}\|_1, \|\mathbf{b}\|_1\} where : - :math:`\mathbf{M}` is the metric cost matrix - :math:`\Omega` is the entropic regularization term, :math:`\Omega(\gamma) = \sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - :math:`\mathbf{a}` and :math:`\mathbf{b}` are the sample weights - `m` is the amount of mass to be transported The formulation of the problem has been proposed in :ref:`[29] ` Parameters ---------- C1 : ndarray, shape (ns, ns) Metric cost matrix in the source space C2 : ndarray, shape (nt, nt) Metric costfr matrix in the target space p : ndarray, shape (ns,) Distribution in the source space q : ndarray, shape (nt,) Distribution in the target space m : float, optional Amount of mass to be transported (default: :math:`\min\{\|\mathbf{p}\|_1, \|\mathbf{q}\|_1\}`) nb_dummies : int, optional Number of dummy points to add (avoid instabilities in the EMD solver) G0 : ndarray, shape (ns, nt), optional Initialization of the transportation matrix thres : float, optional quantile of the gradient matrix to populate the cost matrix when 0 (default: 1) numItermax : int, optional Max number of iterations tol : float, optional tolerance for stopping iterations log : bool, optional return log if True verbose : bool, optional Print information along iterations **kwargs : dict parameters can be directly passed to the emd solver Returns ------- gamma : (dim_a, dim_b) ndarray Optimal transportation matrix for the given parameters log : dict log dictionary returned only if `log` is `True` Examples -------- >>> import ot >>> import scipy as sp >>> a = np.array([0.25] * 4) >>> b = np.array([0.25] * 4) >>> x = np.array([1,2,100,200]).reshape((-1,1)) >>> y = np.array([3,2,98,199]).reshape((-1,1)) >>> C1 = sp.spatial.distance.cdist(x, x) >>> C2 = sp.spatial.distance.cdist(y, y) >>> np.round(partial_gromov_wasserstein(C1, C2, a, b),2) array([[0. , 0.25, 0. , 0. ], [0.25, 0. , 0. , 0. ], [0. , 0. , 0.25, 0. ], [0. , 0. , 0. 
, 0.25]]) >>> np.round(partial_gromov_wasserstein(C1, C2, a, b, m=0.25),2) array([[0. , 0. , 0. , 0. ], [0. , 0. , 0. , 0. ], [0. , 0. , 0.25, 0. ], [0. , 0. , 0. , 0. ]]) .. _references-partial-gromov-wasserstein: References ---------- .. [29] Chapel, L., Alaya, M., Gasso, G. (2020). "Partial Optimal Transport with Applications on Positive-Unlabeled Learning". NeurIPS. """ if m is None: m = np.min((np.sum(p), np.sum(q))) elif m < 0: raise ValueError("Problem infeasible. Parameter m should be greater" " than 0.") elif m > np.min((np.sum(p), np.sum(q))): raise ValueError("Problem infeasible. Parameter m should lower or" " equal than min(|a|_1, |b|_1).") if G0 is None: G0 = np.outer(p, q) dim_G_extended = (len(p) + nb_dummies, len(q) + nb_dummies) q_extended = np.append(q, [(np.sum(p) - m) / nb_dummies] * nb_dummies) p_extended = np.append(p, [(np.sum(q) - m) / nb_dummies] * nb_dummies) cpt = 0 err = 1 if log: log = {'err': []} while (err > tol and cpt < numItermax): Gprev = np.copy(G0) M = gwgrad_partial(C1, C2, G0) M_emd = np.zeros(dim_G_extended) M_emd[:len(p), :len(q)] = M M_emd[-nb_dummies:, -nb_dummies:] = np.max(M) * 1e2 M_emd = np.asarray(M_emd, dtype=np.float64) Gc, logemd = emd(p_extended, q_extended, M_emd, log=True, **kwargs) if logemd['warning'] is not None: raise ValueError("Error in the EMD resolution: try to increase the" " number of dummy points") G0 = Gc[:len(p), :len(q)] if cpt % 10 == 0: # to speed up the computations err = np.linalg.norm(G0 - Gprev) if log: log['err'].append(err) if verbose: if cpt % 200 == 0: print('{:5s}|{:12s}|{:12s}'.format( 'It.', 'Err', 'Loss') + '\n' + '-' * 31) print('{:5d}|{:8e}|{:8e}'.format(cpt, err, gwloss_partial(C1, C2, G0))) deltaG = G0 - Gprev a = gwloss_partial(C1, C2, deltaG) b = 2 * np.sum(M * deltaG) if b > 0: # due to numerical precision gamma = 0 cpt = numItermax elif a > 0: gamma = min(1, np.divide(-b, 2.0 * a)) else: if (a + b) < 0: gamma = 1 else: gamma = 0 cpt = numItermax G0 = Gprev + gamma * deltaG cpt += 1 if log: log['partial_gw_dist'] = gwloss_partial(C1, C2, G0) return G0[:len(p), :len(q)], log else: return G0[:len(p), :len(q)] def partial_gromov_wasserstein2(C1, C2, p, q, m=None, nb_dummies=1, G0=None, thres=1, numItermax=1000, tol=1e-7, log=False, verbose=False, **kwargs): r""" Solves the partial optimal transport problem and returns the partial Gromov-Wasserstein discrepancy The function considers the following problem: .. math:: GW = \min_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F .. math:: s.t. 
\ \gamma \mathbf{1} &\leq \mathbf{a} \gamma^T \mathbf{1} &\leq \mathbf{b} \gamma &\geq 0 \mathbf{1}^T \gamma^T \mathbf{1} = m &\leq \min\{\|\mathbf{a}\|_1, \|\mathbf{b}\|_1\} where : - :math:`\mathbf{M}` is the metric cost matrix - :math:`\Omega` is the entropic regularization term, :math:`\Omega(\gamma) = \sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - :math:`\mathbf{a}` and :math:`\mathbf{b}` are the sample weights - `m` is the amount of mass to be transported The formulation of the problem has been proposed in :ref:`[29] ` Parameters ---------- C1 : ndarray, shape (ns, ns) Metric cost matrix in the source space C2 : ndarray, shape (nt, nt) Metric cost matrix in the target space p : ndarray, shape (ns,) Distribution in the source space q : ndarray, shape (nt,) Distribution in the target space m : float, optional Amount of mass to be transported (default: :math:`\min\{\|\mathbf{p}\|_1, \|\mathbf{q}\|_1\}`) nb_dummies : int, optional Number of dummy points to add (avoid instabilities in the EMD solver) G0 : ndarray, shape (ns, nt), optional Initialization of the transportation matrix thres : float, optional quantile of the gradient matrix to populate the cost matrix when 0 (default: 1) numItermax : int, optional Max number of iterations tol : float, optional tolerance for stopping iterations log : bool, optional return log if True verbose : bool, optional Print information along iterations **kwargs : dict parameters can be directly passed to the emd solver .. warning:: When dealing with a large number of points, the EMD solver may face some instabilities, especially when the mass associated to the dummy point is large. To avoid them, increase the number of dummy points (allows a smoother repartition of the mass over the points). Returns ------- partial_gw_dist : float partial GW discrepancy log : dict log dictionary returned only if `log` is `True` Examples -------- >>> import ot >>> import scipy as sp >>> a = np.array([0.25] * 4) >>> b = np.array([0.25] * 4) >>> x = np.array([1,2,100,200]).reshape((-1,1)) >>> y = np.array([3,2,98,199]).reshape((-1,1)) >>> C1 = sp.spatial.distance.cdist(x, x) >>> C2 = sp.spatial.distance.cdist(y, y) >>> np.round(partial_gromov_wasserstein2(C1, C2, a, b),2) 1.69 >>> np.round(partial_gromov_wasserstein2(C1, C2, a, b, m=0.25),2) 0.0 .. _references-partial-gromov-wasserstein2: References ---------- .. [29] Chapel, L., Alaya, M., Gasso, G. (2020). "Partial Optimal Transport with Applications on Positive-Unlabeled Learning". NeurIPS. """ partial_gw, log_gw = partial_gromov_wasserstein(C1, C2, p, q, m, nb_dummies, G0, thres, numItermax, tol, True, verbose, **kwargs) log_gw['T'] = partial_gw if log: return log_gw['partial_gw_dist'], log_gw else: return log_gw['partial_gw_dist'] def entropic_partial_wasserstein(a, b, M, reg, m=None, numItermax=1000, stopThr=1e-100, verbose=False, log=False): r""" Solves the partial optimal transport problem and returns the OT plan The function considers the following problem: .. math:: \gamma = \mathop{\arg \min}_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F + \mathrm{reg} \cdot\Omega(\gamma) s.t. 
\gamma \mathbf{1} &\leq \mathbf{a} \\ \gamma^T \mathbf{1} &\leq \mathbf{b} \\ \gamma &\geq 0 \\ \mathbf{1}^T \gamma^T \mathbf{1} = m &\leq \min\{\|\mathbf{a}\|_1, \|\mathbf{b}\|_1\} \\ where : - :math:`\mathbf{M}` is the metric cost matrix - :math:`\Omega` is the entropic regularization term, :math:`\Omega=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - :math:`\mathbf{a}` and :math:`\mathbf{b}` are the sample weights - `m` is the amount of mass to be transported The formulation of the problem has been proposed in :ref:`[3] ` (prop. 5) Parameters ---------- a : np.ndarray (dim_a,) Unnormalized histogram of dimension `dim_a` b : np.ndarray (dim_b,) Unnormalized histograms of dimension `dim_b` M : np.ndarray (dim_a, dim_b) cost matrix reg : float Regularization term > 0 m : float, optional Amount of mass to be transported numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on error (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True Returns ------- gamma : (dim_a, dim_b) ndarray Optimal transportation matrix for the given parameters log : dict log dictionary returned only if `log` is `True` Examples -------- >>> import ot >>> a = [.1, .2] >>> b = [.1, .1] >>> M = [[0., 1.], [2., 3.]] >>> np.round(entropic_partial_wasserstein(a, b, M, 1, 0.1), 2) array([[0.06, 0.02], [0.01, 0. ]]) .. _references-entropic-partial-wasserstein: References ---------- .. [3] Benamou, J. D., Carlier, G., Cuturi, M., Nenna, L., & PeyrĂ©, G. (2015). Iterative Bregman projections for regularized transportation problems. SIAM Journal on Scientific Computing, 37(2), A1111-A1138. See Also -------- ot.partial.partial_wasserstein: exact Partial Wasserstein """ a, b, M = list_to_array(a, b, M) nx = get_backend(a, b, M) dim_a, dim_b = M.shape dx = nx.ones(dim_a, type_as=a) dy = nx.ones(dim_b, type_as=b) if len(a) == 0: a = nx.ones(dim_a, type_as=a) / dim_a if len(b) == 0: b = nx.ones(dim_b, type_as=b) / dim_b if m is None: m = nx.min(nx.stack((nx.sum(a), nx.sum(b)))) * 1.0 if m < 0: raise ValueError("Problem infeasible. Parameter m should be greater" " than 0.") if m > nx.min(nx.stack((nx.sum(a), nx.sum(b)))): raise ValueError("Problem infeasible. 
Parameter m should lower or" " equal than min(|a|_1, |b|_1).") log_e = {'err': []} if nx.__name__ == "numpy": # Next 3 lines equivalent to K=nx.exp(-M/reg), but faster to compute K = np.empty(M.shape, dtype=M.dtype) np.divide(M, -reg, out=K) np.exp(K, out=K) np.multiply(K, m / np.sum(K), out=K) else: K = nx.exp(-M / reg) K = K * m / nx.sum(K) err, cpt = 1, 0 q1 = nx.ones(K.shape, type_as=K) q2 = nx.ones(K.shape, type_as=K) q3 = nx.ones(K.shape, type_as=K) while (err > stopThr and cpt < numItermax): Kprev = K K = K * q1 K1 = nx.dot(nx.diag(nx.minimum(a / nx.sum(K, axis=1), dx)), K) q1 = q1 * Kprev / K1 K1prev = K1 K1 = K1 * q2 K2 = nx.dot(K1, nx.diag(nx.minimum(b / nx.sum(K1, axis=0), dy))) q2 = q2 * K1prev / K2 K2prev = K2 K2 = K2 * q3 K = K2 * (m / nx.sum(K2)) q3 = q3 * K2prev / K if nx.any(nx.isnan(K)) or nx.any(nx.isinf(K)): print('Warning: numerical errors at iteration', cpt) break if cpt % 10 == 0: err = nx.norm(Kprev - K) if log: log_e['err'].append(err) if verbose: if cpt % 200 == 0: print( '{:5s}|{:12s}'.format('It.', 'Err') + '\n' + '-' * 11) print('{:5d}|{:8e}|'.format(cpt, err)) cpt = cpt + 1 log_e['partial_w_dist'] = nx.sum(M * K) if log: return K, log_e else: return K def entropic_partial_gromov_wasserstein(C1, C2, p, q, reg, m=None, G0=None, numItermax=1000, tol=1e-7, log=False, verbose=False): r""" Returns the partial Gromov-Wasserstein transport between :math:`(\mathbf{C_1}, \mathbf{p})` and :math:`(\mathbf{C_2}, \mathbf{q})` The function solves the following optimization problem: .. math:: \gamma = \mathop{\arg \min}_{\gamma} \quad \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l})\cdot \gamma_{i,j}\cdot\gamma_{k,l} + \mathrm{reg} \cdot\Omega(\gamma) .. math:: s.t. \ \gamma &\geq 0 \gamma \mathbf{1} &\leq \mathbf{a} \gamma^T \mathbf{1} &\leq \mathbf{b} \mathbf{1}^T \gamma^T \mathbf{1} = m &\leq \min\{\|\mathbf{a}\|_1, \|\mathbf{b}\|_1\} where : - :math:`\mathbf{C_1}` is the metric cost matrix in the source space - :math:`\mathbf{C_2}` is the metric cost matrix in the target space - :math:`\mathbf{p}` and :math:`\mathbf{q}` are the sample weights - `L`: quadratic loss function - :math:`\Omega` is the entropic regularization term, :math:`\Omega=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - `m` is the amount of mass to be transported The formulation of the GW problem has been proposed in :ref:`[12] ` and the partial GW in :ref:`[29] ` Parameters ---------- C1 : ndarray, shape (ns, ns) Metric cost matrix in the source space C2 : ndarray, shape (nt, nt) Metric cost matrix in the target space p : ndarray, shape (ns,) Distribution in the source space q : ndarray, shape (nt,) Distribution in the target space reg: float entropic regularization parameter m : float, optional Amount of mass to be transported (default: :math:`\min\{\|\mathbf{p}\|_1, \|\mathbf{q}\|_1\}`) G0 : ndarray, shape (ns, nt), optional Initialization of the transportation matrix numItermax : int, optional Max number of iterations tol : float, optional Stop threshold on error (>0) log : bool, optional return log if True verbose : bool, optional Print information along iterations Examples -------- >>> import ot >>> import scipy as sp >>> a = np.array([0.25] * 4) >>> b = np.array([0.25] * 4) >>> x = np.array([1,2,100,200]).reshape((-1,1)) >>> y = np.array([3,2,98,199]).reshape((-1,1)) >>> C1 = sp.spatial.distance.cdist(x, x) >>> C2 = sp.spatial.distance.cdist(y, y) >>> np.round(entropic_partial_gromov_wasserstein(C1, C2, a, b, 50), 2) array([[0.12, 0.13, 0. , 0. ], [0.13, 0.12, 0. , 0. ], [0. , 0. , 0.25, 0. 
], [0. , 0. , 0. , 0.25]]) >>> np.round(entropic_partial_gromov_wasserstein(C1, C2, a, b, 50,0.25), 2) array([[0.02, 0.03, 0. , 0.03], [0.03, 0.03, 0. , 0.03], [0. , 0. , 0.03, 0. ], [0.02, 0.02, 0. , 0.03]]) Returns ------- :math: `gamma` : (dim_a, dim_b) ndarray Optimal transportation matrix for the given parameters log : dict log dictionary returned only if `log` is `True` .. _references-entropic-partial-gromov-wasserstein: References ---------- .. [12] PeyrĂ©, Gabriel, Marco Cuturi, and Justin Solomon, "Gromov-Wasserstein averaging of kernel and distance matrices." International Conference on Machine Learning (ICML). 2016. .. [29] Chapel, L., Alaya, M., Gasso, G. (2020). "Partial Optimal Transport with Applications on Positive-Unlabeled Learning". NeurIPS. See Also -------- ot.partial.partial_gromov_wasserstein: exact Partial Gromov-Wasserstein """ if G0 is None: G0 = np.outer(p, q) if m is None: m = np.min((np.sum(p), np.sum(q))) elif m < 0: raise ValueError("Problem infeasible. Parameter m should be greater" " than 0.") elif m > np.min((np.sum(p), np.sum(q))): raise ValueError("Problem infeasible. Parameter m should lower or" " equal than min(|a|_1, |b|_1).") cpt = 0 err = 1 loge = {'err': []} while (err > tol and cpt < numItermax): Gprev = G0 M_entr = gwgrad_partial(C1, C2, G0) G0 = entropic_partial_wasserstein(p, q, M_entr, reg, m) if cpt % 10 == 0: # to speed up the computations err = np.linalg.norm(G0 - Gprev) if log: loge['err'].append(err) if verbose: if cpt % 200 == 0: print('{:5s}|{:12s}|{:12s}'.format( 'It.', 'Err', 'Loss') + '\n' + '-' * 31) print('{:5d}|{:8e}|{:8e}'.format(cpt, err, gwloss_partial(C1, C2, G0))) cpt += 1 if log: loge['partial_gw_dist'] = gwloss_partial(C1, C2, G0) return G0, loge else: return G0 def entropic_partial_gromov_wasserstein2(C1, C2, p, q, reg, m=None, G0=None, numItermax=1000, tol=1e-7, log=False, verbose=False): r""" Returns the partial Gromov-Wasserstein discrepancy between :math:`(\mathbf{C_1}, \mathbf{p})` and :math:`(\mathbf{C_2}, \mathbf{q})` The function solves the following optimization problem: .. math:: GW = \min_{\gamma} \quad \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l})\cdot \gamma_{i,j}\cdot\gamma_{k,l} + \mathrm{reg} \cdot\Omega(\gamma) .. math:: s.t. 
\ \gamma &\geq 0 \gamma \mathbf{1} &\leq \mathbf{a} \gamma^T \mathbf{1} &\leq \mathbf{b} \mathbf{1}^T \gamma^T \mathbf{1} = m &\leq \min\{\|\mathbf{a}\|_1, \|\mathbf{b}\|_1\} where : - :math:`\mathbf{C_1}` is the metric cost matrix in the source space - :math:`\mathbf{C_2}` is the metric cost matrix in the target space - :math:`\mathbf{p}` and :math:`\mathbf{q}` are the sample weights - `L` : quadratic loss function - :math:`\Omega` is the entropic regularization term, :math:`\Omega=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - `m` is the amount of mass to be transported The formulation of the GW problem has been proposed in :ref:`[12] ` and the partial GW in :ref:`[29] ` Parameters ---------- C1 : ndarray, shape (ns, ns) Metric cost matrix in the source space C2 : ndarray, shape (nt, nt) Metric cost matrix in the target space p : ndarray, shape (ns,) Distribution in the source space q : ndarray, shape (nt,) Distribution in the target space reg: float entropic regularization parameter m : float, optional Amount of mass to be transported (default: :math:`\min\{\|\mathbf{p}\|_1, \|\mathbf{q}\|_1\}`) G0 : ndarray, shape (ns, nt), optional Initialization of the transportation matrix numItermax : int, optional Max number of iterations tol : float, optional Stop threshold on error (>0) log : bool, optional return log if True verbose : bool, optional Print information along iterations Returns ------- partial_gw_dist: float Gromov-Wasserstein distance log : dict log dictionary returned only if `log` is `True` Examples -------- >>> import ot >>> import scipy as sp >>> a = np.array([0.25] * 4) >>> b = np.array([0.25] * 4) >>> x = np.array([1,2,100,200]).reshape((-1,1)) >>> y = np.array([3,2,98,199]).reshape((-1,1)) >>> C1 = sp.spatial.distance.cdist(x, x) >>> C2 = sp.spatial.distance.cdist(y, y) >>> np.round(entropic_partial_gromov_wasserstein2(C1, C2, a, b,50), 2) 1.87 .. _references-entropic-partial-gromov-wasserstein2: References ---------- .. [12] PeyrĂ©, Gabriel, Marco Cuturi, and Justin Solomon, "Gromov-Wasserstein averaging of kernel and distance matrices." International Conference on Machine Learning (ICML). 2016. .. [29] Chapel, L., Alaya, M., Gasso, G. (2020). "Partial Optimal Transport with Applications on Positive-Unlabeled Learning". NeurIPS. """ partial_gw, log_gw = entropic_partial_gromov_wasserstein(C1, C2, p, q, reg, m, G0, numItermax, tol, True, verbose) log_gw['T'] = partial_gw if log: return log_gw['partial_gw_dist'], log_gw else: return log_gw['partial_gw_dist'] python-pot-0.9.3+dfsg/ot/plot.py000066400000000000000000000050451455713015700165510ustar00rootroot00000000000000""" Functions for plotting OT matrices .. warning:: Note that by default the module is not import in :mod:`ot`. In order to use it you need to explicitly import :mod:`ot.plot` """ # Author: Remi Flamary # # License: MIT License import numpy as np import matplotlib.pylab as pl from matplotlib import gridspec def plot1D_mat(a, b, M, title=''): r""" Plot matrix :math:`\mathbf{M}` with the source and target 1D distribution Creates a subplot with the source distribution :math:`\mathbf{a}` on the left and target distribution :math:`\mathbf{b}` on the top. The matrix :math:`\mathbf{M}` is shown in between. 
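    A minimal usage sketch (illustrative; assumes histograms built with
    :func:`ot.datasets.make_1D_gauss` and skips the plotting call in doctests):

    >>> import numpy as np
    >>> import ot
    >>> import ot.plot
    >>> n = 100
    >>> a = ot.datasets.make_1D_gauss(n, m=20, s=5)
    >>> b = ot.datasets.make_1D_gauss(n, m=60, s=10)
    >>> x = np.arange(n, dtype=np.float64).reshape((-1, 1))
    >>> M = ot.dist(x, x)
    >>> ot.plot.plot1D_mat(a, b, ot.emd(a, b, M), 'OT matrix')  # doctest: +SKIP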
    Parameters
    ----------
    a : ndarray, shape (na,)
        Source distribution
    b : ndarray, shape (nb,)
        Target distribution
    M : ndarray, shape (na, nb)
        Matrix to plot
    title : str, optional
        Title of the plot
    """
    na, nb = M.shape

    gs = gridspec.GridSpec(3, 3)

    xa = np.arange(na)
    xb = np.arange(nb)

    ax1 = pl.subplot(gs[0, 1:])
    pl.plot(xb, b, 'r', label='Target distribution')
    pl.yticks(())
    pl.title(title)

    ax2 = pl.subplot(gs[1:, 0])
    pl.plot(a, xa, 'b', label='Source distribution')
    pl.gca().invert_xaxis()
    pl.gca().invert_yaxis()
    pl.xticks(())

    pl.subplot(gs[1:, 1:], sharex=ax1, sharey=ax2)
    pl.imshow(M, interpolation='nearest')
    pl.axis('off')

    pl.xlim((0, nb))
    pl.tight_layout()
    pl.subplots_adjust(wspace=0., hspace=0.2)


def plot2D_samples_mat(xs, xt, G, thr=1e-8, **kwargs):
    r""" Plot matrix :math:`\mathbf{G}` in 2D with lines using alpha values

    Plot lines between source and target 2D samples with a color
    proportional to the value of the matrix :math:`\mathbf{G}` between samples.

    Parameters
    ----------
    xs : ndarray, shape (ns,2)
        Source samples positions
    xt : ndarray, shape (nt,2)
        Target samples positions
    G : ndarray, shape (na,nb)
        OT matrix
    thr : float, optional
        threshold above which the line is drawn
    **kwargs : dict
        parameters given to the plot functions (default color is black if
        nothing given)
    """
    if ('color' not in kwargs) and ('c' not in kwargs):
        kwargs['color'] = 'k'

    mx = G.max()
    if 'alpha' in kwargs:
        scale = kwargs['alpha']
        del kwargs['alpha']
    else:
        scale = 1
    for i in range(xs.shape[0]):
        for j in range(xt.shape[0]):
            if G[i, j] / mx > thr:
                pl.plot([xs[i, 0], xt[j, 0]], [xs[i, 1], xt[j, 1]],
                        alpha=G[i, j] / mx * scale, **kwargs)
python-pot-0.9.3+dfsg/ot/regpath.py000066400000000000000000000776201455713015700172330ustar00rootroot00000000000000# -*- coding: utf-8 -*-
"""
Regularization path OT solvers
"""

# Author: Haoran Wu
# License: MIT License

import numpy as np
import scipy.sparse as sp


def recast_ot_as_lasso(a, b, C):
    r"""This function recasts the l2-penalized UOT problem as a Lasso problem.

    Recall the l2-penalized UOT problem defined in :ref:`[41] `

    .. math::
        \text{UOT}_{\lambda} = \min_T \langle \mathbf{C}, T \rangle_F
        + \lambda \|T 1_m - \mathbf{a}\|_2^2
        + \lambda \|T^T 1_n - \mathbf{b}\|_2^2

        s.t. \quad T \geq 0

    where :

    - :math:`C` is the cost matrix
    - :math:`\lambda` is the l2-regularization parameter
    - :math:`\mathbf{a}` and :math:`\mathbf{b}` are the source and target
      distributions
    - :math:`T` is the transport plan to optimize

    The problem above can be reformulated as a non-negative penalized
    linear regression problem, particularly Lasso

    .. math::
        \text{UOT2}_{\lambda} = \min_{\mathbf{t}} \gamma \mathbf{c}^T \mathbf{t}
        + 0.5 * \|H \mathbf{t} - \mathbf{y}\|_2^2

        s.t. \quad \mathbf{t} \geq 0

    where :

    - :math:`\mathbf{c}` is the flattened version of the cost matrix :math:`C`
    - :math:`\mathbf{y}` is the concatenation of vectors :math:`\mathbf{a}`
      and :math:`\mathbf{b}`
    - :math:`H` is a metric matrix, see :ref:`[41] ` for
      the design of :math:`H`. The matrix product :math:`H\mathbf{t}`
      computes both the source marginal and the target marginals.
    - :math:`\mathbf{t}` is the flattened version of the transport plan
      :math:`T`

    Parameters
    ----------
    a : np.ndarray (dim_a,)
        Histogram of dimension dim_a
    b : np.ndarray (dim_b,)
        Histogram of dimension dim_b
    C : np.ndarray, shape (dim_a, dim_b)
        Cost matrix

    Returns
    -------
    H : np.ndarray (dim_a+dim_b, dim_a*dim_b)
        Design matrix that contains only 0 and 1
    y : np.ndarray (ns + nt, )
        Concatenation of histograms :math:`\mathbf{a}` and :math:`\mathbf{b}`
    c : np.ndarray (ns * nt, )
        Flattened array of the cost matrix

    Examples
    --------
    >>> import numpy as np
    >>> import ot
    >>> a = np.array([0.2, 0.3, 0.5])
    >>> b = np.array([0.1, 0.9])
    >>> C = np.array([[16., 25.], [28., 16.], [40., 36.]])
    >>> H, y, c = ot.regpath.recast_ot_as_lasso(a, b, C)
    >>> H.toarray()
    array([[1., 1., 0., 0., 0., 0.],
           [0., 0., 1., 1., 0., 0.],
           [0., 0., 0., 0., 1., 1.],
           [1., 0., 1., 0., 1., 0.],
           [0., 1., 0., 1., 0., 1.]])
    >>> y
    array([0.2, 0.3, 0.5, 0.1, 0.9])
    >>> c
    array([16., 25., 28., 16., 40., 36.])

    References
    ----------
    .. [41] Chapel, L., Flamary, R., Wu, H., Févotte, C., and Gasso, G. (2021).
        Unbalanced optimal transport through non-negative penalized
        linear regression. NeurIPS.
    """
    dim_a = np.shape(a)[0]
    dim_b = np.shape(b)[0]
    y = np.concatenate((a, b))
    c = C.flatten()
    jHa = np.arange(dim_a * dim_b)
    iHa = np.repeat(np.arange(dim_a), dim_b)
    jHb = np.arange(dim_a * dim_b)
    iHb = np.tile(np.arange(dim_b), dim_a) + dim_a
    j = np.concatenate((jHa, jHb))
    i = np.concatenate((iHa, iHb))
    H = sp.csc_matrix((np.ones(dim_a * dim_b * 2), (i, j)),
                      shape=(dim_a + dim_b, dim_a * dim_b))
    return H, y, c


def recast_semi_relaxed_as_lasso(a, b, C):
    r"""This function recasts the semi-relaxed l2-UOT problem as a Lasso problem.

    .. math::
        \text{semi-relaxed UOT} = \min_T \langle \mathbf{C}, T \rangle_F
        + \lambda \|T 1_m - \mathbf{a}\|_2^2

        s.t. \quad T^T 1_n = \mathbf{b}

             \mathbf{t} \geq 0

    where :

    - :math:`C` is the metric cost matrix
    - :math:`\lambda` is the l2-regularization parameter
    - :math:`\mathbf{a}` and :math:`\mathbf{b}` are the source and target
      distributions
    - :math:`T` is the transport plan to optimize

    The problem above can be reformulated as follows

    .. math::
        \text{semi-relaxed UOT2} = \min_t \gamma \mathbf{c}^T t
        + 0.5 * \|H_r \mathbf{t} - \mathbf{a}\|_2^2

        s.t.
H_c \mathbf{t} = \mathbf{b} \mathbf{t} \geq 0 where : - :math:`\mathbf{c}` is flattened version of the cost matrix :math:`C` - :math:`\gamma = 1/\lambda` is the l2-regularization parameter - :math:`H_r` is a metric matrix which computes the sum along the \ rows of the transport plan :math:`T` - :math:`H_c` is a metric matrix which computes the sum along the \ columns of the transport plan :math:`T` - :math:`\mathbf{t}` is the flattened version of :math:`T` Parameters ---------- a : np.ndarray (dim_a,) Histogram of dimension dim_a b : np.ndarray (dim_b,) Histogram of dimension dim_b C : np.ndarray, shape (dim_a, dim_b) Cost matrix Returns ------- Hr : np.ndarray (dim_a, dim_a * dim_b) Auxiliary matrix constituted by 0 and 1, which computes the sum along the rows of transport plan :math:`T` Hc : np.ndarray (dim_b, dim_a * dim_b) Auxiliary matrix constituted by 0 and 1, which computes the sum along the columns of transport plan :math:`T` c : np.ndarray (ns * nt, ) Flattened array of the cost matrix Examples -------- >>> import ot >>> a = np.array([0.2, 0.3, 0.5]) >>> b = np.array([0.1, 0.9]) >>> C = np.array([[16., 25.], [28., 16.], [40., 36.]]) >>> Hr,Hc,c = ot.regpath.recast_semi_relaxed_as_lasso(a, b, C) >>> Hr.toarray() array([[1., 1., 0., 0., 0., 0.], [0., 0., 1., 1., 0., 0.], [0., 0., 0., 0., 1., 1.]]) >>> Hc.toarray() array([[1., 0., 1., 0., 1., 0.], [0., 1., 0., 1., 0., 1.]]) >>> c array([16., 25., 28., 16., 40., 36.]) """ dim_a = np.shape(a)[0] dim_b = np.shape(b)[0] c = C.flatten() jHr = np.arange(dim_a * dim_b) iHr = np.repeat(np.arange(dim_a), dim_b) jHc = np.arange(dim_a * dim_b) iHc = np.tile(np.arange(dim_b), dim_a) Hr = sp.csc_matrix((np.ones(dim_a * dim_b), (iHr, jHr)), shape=(dim_a, dim_a * dim_b)) Hc = sp.csc_matrix((np.ones(dim_a * dim_b), (iHc, jHc)), shape=(dim_b, dim_a * dim_b)) return Hr, Hc, c def ot_next_gamma(phi, delta, HtH, Hty, c, active_index, current_gamma): r""" This function computes the next value of gamma if a variable is added in the next iteration of the regularization path. We look for the largest value of gamma such that the gradient of an inactive variable vanishes .. 
math:: \max_{i \in \bar{A}} \frac{\mathbf{h}_i^T(H_A \phi - \mathbf{y})} {\mathbf{h}_i^T H_A \delta - \mathbf{c}_i} where : - A is the current active set - :math:`\mathbf{h}_i` is the :math:`i` th column of the design \ matrix :math:`{H}` - :math:`{H}_A` is the sub-matrix constructed by the columns of \ :math:`{H}` whose indices belong to the active set A - :math:`\mathbf{c}_i` is the :math:`i` th element of the cost vector \ :math:`\mathbf{c}` - :math:`\mathbf{y}` is the concatenation of the source and target \ distributions - :math:`\phi` is the intercept of the solutions at the current iteration - :math:`\delta` is the slope of the solutions at the current iteration Parameters ---------- phi : np.ndarray (size(A), ) Intercept of the solutions at the current iteration delta : np.ndarray (size(A), ) Slope of the solutions at the current iteration HtH : np.ndarray (dim_a * dim_b, dim_a * dim_b) Matrix product of :math:`{H}^T {H}` Hty : np.ndarray (dim_a + dim_b, ) Matrix product of :math:`{H}^T \mathbf{y}` c: np.ndarray (dim_a * dim_b, ) Flattened array of the cost matrix :math:`{C}` active_index : list Indices of active variables current_gamma : float Value of the regularization parameter at the beginning of the current \ iteration Returns ------- next_gamma : float Value of gamma if a variable is added to active set in next iteration next_active_index : int Index of variable to be activated References ---------- .. [41] Chapel, L., Flamary, R., Wu, H., FĂ©votte, C., and Gasso, G. (2021). Unbalanced optimal transport through non-negative penalized linear regression. NeurIPS. """ M = (HtH[:, active_index].dot(phi) - Hty) / \ (HtH[:, active_index].dot(delta) - c + 1e-16) M[active_index] = 0 M[M > (current_gamma - 1e-10 * current_gamma)] = 0 return np.max(M), np.argmax(M) def semi_relaxed_next_gamma(phi, delta, phi_u, delta_u, HrHr, Hc, Hra, c, active_index, current_gamma): r""" This function computes the next value of gamma when a variable is active in the regularization path of semi-relaxed UOT. By taking the Lagrangian form of the problem, we obtain a similar update as the two-sided relaxed UOT .. 
math:: \max_{i \in \bar{A}} \frac{\mathbf{h}_{ri}^T(H_{rA} \phi - \mathbf{a}) + \mathbf{h}_{c i}^T\phi_u}{\mathbf{h}_{r i}^T H_{r A} \delta + \ \mathbf{h}_{c i} \delta_u - \mathbf{c}_i} where : - A is the current active set - :math:`\mathbf{h}_{r i}` is the ith column of the matrix :math:`H_r` - :math:`\mathbf{h}_{c i}` is the ith column of the matrix :math:`H_c` - :math:`H_{r A}` is the sub-matrix constructed by the columns of \ :math:`H_r` whose indices belong to the active set A - :math:`\mathbf{c}_i` is the :math:`i` th element of cost vector \ :math:`\mathbf{c}` - :math:`\phi` is the intercept of the solutions in current iteration - :math:`\delta` is the slope of the solutions in current iteration - :math:`\phi_u` is the intercept of Lagrange parameter at the \ current iteration - :math:`\delta_u` is the slope of Lagrange parameter at the \ current iteration Parameters ---------- phi : np.ndarray (size(A), ) Intercept of the solutions at the current iteration delta : np.ndarray (size(A), ) Slope of the solutions at the current iteration phi_u : np.ndarray (dim_b, ) Intercept of the Lagrange parameter at the current iteration delta_u : np.ndarray (dim_b, ) Slope of the Lagrange parameter at the current iteration HrHr : np.ndarray (dim_a * dim_b, dim_a * dim_b) Matrix product of :math:`H_r^T H_r` Hc : np.ndarray (dim_b, dim_a * dim_b) Matrix that computes the sum along the columns of the transport plan \ :math:`T` Hra : np.ndarray (dim_a * dim_b, ) Matrix product of :math:`H_r^T \mathbf{a}` c: np.ndarray (dim_a * dim_b, ) Flattened array of cost matrix :math:`C` active_index : list Indices of active variables current_gamma : float Value of regularization coefficient at the start of current iteration Returns ------- next_gamma : float Value of gamma if a variable is added to active set in next iteration next_active_index : int Index of variable to be activated References ---------- .. [41] Chapel, L., Flamary, R., Wu, H., FĂ©votte, C., and Gasso, G. (2021). Unbalanced optimal transport through non-negative penalized linear regression. NeurIPS. """ M = (HrHr[:, active_index].dot(phi) - Hra + Hc.T.dot(phi_u)) / \ (HrHr[:, active_index].dot(delta) - c + Hc.T.dot(delta_u) + 1e-16) M[active_index] = 0 M[M > (current_gamma - 1e-10 * current_gamma)] = 0 return np.max(M), np.argmax(M) def compute_next_removal(phi, delta, current_gamma): r""" This function computes the next gamma value if a variable is removed at the next iteration of the regularization path. We look for the largest value of the regularization parameter such that an element of the current solution vanishes .. math:: \max_{j \in A} \frac{\phi_j}{\delta_j} where : - A is the current active set - :math:`\phi_j` is the :math:`j` th element of the intercept of the \ current solution - :math:`\delta_j` is the :math:`j` th element of the slope of the \ current solution Parameters ---------- phi : ndarray, shape (size(A), ) Intercept of the solution at the current iteration delta : ndarray, shape (size(A), ) Slope of the solution at the current iteration current_gamma : float Value of the regularization parameter at the beginning of the \ current iteration Returns ------- next_removal_gamma : float Gamma value if a variable is removed at the next iteration next_removal_index : int Index of the variable to be removed at the next iteration .. _references-regpath: References ---------- .. [41] Chapel, L., Flamary, R., Wu, H., FĂ©votte, C., and Gasso, G. (2021). Unbalanced optimal transport through non-negative penalized linear regression. 
NeurIPS. """ r_candidate = phi / (delta - 1e-16) r_candidate[r_candidate >= (1 - 1e-8) * current_gamma] = 0 return np.max(r_candidate), np.argmax(r_candidate) def complement_schur(M_current, b, d, id_pop): r""" This function computes the inverse of the design matrix in the \ regularization path using the Schur complement. Two cases may arise: Case 1: one variable is added to the active set .. math:: M_{k+1}^{-1} = \begin{bmatrix} M_{k}^{-1} + s^{-1} M_{k}^{-1} \mathbf{b} \mathbf{b}^T M_{k}^{-1} \ & - M_{k}^{-1} \mathbf{b} s^{-1} \\ - s^{-1} \mathbf{b}^T M_{k}^{-1} & s^{-1} \end{bmatrix} where : - :math:`M_k^{-1}` is the inverse of the design matrix :math:`H_A^tH_A` \ of the previous iteration - :math:`\mathbf{b}` is the last column of :math:`M_{k}` - :math:`s` is the Schur complement, given by \ :math:`s = \mathbf{d} - \mathbf{b}^T M_{k}^{-1} \mathbf{b}` Case 2: one variable is removed from the active set. .. math:: M_{k+1}^{-1} = M^{-1}_{k \backslash q} - \frac{r_{-q,q} r^{T}_{-q,q}}{r_{q,q}} where : - :math:`q` is the index of column and row to delete - :math:`M^{-1}_{k \backslash q}` is the previous inverse matrix deprived \ of the :math:`q` th column and :math:`q` th row - :math:`r_{-q,q}` is the :math:`q` th column of :math:`M^{-1}_{k}` \ without the :math:`q` th element - :math:`r_{q, q}` is the element of :math:`q` th column and :math:`q` th \ row in :math:`M^{-1}_{k}` Parameters ---------- M_current : ndarray, shape (size(A)-1, size(A)-1) Inverse matrix of :math:`H_A^tH_A` at the previous iteration, with \ size(A) the size of the active set b : ndarray, shape (size(A)-1, ) None for case 2 (removal), last column of :math:`M_{k}` for case 1 \ (addition) d : float should be equal to 2 when UOT and 1 for the semi-relaxed OT id_pop : int Index of the variable to be removed, equal to -1 if no variable is deleted at the current iteration Returns ------- M : ndarray, shape (size(A), size(A)) Inverse matrix of :math:`H_A^tH_A` of the current iteration References ---------- .. [41] Chapel, L., Flamary, R., Wu, H., FĂ©votte, C., and Gasso, G. (2021). Unbalanced optimal transport through non-negative penalized linear regression. NeurIPS. """ if b is None: b = M_current[id_pop, :] b = np.delete(b, id_pop) M_del = np.delete(M_current, id_pop, 0) a = M_del[:, id_pop] M_del = np.delete(M_del, id_pop, 1) M = M_del - np.outer(a, b) / M_current[id_pop, id_pop] else: n = b.shape[0] + 1 if np.shape(b)[0] == 0: M = np.array([[0.5]]) else: X = M_current.dot(b) s = d - b.T.dot(X) M = np.zeros((n, n)) M[:-1, :-1] = M_current + X.dot(X.T) / s X_ravel = X.ravel() M[-1, :-1] = -X_ravel / s M[:-1, -1] = -X_ravel / s M[-1, -1] = 1 / s return M def construct_augmented_H(active_index, m, Hc, HrHr): r""" This function constructs an augmented matrix for the first iteration of the semi-relaxed regularization path .. 
math:: \text{Augmented}_H = \begin{bmatrix} 0 & H_{c A} \\ H_{c A}^T & H_{r A}^T H_{r A} \end{bmatrix} where : - :math:`H_{r A}` is the sub-matrix constructed by the columns of \ :math:`H_r` whose indices belong to the active set A - :math:`H_{c A}` is the sub-matrix constructed by the columns of \ :math:`H_c` whose indices belong to the active set A Parameters ---------- active_index : list Indices of the active variables m : int Length of the target distribution Hc : np.ndarray (dim_b, dim_a * dim_b) Matrix that computes the sum along the columns of the transport plan \ :math:`T` HrHr : np.ndarray (dim_a * dim_b, dim_a * dim_b) Matrix product of :math:`H_r^T H_r` Returns ------- H_augmented : np.ndarray (dim_b + size(A), dim_b + size(A)) Augmented matrix for the first iteration of the semi-relaxed regularization path """ Hc_sub = Hc[:, active_index].toarray() HrHr_sub = HrHr[:, active_index] HrHr_sub = HrHr_sub[active_index, :].toarray() H_augmented = np.block([[np.zeros((m, m)), Hc_sub], [Hc_sub.T, HrHr_sub]]) return H_augmented def fully_relaxed_path(a: np.array, b: np.array, C: np.array, reg=1e-4, itmax=50000): r"""This function gives the regularization path of l2-penalized UOT problem The problem to optimize is the Lasso reformulation of the l2-penalized UOT: .. math:: \min_t \gamma \mathbf{c}^T \mathbf{t} + 0.5 * \|{H} \mathbf{t} - \mathbf{y}\|_2^2 s.t. \mathbf{t} \geq 0 where : - :math:`\mathbf{c}` is the flattened version of the cost matrix \ :math:`{C}` - :math:`\gamma = 1/\lambda` is the l2-regularization coefficient - :math:`\mathbf{y}` is the concatenation of vectors :math:`\mathbf{a}` \ and :math:`\mathbf{b}`, defined as \ :math:`\mathbf{y}^T = [\mathbf{a}^T \mathbf{b}^T]` - :math:`{H}` is a design matrix, see :ref:`[41] ` \ for the design of :math:`{H}`. The matrix product :math:`H\mathbf{t}` \ computes both the source marginal and the target marginals. - :math:`\mathbf{t}` is the flattened version of the transport matrix Parameters ---------- a : np.ndarray (dim_a,) Histogram of dimension dim_a b : np.ndarray (dim_b,) Histogram of dimension dim_b C : np.ndarray, shape (dim_a, dim_b) Cost matrix reg: float l2-regularization coefficient itmax: int Maximum number of iteration Returns ------- t : np.ndarray (dim_a*dim_b, ) Flattened vector of the optimal transport matrix t_list : list List of solutions in the regularization path gamma_list : list List of regularization coefficients in the regularization path Examples -------- >>> import ot >>> import numpy as np >>> n = 3 >>> xs = np.array([1., 2., 3.]).reshape((n, 1)) >>> xt = np.array([5., 6., 7.]).reshape((n, 1)) >>> C = ot.dist(xs, xt) >>> C /= C.max() >>> a = np.array([0.2, 0.5, 0.3]) >>> b = np.array([0.2, 0.5, 0.3]) >>> t, _, _ = ot.regpath.fully_relaxed_path(a, b, C, 1e-4) >>> t array([1.99958333e-01, 0.00000000e+00, 0.00000000e+00, 3.88888889e-05, 4.99938889e-01, 0.00000000e+00, 0.00000000e+00, 3.88888889e-05, 2.99958333e-01]) References ---------- .. [41] Chapel, L., Flamary, R., Wu, H., FĂ©votte, C., and Gasso, G. (2021). Unbalanced optimal transport through non-negative penalized linear regression. NeurIPS. 
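The flattened solution can be reshaped back into a transport matrix; a minimal sketch, reusing ``a``, ``b`` and ``t`` from the example above and assuming the row-major flattening used by ``recast_ot_as_lasso``: >>> T = t.reshape((a.shape[0], b.shape[0])) # doctest: +SKIP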
""" n = np.shape(a)[0] m = np.shape(b)[0] H, y, c = recast_ot_as_lasso(a, b, C) HtH = H.T.dot(H) Hty = H.T.dot(y) n_iter = 1 # initialization M0 = Hty / c gamma_list = [np.max(M0)] active_index = [np.argmax(M0)] t_list = [np.zeros((n * m,))] H_inv = np.array([[]]) add_col = np.array([]) id_pop = -1 while n_iter < itmax and gamma_list[-1] > reg: H_inv = complement_schur(H_inv, add_col, 2., id_pop) current_gamma = gamma_list[-1] # compute the intercept and slope of solutions in current iteration # t = phi - gamma * delta phi = H_inv.dot(Hty[active_index]) delta = H_inv.dot(c[active_index]) gamma, ik = ot_next_gamma(phi, delta, HtH, Hty, c, active_index, current_gamma) # compute the next lambda when removing a point from the active set alt_gamma, id_pop = compute_next_removal(phi, delta, current_gamma) # if the positivity constraint is violated, we remove id_pop # from active set, otherwise we add ik to active set if alt_gamma > gamma: gamma = alt_gamma else: id_pop = -1 # compute the solution of current segment tA = phi - gamma * delta sol = np.zeros((n * m, )) sol[active_index] = tA if id_pop != -1: active_index.pop(id_pop) add_col = None else: active_index.append(ik) add_col = HtH[active_index[:-1], ik].toarray() gamma_list.append(gamma) t_list.append(sol) n_iter += 1 if itmax <= n_iter: print('maximum iteration has been reached !') # correct the last solution and gamma if len(t_list) > 1: t_final = (t_list[-2] + (t_list[-1] - t_list[-2]) * (reg - gamma_list[-2]) / (gamma_list[-1] - gamma_list[-2])) t_list[-1] = t_final gamma_list[-1] = reg else: gamma_list[-1] = reg print('Regularization path does not exist !') return t_list[-1], t_list, gamma_list def semi_relaxed_path(a: np.array, b: np.array, C: np.array, reg=1e-4, itmax=50000): r"""This function gives the regularization path of semi-relaxed l2-UOT problem. The problem to optimize is the Lasso reformulation of the l2-penalized UOT: .. math:: \min_t \gamma \mathbf{c}^T t + 0.5 * \|H_r \mathbf{t} - \mathbf{a}\|_2^2 s.t. H_c \mathbf{t} = \mathbf{b} \mathbf{t} \geq 0 where : - :math:`\mathbf{c}` is the flattened version of the cost matrix \ :math:`C` - :math:`\gamma = 1/\lambda` is the l2-regularization parameter - :math:`H_r` is a matrix that computes the sum along the rows of \ the transport plan :math:`T` - :math:`H_c` is a matrix that computes the sum along the columns of \ the transport plan :math:`T` - :math:`\mathbf{t}` is the flattened version of the transport plan \ :math:`T` Parameters ---------- a : np.ndarray (dim_a,) Histogram of dimension dim_a b : np.ndarray (dim_b,) Histogram of dimension dim_b C : np.ndarray, shape (dim_a, dim_b) Cost matrix reg: float (optional) l2-regularization coefficient itmax: int (optional) Maximum number of iteration Returns ------- t : np.ndarray (dim_a*dim_b, ) Flattened vector of the (unregularized) optimal transport matrix t_list : list List of all the optimal transport vectors of the regularization path gamma_list : list List of the regularization parameters in the path Examples -------- >>> import ot >>> import numpy as np >>> n = 3 >>> xs = np.array([1., 2., 3.]).reshape((n, 1)) >>> xt = np.array([5., 6., 7.]).reshape((n, 1)) >>> C = ot.dist(xs, xt) >>> C /= C.max() >>> a = np.array([0.2, 0.5, 0.3]) >>> b = np.array([0.2, 0.5, 0.3]) >>> t, _, _ = ot.regpath.semi_relaxed_path(a, b, C, 1e-4) >>> t array([1.99980556e-01, 0.00000000e+00, 0.00000000e+00, 1.94444444e-05, 4.99980556e-01, 0.00000000e+00, 0.00000000e+00, 1.94444444e-05, 3.00000000e-01]) References ---------- .. 
[41] Chapel, L., Flamary, R., Wu, H., Févotte, C., and Gasso, G. (2021). Unbalanced optimal transport through non-negative penalized linear regression. NeurIPS. """ n = np.shape(a)[0] m = np.shape(b)[0] Hr, Hc, c = recast_semi_relaxed_as_lasso(a, b, C) Hra = Hr.T.dot(a) HrHr = Hr.T.dot(Hr) n_iter = 1 active_index = [] # initialization for j in range(np.shape(C)[1]): i = np.argmin(C[:, j]) active_index.append(i * m + j) gamma_list = [] t_list = [] current_gamma = np.inf augmented_H0 = construct_augmented_H(active_index, m, Hc, HrHr) add_col = np.array([]) id_pop = -1 while n_iter < itmax and current_gamma > reg: if n_iter == 1: H_inv = np.linalg.inv(augmented_H0) else: H_inv = complement_schur(H_inv, add_col, 1., id_pop + m) # compute the intercept and slope of solutions in current iteration augmented_phi = H_inv.dot(np.concatenate((b, Hra[active_index]))) augmented_delta = H_inv[:, m:].dot(c[active_index]) phi = augmented_phi[m:] delta = augmented_delta[m:] phi_u = augmented_phi[0:m] delta_u = augmented_delta[0:m] gamma, ik = semi_relaxed_next_gamma(phi, delta, phi_u, delta_u, HrHr, Hc, Hra, c, active_index, current_gamma) # compute the next lambda when removing a point from the active set alt_gamma, id_pop = compute_next_removal(phi, delta, current_gamma) # if the positivity constraint is violated, we remove id_pop # from active set, otherwise we add ik to active set if alt_gamma > gamma: gamma = alt_gamma else: id_pop = -1 # compute the solution of current segment tA = phi - gamma * delta sol = np.zeros((n * m, )) sol[active_index] = tA if id_pop != -1: active_index.pop(id_pop) add_col = None else: active_index.append(ik) add_col = np.concatenate((Hc.toarray()[:, ik], HrHr.toarray()[active_index[:-1], ik])) add_col = add_col[:, np.newaxis] gamma_list.append(gamma) t_list.append(sol) current_gamma = gamma n_iter += 1 if itmax <= n_iter: print('maximum iteration has been reached !') # correct the last solution and gamma if len(t_list) > 1: t_final = (t_list[-2] + (t_list[-1] - t_list[-2]) * (reg - gamma_list[-2]) / (gamma_list[-1] - gamma_list[-2])) t_list[-1] = t_final gamma_list[-1] = reg else: gamma_list[-1] = reg print('Regularization path does not exist !') return t_list[-1], t_list, gamma_list def regularization_path(a: np.array, b: np.array, C: np.array, reg=1e-4, semi_relaxed=False, itmax=50000): r"""This function provides all the solutions of the regularization path \ of the l2-UOT problem :ref:`[41] <references-regpath>`. The problem to optimize is the Lasso reformulation of the l2-penalized UOT: .. math:: \min_{\mathbf{t}} \gamma \mathbf{c}^T \mathbf{t} + 0.5 * \|{H} \mathbf{t} - \mathbf{y}\|_2^2 s.t. \mathbf{t} \geq 0 where : - :math:`\mathbf{c}` is the flattened version of the cost matrix \ :math:`{C}` - :math:`\gamma = 1/\lambda` is the l2-regularization coefficient - :math:`\mathbf{y}` is the concatenation of vectors :math:`\mathbf{a}` \ and :math:`\mathbf{b}`, defined as \ :math:`\mathbf{y}^T = [\mathbf{a}^T \mathbf{b}^T]` - :math:`{H}` is a design matrix, see :ref:`[41] <references-regpath>` \ for the design of :math:`{H}`. The matrix product :math:`H\mathbf{t}` \ computes both the source and the target marginals. - :math:`\mathbf{t}` is the flattened version of the transport matrix For the semi-relaxed problem, it optimizes the Lasso reformulation of the l2-penalized UOT: .. math:: \min_{\mathbf{t}} \gamma \mathbf{c}^T \mathbf{t} + 0.5 * \|H_r \mathbf{t} - \mathbf{a}\|_2^2 s.t.
H_c \mathbf{t} = \mathbf{b} \mathbf{t} \geq 0 Parameters ---------- a : np.ndarray (dim_a,) Histogram of dimension dim_a b : np.ndarray (dim_b,) Histogram of dimension dim_b C : np.ndarray, shape (dim_a, dim_b) Cost matrix reg: float (optional) l2-regularization coefficient semi_relaxed : bool (optional) Give the semi-relaxed path if True itmax: int (optional) Maximum number of iteration Returns ------- t : np.ndarray (dim_a*dim_b, ) Flattened vector of the (unregularized) optimal transport matrix t_list : list List of all the optimal transport vectors of the regularization path gamma_list : list List of the regularization parameters in the path References ---------- .. [41] Chapel, L., Flamary, R., Wu, H., FĂ©votte, C., and Gasso, G. (2021). Unbalanced optimal transport through non-negative penalized linear regression. NeurIPS. """ if semi_relaxed: t, t_list, gamma_list = semi_relaxed_path(a, b, C, reg=reg, itmax=itmax) else: t, t_list, gamma_list = fully_relaxed_path(a, b, C, reg=reg, itmax=itmax) return t, t_list, gamma_list def compute_transport_plan(gamma, gamma_list, Pi_list): r""" Given the regularization path, this function computes the transport plan for any value of gamma thanks to the piecewise linearity of the path. .. math:: t(\gamma) = \phi(\gamma) - \gamma \delta(\gamma) where: - :math:`\gamma` is the regularization parameter - :math:`\phi(\gamma)` is the corresponding intercept - :math:`\delta(\gamma)` is the corresponding slope - :math:`\mathbf{t}` is the flattened version of the transport matrix Parameters ---------- gamma : float Regularization coefficient gamma_list : list List of regularization parameters of the regularization path Pi_list : list List of all the solutions of the regularization path Returns ------- t : np.ndarray (dim_a*dim_b, ) Vectorization of the transport plan corresponding to the given value of gamma Examples -------- >>> import ot >>> import numpy as np >>> n = 3 >>> xs = np.array([1., 2., 3.]).reshape((n, 1)) >>> xt = np.array([5., 6., 7.]).reshape((n, 1)) >>> C = ot.dist(xs, xt) >>> C /= C.max() >>> a = np.array([0.2, 0.5, 0.3]) >>> b = np.array([0.2, 0.5, 0.3]) >>> t, pi_list, g_list = ot.regpath.regularization_path(a, b, C, reg=1e-4) >>> gamma = 1 >>> t2 = ot.regpath.compute_transport_plan(gamma, g_list, pi_list) >>> t2 array([0. , 0. , 0. , 0.19722222, 0.05555556, 0. , 0. , 0.24722222, 0. ]) .. _references-regpath: References ---------- .. [41] Chapel, L., Flamary, R., Wu, H., FĂ©votte, C., and Gasso, G. (2021). Unbalanced optimal transport through non-negative penalized linear regression. NeurIPS. 
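Because the path is piecewise linear in :math:`\gamma`, the plan can be evaluated on a whole grid of regularization values at negligible cost; a minimal sketch reusing ``g_list`` and ``pi_list`` from the example above: >>> gammas = np.logspace(1, -2, 5) # doctest: +SKIP >>> plans = [ot.regpath.compute_transport_plan(g, g_list, pi_list) for g in gammas] # doctest: +SKIP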
""" if gamma >= gamma_list[0]: Pi = Pi_list[0] elif gamma <= gamma_list[-1]: Pi = Pi_list[-1] else: idx = np.where(gamma <= np.array(gamma_list))[0][-1] gamma_k0 = gamma_list[idx] gamma_k1 = gamma_list[idx + 1] pi_k0 = Pi_list[idx] pi_k1 = Pi_list[idx + 1] Pi = pi_k0 + (pi_k1 - pi_k0) * (gamma - gamma_k0) \ / (gamma_k1 - gamma_k0) return Pi python-pot-0.9.3+dfsg/ot/sliced.py000066400000000000000000000363361455713015700170450ustar00rootroot00000000000000""" Sliced OT Distances """ # Author: Adrien Corenflos # Nicolas Courty # RĂ©mi Flamary # # License: MIT License import numpy as np from .backend import get_backend, NumpyBackend from .utils import list_to_array, get_coordinate_circle from .lp import wasserstein_circle, semidiscrete_wasserstein2_unif_circle def get_random_projections(d, n_projections, seed=None, backend=None, type_as=None): r""" Generates n_projections samples from the uniform on the unit sphere of dimension :math:`d-1`: :math:`\mathcal{U}(\mathcal{S}^{d-1})` Parameters ---------- d : int dimension of the space n_projections : int number of samples requested seed: int or RandomState, optional Seed used for numpy random number generator backend: Backend to use for random generation Returns ------- out: ndarray, shape (d, n_projections) The uniform unit vectors on the sphere Examples -------- >>> n_projections = 100 >>> d = 5 >>> projs = get_random_projections(d, n_projections) >>> np.allclose(np.sum(np.square(projs), 0), 1.) # doctest: +NORMALIZE_WHITESPACE True """ if backend is None: nx = NumpyBackend() else: nx = backend if isinstance(seed, np.random.RandomState) and str(nx) == 'numpy': projections = seed.randn(d, n_projections) else: if seed is not None: nx.seed(seed) projections = nx.randn(d, n_projections, type_as=type_as) projections = projections / nx.sqrt(nx.sum(projections**2, 0, keepdims=True)) return projections def sliced_wasserstein_distance(X_s, X_t, a=None, b=None, n_projections=50, p=2, projections=None, seed=None, log=False): r""" Computes a Monte-Carlo approximation of the p-Sliced Wasserstein distance .. math:: \mathcal{SWD}_p(\mu, \nu) = \underset{\theta \sim \mathcal{U}(\mathbb{S}^{d-1})}{\mathbb{E}}\left(\mathcal{W}_p^p(\theta_\# \mu, \theta_\# \nu)\right)^{\frac{1}{p}} where : - :math:`\theta_\# \mu` stands for the pushforwards of the projection :math:`X \in \mathbb{R}^d \mapsto \langle \theta, X \rangle` Parameters ---------- X_s : ndarray, shape (n_samples_a, dim) samples in the source domain X_t : ndarray, shape (n_samples_b, dim) samples in the target domain a : ndarray, shape (n_samples_a,), optional samples weights in the source domain b : ndarray, shape (n_samples_b,), optional samples weights in the target domain n_projections : int, optional Number of projections used for the Monte-Carlo approximation p: float, optional = Power p used for computing the sliced Wasserstein projections: shape (dim, n_projections), optional Projection matrix (n_projections and seed are not used in this case) seed: int or RandomState or None, optional Seed used for random number generator log: bool, optional if True, sliced_wasserstein_distance returns the projections used and their associated EMD. Returns ------- cost: float Sliced Wasserstein Cost log : dict, optional log dictionary return only if log==True in parameters Examples -------- >>> n_samples_a = 20 >>> X = np.random.normal(0., 1., (n_samples_a, 5)) >>> sliced_wasserstein_distance(X, X, seed=0) # doctest: +NORMALIZE_WHITESPACE 0.0 References ---------- .. [31] Bonneel, Nicolas, et al. 
"Sliced and radon wasserstein barycenters of measures." Journal of Mathematical Imaging and Vision 51.1 (2015): 22-45 """ from .lp import wasserstein_1d X_s, X_t = list_to_array(X_s, X_t) if a is not None and b is not None and projections is None: nx = get_backend(X_s, X_t, a, b) elif a is not None and b is not None and projections is not None: nx = get_backend(X_s, X_t, a, b, projections) elif a is None and b is None and projections is not None: nx = get_backend(X_s, X_t, projections) else: nx = get_backend(X_s, X_t) n = X_s.shape[0] m = X_t.shape[0] if X_s.shape[1] != X_t.shape[1]: raise ValueError( "X_s and X_t must have the same number of dimensions {} and {} respectively given".format(X_s.shape[1], X_t.shape[1])) if a is None: a = nx.full(n, 1 / n, type_as=X_s) if b is None: b = nx.full(m, 1 / m, type_as=X_s) d = X_s.shape[1] if projections is None: projections = get_random_projections(d, n_projections, seed, backend=nx, type_as=X_s) else: n_projections = projections.shape[1] X_s_projections = nx.dot(X_s, projections) X_t_projections = nx.dot(X_t, projections) projected_emd = wasserstein_1d(X_s_projections, X_t_projections, a, b, p=p) res = (nx.sum(projected_emd) / n_projections) ** (1.0 / p) if log: return res, {"projections": projections, "projected_emds": projected_emd} return res def max_sliced_wasserstein_distance(X_s, X_t, a=None, b=None, n_projections=50, p=2, projections=None, seed=None, log=False): r""" Computes a Monte-Carlo approximation of the max p-Sliced Wasserstein distance .. math:: \mathcal{Max-SWD}_p(\mu, \nu) = \underset{\theta _in \mathcal{U}(\mathbb{S}^{d-1})}{\max} [\mathcal{W}_p^p(\theta_\# \mu, \theta_\# \nu)]^{\frac{1}{p}} where : - :math:`\theta_\# \mu` stands for the pushforwards of the projection :math:`\mathbb{R}^d \ni X \mapsto \langle \theta, X \rangle` Parameters ---------- X_s : ndarray, shape (n_samples_a, dim) samples in the source domain X_t : ndarray, shape (n_samples_b, dim) samples in the target domain a : ndarray, shape (n_samples_a,), optional samples weights in the source domain b : ndarray, shape (n_samples_b,), optional samples weights in the target domain n_projections : int, optional Number of projections used for the Monte-Carlo approximation p: float, optional = Power p used for computing the sliced Wasserstein projections: shape (dim, n_projections), optional Projection matrix (n_projections and seed are not used in this case) seed: int or RandomState or None, optional Seed used for random number generator log: bool, optional if True, sliced_wasserstein_distance returns the projections used and their associated EMD. Returns ------- cost: float Sliced Wasserstein Cost log : dict, optional log dictionary return only if log==True in parameters Examples -------- >>> n_samples_a = 20 >>> X = np.random.normal(0., 1., (n_samples_a, 5)) >>> sliced_wasserstein_distance(X, X, seed=0) # doctest: +NORMALIZE_WHITESPACE 0.0 References ---------- .. [35] Deshpande, I., Hu, Y. T., Sun, R., Pyrros, A., Siddiqui, N., Koyejo, S., ... & Schwing, A. G. (2019). Max-sliced wasserstein distance and its use for gans. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (pp. 10648-10656). 
""" from .lp import wasserstein_1d X_s, X_t = list_to_array(X_s, X_t) if a is not None and b is not None and projections is None: nx = get_backend(X_s, X_t, a, b) elif a is not None and b is not None and projections is not None: nx = get_backend(X_s, X_t, a, b, projections) elif a is None and b is None and projections is not None: nx = get_backend(X_s, X_t, projections) else: nx = get_backend(X_s, X_t) n = X_s.shape[0] m = X_t.shape[0] if X_s.shape[1] != X_t.shape[1]: raise ValueError( "X_s and X_t must have the same number of dimensions {} and {} respectively given".format(X_s.shape[1], X_t.shape[1])) if a is None: a = nx.full(n, 1 / n, type_as=X_s) if b is None: b = nx.full(m, 1 / m, type_as=X_s) d = X_s.shape[1] if projections is None: projections = get_random_projections(d, n_projections, seed, backend=nx, type_as=X_s) X_s_projections = nx.dot(X_s, projections) X_t_projections = nx.dot(X_t, projections) projected_emd = wasserstein_1d(X_s_projections, X_t_projections, a, b, p=p) res = nx.max(projected_emd) ** (1.0 / p) if log: return res, {"projections": projections, "projected_emds": projected_emd} return res def sliced_wasserstein_sphere(X_s, X_t, a=None, b=None, n_projections=50, p=2, projections=None, seed=None, log=False): r""" Compute the spherical sliced-Wasserstein discrepancy. .. math:: SSW_p(\mu,\nu) = \left(\int_{\mathbb{V}_{d,2}} W_p^p(P^U_\#\mu, P^U_\#\nu)\ \mathrm{d}\sigma(U)\right)^{\frac{1}{p}} where: - :math:`P^U_\# \mu` stands for the pushforwards of the projection :math:`\forall x\in S^{d-1},\ P^U(x) = \frac{U^Tx}{\|U^Tx\|_2}` The function runs on backend but tensorflow and jax are not supported. Parameters ---------- X_s: ndarray, shape (n_samples_a, dim) Samples in the source domain X_t: ndarray, shape (n_samples_b, dim) Samples in the target domain a : ndarray, shape (n_samples_a,), optional samples weights in the source domain b : ndarray, shape (n_samples_b,), optional samples weights in the target domain n_projections : int, optional Number of projections used for the Monte-Carlo approximation p: float, optional (default=2) Power p used for computing the spherical sliced Wasserstein projections: shape (n_projections, dim, 2), optional Projection matrix (n_projections and seed are not used in this case) seed: int or RandomState or None, optional Seed used for random number generator log: bool, optional if True, sliced_wasserstein_sphere returns the projections used and their associated EMD. Returns ------- cost: float Spherical Sliced Wasserstein Cost log: dict, optional log dictionary return only if log==True in parameters Examples -------- >>> n_samples_a = 20 >>> X = np.random.normal(0., 1., (n_samples_a, 5)) >>> X = X / np.sqrt(np.sum(X**2, -1, keepdims=True)) >>> sliced_wasserstein_sphere(X, X, seed=0) # doctest: +NORMALIZE_WHITESPACE 0.0 References ---------- .. [46] Bonet, C., Berg, P., Courty, N., Septier, F., Drumetz, L., & Pham, M. T. (2023). Spherical sliced-wasserstein. International Conference on Learning Representations. 
""" if a is not None and b is not None: nx = get_backend(X_s, X_t, a, b) else: nx = get_backend(X_s, X_t) n, d = X_s.shape m, _ = X_t.shape if X_s.shape[1] != X_t.shape[1]: raise ValueError( "X_s and X_t must have the same number of dimensions {} and {} respectively given".format(X_s.shape[1], X_t.shape[1])) if nx.any(nx.abs(nx.sum(X_s**2, axis=-1) - 1) > 10**(-4)): raise ValueError("X_s is not on the sphere.") if nx.any(nx.abs(nx.sum(X_t**2, axis=-1) - 1) > 10**(-4)): raise ValueError("X_t is not on the sphere.") if projections is None: # Uniforms and independent samples on the Stiefel manifold V_{d,2} if isinstance(seed, np.random.RandomState) and str(nx) == 'numpy': Z = seed.randn(n_projections, d, 2) else: if seed is not None: nx.seed(seed) Z = nx.randn(n_projections, d, 2, type_as=X_s) projections, _ = nx.qr(Z) else: n_projections = projections.shape[0] # Projection on S^1 # Projection on plane Xps = nx.einsum("ikj, lk -> ilj", projections, X_s) Xpt = nx.einsum("ikj, lk -> ilj", projections, X_t) # Projection on sphere Xps = Xps / nx.sqrt(nx.sum(Xps**2, -1, keepdims=True)) Xpt = Xpt / nx.sqrt(nx.sum(Xpt**2, -1, keepdims=True)) # Get coordinates on [0,1[ Xps_coords = nx.reshape(get_coordinate_circle(nx.reshape(Xps, (-1, 2))), (n_projections, n)) Xpt_coords = nx.reshape(get_coordinate_circle(nx.reshape(Xpt, (-1, 2))), (n_projections, m)) projected_emd = wasserstein_circle(Xps_coords.T, Xpt_coords.T, u_weights=a, v_weights=b, p=p) res = nx.mean(projected_emd) ** (1 / p) if log: return res, {"projections": projections, "projected_emds": projected_emd} return res def sliced_wasserstein_sphere_unif(X_s, a=None, n_projections=50, seed=None, log=False): r"""Compute the 2-spherical sliced wasserstein w.r.t. a uniform distribution. .. math:: SSW_2(\mu_n, \nu) where - :math:`\mu_n=\sum_{i=1}^n \alpha_i \delta_{x_i}` - :math:`\nu=\mathrm{Unif}(S^1)` Parameters ---------- X_s: ndarray, shape (n_samples_a, dim) Samples in the source domain a : ndarray, shape (n_samples_a,), optional samples weights in the source domain n_projections : int, optional Number of projections used for the Monte-Carlo approximation seed: int or RandomState or None, optional Seed used for random number generator log: bool, optional if True, sliced_wasserstein_distance returns the projections used and their associated EMD. Returns ------- cost: float Spherical Sliced Wasserstein Cost log: dict, optional log dictionary return only if log==True in parameters Examples --------- >>> np.random.seed(42) >>> x0 = np.random.randn(500,3) >>> x0 = x0 / np.sqrt(np.sum(x0**2, -1, keepdims=True)) >>> ssw = sliced_wasserstein_sphere_unif(x0, seed=42) >>> np.allclose(sliced_wasserstein_sphere_unif(x0, seed=42), 0.01734, atol=1e-3) True References: ----------- .. [46] Bonet, C., Berg, P., Courty, N., Septier, F., Drumetz, L., & Pham, M. T. (2023). Spherical sliced-wasserstein. International Conference on Learning Representations. 
""" if a is not None: nx = get_backend(X_s, a) else: nx = get_backend(X_s) n, d = X_s.shape if nx.any(nx.abs(nx.sum(X_s**2, axis=-1) - 1) > 10**(-4)): raise ValueError("X_s is not on the sphere.") # Uniforms and independent samples on the Stiefel manifold V_{d,2} if isinstance(seed, np.random.RandomState) and str(nx) == 'numpy': Z = seed.randn(n_projections, d, 2) else: if seed is not None: nx.seed(seed) Z = nx.randn(n_projections, d, 2, type_as=X_s) projections, _ = nx.qr(Z) # Projection on S^1 # Projection on plane Xps = nx.einsum("ikj, lk -> ilj", projections, X_s) # Projection on sphere Xps = Xps / nx.sqrt(nx.sum(Xps**2, -1, keepdims=True)) # Get coordinates on [0,1[ Xps_coords = nx.reshape(get_coordinate_circle(nx.reshape(Xps, (-1, 2))), (n_projections, n)) projected_emd = semidiscrete_wasserstein2_unif_circle(Xps_coords.T, u_weights=a) res = nx.mean(projected_emd) ** (1 / 2) if log: return res, {"projections": projections, "projected_emds": projected_emd} return res python-pot-0.9.3+dfsg/ot/smooth.py000066400000000000000000000551401455713015700171050ustar00rootroot00000000000000#Copyright (c) 2018, Mathieu Blondel #All rights reserved. # #Redistribution and use in source and binary forms, with or without #modification, are permitted provided that the following conditions are met: # #1. Redistributions of source code must retain the above copyright notice, this #list of conditions and the following disclaimer. # #2. Redistributions in binary form must reproduce the above copyright notice, #this list of conditions and the following disclaimer in the documentation and/or #other materials provided with the distribution. # #THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND #ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. #IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT #NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, #OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF #LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR #OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF #THE POSSIBILITY OF SUCH DAMAGE. # Author: Mathieu Blondel # Remi Flamary # Tianlin Liu """ Smooth and Sparse (KL an L2 reg.) and sparsity-constrained OT solvers. Implementation of : Smooth and Sparse Optimal Transport. Mathieu Blondel, Vivien Seguy, Antoine Rolet. In Proc. of AISTATS 2018. https://arxiv.org/abs/1710.06276 (Original code from https://github.com/mblondel/smooth-ot/) Sparsity-Constrained Optimal Transport. Liu, T., Puigcerver, J., & Blondel, M. (2023). Sparsity-constrained optimal transport. Proceedings of the Eleventh International Conference on Learning Representations (ICLR). https://arxiv.org/abs/2209.15466 [17] Blondel, M., Seguy, V., & Rolet, A. (2018). Smooth and Sparse Optimal Transport. Proceedings of the Twenty-First International Conference on Artificial Intelligence and Statistics (AISTATS). [50] Liu, T., Puigcerver, J., & Blondel, M. (2023). Sparsity-constrained optimal transport. Proceedings of the Eleventh International Conference on Learning Representations (ICLR). 
""" import numpy as np from scipy.optimize import minimize from .backend import get_backend import ot def projection_simplex(V, z=1, axis=None): r""" Projection of :math:`\mathbf{V}` onto the simplex, scaled by `z` .. math:: P\left(\mathbf{V}, z\right) = \mathop{\arg \min}_{\substack{\mathbf{y} >= 0 \\ \sum_i \mathbf{y}_i = z}} \quad \|\mathbf{y} - \mathbf{V}\|^2 Parameters ---------- V: ndarray, rank 2 z: float or array If array, len(z) must be compatible with :math:`\mathbf{V}` axis: None or int - axis=None: project :math:`\mathbf{V}` by :math:`P(\mathbf{V}.\mathrm{ravel}(), z)` - axis=1: project each :math:`\mathbf{V}_i` by :math:`P(\mathbf{V}_i, z_i)` - axis=0: project each :math:`\mathbf{V}_{:, j}` by :math:`P(\mathbf{V}_{:, j}, z_j)` Returns ------- projection: ndarray, shape :math:`\mathbf{V}`.shape """ if axis == 1: n_features = V.shape[1] U = np.sort(V, axis=1)[:, ::-1] z = np.ones(len(V)) * z cssv = np.cumsum(U, axis=1) - z[:, np.newaxis] ind = np.arange(n_features) + 1 cond = U - cssv / ind > 0 rho = np.count_nonzero(cond, axis=1) theta = cssv[np.arange(len(V)), rho - 1] / rho return np.maximum(V - theta[:, np.newaxis], 0) elif axis == 0: return projection_simplex(V.T, z, axis=1).T else: V = V.ravel().reshape(1, -1) return projection_simplex(V, z, axis=1).ravel() class Regularization(object): r"""Base class for Regularization objects Notes ----- This class is not intended for direct use but as apparent for true regularization implementation. """ def __init__(self, gamma=1.0): """ Parameters ---------- gamma: float Regularization parameter. We recover unregularized OT when gamma -> 0. """ self.gamma = gamma def delta_Omega(X): r""" Compute :math:`\delta_\Omega(\mathbf{X}_{:, j})` for each :math:`\mathbf{X}_{:, j}`. .. math:: \delta_\Omega(\mathbf{x}) = \sup_{\mathbf{y} >= 0} \ \mathbf{y}^T \mathbf{x} - \Omega(\mathbf{y}) Parameters ---------- X: array, shape = (len(a), len(b)) Input array. Returns ------- v: array, (len(b), ) Values: :math:`\mathbf{v}_j = \delta_\Omega(\mathbf{X}_{:, j})` G: array, (len(a), len(b)) Gradients: :math:`\mathbf{G}_{:, j} = \nabla \delta_\Omega(\mathbf{X}_{:, j})` """ raise NotImplementedError def max_Omega(X, b): r""" Compute :math:`\mathrm{max}_{\Omega, j}(\mathbf{X}_{:, j})` for each :math:`\mathbf{X}_{:, j}`. .. math:: \mathrm{max}_{\Omega, j}(\mathbf{x}) = \sup_{\substack{\mathbf{y} >= 0 \ \sum_i \mathbf{y}_i = 1}} \mathbf{y}^T \mathbf{x} - \frac{1}{\mathbf{b}_j} \Omega(\mathbf{b}_j \mathbf{y}) Parameters ---------- X: array, shape = (len(a), len(b)) Input array. b: array, shape = (len(b), ) Returns ------- v: array, (len(b), ) Values: :math:`\mathbf{v}_j = \mathrm{max}_{\Omega, j}(\mathbf{X}_{:, j})` G: array, (len(a), len(b)) Gradients: :math:`\mathbf{G}_{:, j} = \nabla \mathrm{max}_{\Omega, j}(\mathbf{X}_{:, j})` """ raise NotImplementedError def Omega(T): """ Compute regularization term. Parameters ---------- T: array, shape = len(a) x len(b) Input array. Returns ------- value: float Regularization term. 
""" raise NotImplementedError class NegEntropy(Regularization): """ NegEntropy regularization """ def delta_Omega(self, X): G = np.exp(X / self.gamma - 1) val = self.gamma * np.sum(G, axis=0) return val, G def max_Omega(self, X, b): max_X = np.max(X, axis=0) / self.gamma exp_X = np.exp(X / self.gamma - max_X) val = self.gamma * (np.log(np.sum(exp_X, axis=0)) + max_X) val -= self.gamma * np.log(b) G = exp_X / np.sum(exp_X, axis=0) return val, G def Omega(self, T): return self.gamma * np.sum(T * np.log(T)) class SquaredL2(Regularization): """ Squared L2 regularization """ def delta_Omega(self, X): max_X = np.maximum(X, 0) val = np.sum(max_X ** 2, axis=0) / (2 * self.gamma) G = max_X / self.gamma return val, G def max_Omega(self, X, b): G = projection_simplex(X / (b * self.gamma), axis=0) val = np.sum(X * G, axis=0) val -= 0.5 * self.gamma * b * np.sum(G * G, axis=0) return val, G def Omega(self, T): return 0.5 * self.gamma * np.sum(T ** 2) class SparsityConstrained(Regularization): """ Squared L2 regularization with sparsity constraints """ def __init__(self, max_nz, gamma=1.0): self.max_nz = max_nz self.gamma = gamma def delta_Omega(self, X): # For each column of X, find entries that are not among the top max_nz. non_top_indices = np.argpartition( -X, self.max_nz, axis=0)[self.max_nz:] # Set these entries to -inf. if X.ndim == 1: X[non_top_indices] = 0.0 else: X[non_top_indices, np.arange(X.shape[1])] = 0.0 max_X = np.maximum(X, 0) val = np.sum(max_X ** 2, axis=0) / (2 * self.gamma) G = max_X / self.gamma return val, G def max_Omega(self, X, b): # Project the scaled X onto the simplex with sparsity constraint. G = ot.utils.projection_sparse_simplex( X / (b * self.gamma), self.max_nz, axis=0) val = np.sum(X * G, axis=0) val -= 0.5 * self.gamma * b * np.sum(G * G, axis=0) return val, G def Omega(self, T): return 0.5 * self.gamma * np.sum(T ** 2) def dual_obj_grad(alpha, beta, a, b, C, regul): r""" Compute objective value and gradients of dual objective. Parameters ---------- alpha: array, shape = len(a) beta: array, shape = len(b) Current iterate of dual potentials. a: array, shape = len(a) b: array, shape = len(b) Input histograms (should be non-negative and sum to 1). C: array, shape = (len(a), len(b)) Ground cost matrix. regul: Regularization object Should implement a `delta_Omega(X)` method. Returns ------- obj: float Objective value (higher is better). grad_alpha: array, shape = len(a) Gradient w.r.t. `alpha`. grad_beta: array, shape = len(b) Gradient w.r.t. `beta`. """ obj = np.dot(alpha, a) + np.dot(beta, b) grad_alpha = a.copy() grad_beta = b.copy() # X[:, j] = alpha + beta[j] - C[:, j] X = alpha[:, np.newaxis] + beta - C # val.shape = len(b) # G.shape = len(a) x len(b) val, G = regul.delta_Omega(X) obj -= np.sum(val) grad_alpha -= G.sum(axis=1) grad_beta -= G.sum(axis=0) return obj, grad_alpha, grad_beta def solve_dual(a, b, C, regul, method="L-BFGS-B", tol=1e-3, max_iter=500, verbose=False): """ Solve the "smoothed" dual objective. Parameters ---------- a: array, shape = (len(a), ) b: array, shape = (len(b), ) Input histograms (should be non-negative and sum to 1). C: array, shape = (len(a), len(b)) Ground cost matrix. regul: Regularization object Should implement a `delta_Omega(X)` method. method: str Solver to be used (passed to `scipy.optimize.minimize`). tol: float Tolerance parameter. max_iter: int Maximum number of iterations. Returns ------- alpha: array, shape = (len(a), ) beta: array, shape = (len(b), ) Dual potentials. """ def _func(params): # Unpack alpha and beta. 
alpha = params[:len(a)] beta = params[len(a):] obj, grad_alpha, grad_beta = dual_obj_grad(alpha, beta, a, b, C, regul) # Pack grad_alpha and grad_beta. grad = np.concatenate((grad_alpha, grad_beta)) # We need to maximize the dual. return -obj, -grad # Unfortunately, `minimize` only supports functions whose argument is a # vector. So, we need to concatenate alpha and beta. alpha_init = np.zeros(len(a)) beta_init = np.zeros(len(b)) params_init = np.concatenate((alpha_init, beta_init)) res = minimize(_func, params_init, method=method, jac=True, tol=tol, options=dict(maxiter=max_iter, disp=verbose)) alpha = res.x[:len(a)] beta = res.x[len(a):] return alpha, beta, res def semi_dual_obj_grad(alpha, a, b, C, regul): """ Compute objective value and gradient of semi-dual objective. Parameters ---------- alpha: array, shape = len(a) Current iterate of semi-dual potentials. a: array, shape = len(a) b: array, shape = len(b) Input histograms (should be non-negative and sum to 1). C: array, shape = (len(a), len(b)) Ground cost matrix. regul: Regularization object Should implement a `max_Omega(X)` method. Returns ------- obj: float Objective value (higher is better). grad: array, shape = len(a) Gradient w.r.t. alpha. """ obj = np.dot(alpha, a) grad = a.copy() # X[:, j] = alpha - C[:, j] X = alpha[:, np.newaxis] - C # val.shape = len(b) # G.shape = len(a) x len(b) val, G = regul.max_Omega(X, b) obj -= np.dot(b, val) grad -= np.dot(G, b) return obj, grad def solve_semi_dual(a, b, C, regul, method="L-BFGS-B", tol=1e-3, max_iter=500, verbose=False): """ Solve the "smoothed" semi-dual objective. Parameters ---------- a: array, shape = (len(a), ) b: array, shape = (len(b), ) Input histograms (should be non-negative and sum to 1). C: array, shape = (len(a), len(b)) Ground cost matrix. regul: Regularization object Should implement a `max_Omega(X)` method. method: str Solver to be used (passed to `scipy.optimize.minimize`). tol: float Tolerance parameter. max_iter: int Maximum number of iterations. Returns ------- alpha: array, shape = (len(a), ) Semi-dual potentials. """ def _func(alpha): obj, grad = semi_dual_obj_grad(alpha, a, b, C, regul) # We need to maximize the semi-dual. return -obj, -grad alpha_init = np.zeros(len(a)) res = minimize(_func, alpha_init, method=method, jac=True, tol=tol, options=dict(maxiter=max_iter, disp=verbose)) return res.x, res def get_plan_from_dual(alpha, beta, C, regul): r""" Retrieve optimal transportation plan from optimal dual potentials. Parameters ---------- alpha: array, shape = len(a) beta: array, shape = len(b) Optimal dual potentials. C: array, shape = (len(a), len(b)) Ground cost matrix. regul: Regularization object Should implement a `delta_Omega(X)` method. Returns ------- T: array, shape = (len(a), len(b)) Optimal transportation plan. """ X = alpha[:, np.newaxis] + beta - C return regul.delta_Omega(X)[1] def get_plan_from_semi_dual(alpha, b, C, regul): r""" Retrieve optimal transportation plan from optimal semi-dual potentials. Parameters ---------- alpha: array, shape = len(a) Optimal semi-dual potentials. b: array, shape = len(b) Second input histogram (should be non-negative and sum to 1). C: array, shape = (len(a), len(b)) Ground cost matrix. regul: Regularization object Should implement a `delta_Omega(X)` method. Returns ------- T: array, shape = (len(a), len(b)) Optimal transportation plan. 
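Note that by construction the columns of the returned plan sum exactly to :math:`\mathbf{b}` (the gradient returned by ``max_Omega`` lies on the simplex), while the row sums only match :math:`\mathbf{a}` at optimality; a quick sanity-check sketch: >>> T = get_plan_from_semi_dual(alpha, b, C, regul) # doctest: +SKIP >>> np.allclose(T.sum(axis=0), b) # doctest: +SKIP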
""" X = alpha[:, np.newaxis] - C return regul.max_Omega(X, b)[1] * b def smooth_ot_dual(a, b, M, reg, reg_type='l2', method="L-BFGS-B", stopThr=1e-9, numItermax=500, verbose=False, log=False, max_nz=None): r""" Solve the regularized OT problem in the dual and return the OT matrix The function solves the smooth relaxed dual formulation (7) in :ref:`[17] `: .. math:: \max_{\alpha,\beta}\quad \mathbf{a}^T\alpha + \mathbf{b}^T\beta - \sum_j \delta_\Omega \left(\alpha+\beta_j-\mathbf{m}_j \right) where : - :math:`\mathbf{m}_j` is the j-th column of the cost matrix - :math:`\delta_\Omega` is the convex conjugate of the regularization term :math:`\Omega` - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target weights (sum to 1) The OT matrix can is reconstructed from the gradient of :math:`\delta_\Omega` (See :ref:`[17] ` Proposition 1). The optimization algorithm is using gradient decent (L-BFGS by default). Parameters ---------- a : np.ndarray (ns,) samples weights in the source domain b : np.ndarray (nt,) or np.ndarray (nt,nbb) samples in the target domain, compute sinkhorn with multiple targets and fixed :math:`\mathbf{M}` if :math:`\mathbf{b}` is a matrix (return OT loss + dual variables in log) M : np.ndarray (ns,nt) loss matrix reg : float Regularization term >0 reg_type : str Regularization type, can be the following (default ='l2'): - 'kl' : Kullback Leibler (~ Neg-entropy used in sinkhorn :ref:`[2] `) - 'l2' : Squared Euclidean regularization - 'sparsity_constrained' : Sparsity-constrained regularization [50] max_nz : int or None, optional. Used only in the case of reg_type = 'sparsity_constrained' to specify the maximum number of nonzeros per column of the optimal plan; not used for other regularization types. method : str Solver to use for scipy.optimize.minimize numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on error (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True Returns ------- gamma : (ns, nt) ndarray Optimal transportation matrix for the given parameters log : dict log dictionary return only if log==True in parameters .. _references-smooth-ot-dual: References ---------- .. [2] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013 .. [17] Blondel, M., Seguy, V., & Rolet, A. (2018). Smooth and Sparse Optimal Transport. Proceedings of the Twenty-First International Conference on Artificial Intelligence and Statistics (AISTATS). .. [50] Liu, T., Puigcerver, J., & Blondel, M. (2023). Sparsity-constrained optimal transport. Proceedings of the Eleventh International Conference on Learning Representations (ICLR). 
See Also -------- ot.lp.emd : Unregularized OT ot.sinkhorn : Entropic regularized OT ot.optim.cg : General regularized OT """ nx = get_backend(a, b, M) if reg_type.lower() in ['l2', 'squaredl2']: regul = SquaredL2(gamma=reg) elif reg_type.lower() in ['entropic', 'negentropy', 'kl']: regul = NegEntropy(gamma=reg) elif reg_type.lower() in ['sparsity_constrained', 'sparsity-constrained']: if not isinstance(max_nz, int): raise ValueError( f'max_nz {max_nz} must be an integer') regul = SparsityConstrained(gamma=reg, max_nz=max_nz) else: raise NotImplementedError('Unknown regularization') a0, b0, M0 = a, b, M # convert to numpy a, b, M = nx.to_numpy(a, b, M) # solve dual alpha, beta, res = solve_dual(a, b, M, regul, max_iter=numItermax, tol=stopThr, verbose=verbose) # reconstruct transport matrix G = nx.from_numpy(get_plan_from_dual(alpha, beta, M, regul), type_as=M0) if log: log = {'alpha': nx.from_numpy(alpha, type_as=a0), 'beta': nx.from_numpy(beta, type_as=b0), 'res': res} return G, log else: return G def smooth_ot_semi_dual(a, b, M, reg, reg_type='l2', max_nz=None, method="L-BFGS-B", stopThr=1e-9, numItermax=500, verbose=False, log=False): r""" Solve the regularized OT problem in the semi-dual and return the OT matrix The function solves the smooth relaxed dual formulation (10) in :ref:`[17] <references-smooth-ot-semi-dual>`: .. math:: \max_{\alpha}\quad \mathbf{a}^T\alpha - \mathrm{OT}_\Omega^*(\alpha, \mathbf{b}) where : .. math:: \mathrm{OT}_\Omega^*(\alpha, \mathbf{b}) = \sum_j \mathbf{b}_j \mathrm{max}_{\Omega, j}(\alpha - \mathbf{m}_j) - :math:`\mathbf{m}_j` is the j-th column of the cost matrix - :math:`\mathrm{OT}_\Omega^*(\alpha,b)` is defined in Eq. (9) in :ref:`[17] <references-smooth-ot-semi-dual>` - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target weights (sum to 1) The OT matrix can then be reconstructed using :ref:`[17] <references-smooth-ot-semi-dual>` Proposition 2. The optimization algorithm uses gradient descent (L-BFGS by default). Parameters ---------- a : np.ndarray (ns,) samples weights in the source domain b : np.ndarray (nt,) or np.ndarray (nt,nbb) samples weights in the target domain; if :math:`\mathbf{b}` is a matrix, compute the OT for multiple targets with a fixed :math:`\mathbf{M}` (return OT loss + dual variables in log) M : np.ndarray (ns,nt) loss matrix reg : float Regularization term >0 reg_type : str Regularization type, can be the following (default ='l2'): - 'kl' : Kullback Leibler (~ Neg-entropy used in sinkhorn :ref:`[2] <references-smooth-ot-semi-dual>`) - 'l2' : Squared Euclidean regularization - 'sparsity_constrained' : Sparsity-constrained regularization [50] max_nz : int or None, optional. Used only in the case of reg_type = 'sparsity_constrained' to specify the maximum number of nonzeros per column of the optimal plan; not used for other regularization types. method : str Solver to use for scipy.optimize.minimize numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on error (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True Returns ------- gamma : (ns, nt) ndarray Optimal transportation matrix for the given parameters log : dict log dictionary return only if log==True in parameters .. _references-smooth-ot-semi-dual: References ---------- .. [2] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013 .. [17] Blondel, M., Seguy, V., & Rolet, A. (2018). Smooth and Sparse Optimal Transport. Proceedings of the Twenty-First International Conference on Artificial Intelligence and Statistics (AISTATS). .. [50] Liu, T., Puigcerver, J., & Blondel, M. (2023).
Sparsity-constrained optimal transport. Proceedings of the Eleventh International Conference on Learning Representations (ICLR). See Also -------- ot.lp.emd : Unregularized OT ot.sinhorn : Entropic regularized OT ot.optim.cg : General regularized OT """ if reg_type.lower() in ['l2', 'squaredl2']: regul = SquaredL2(gamma=reg) elif reg_type.lower() in ['entropic', 'negentropy', 'kl']: regul = NegEntropy(gamma=reg) elif reg_type.lower() in ['sparsity_constrained', 'sparsity-constrained']: if not isinstance(max_nz, int): raise ValueError( f'max_nz {max_nz} must be an integer') regul = SparsityConstrained(gamma=reg, max_nz=max_nz) else: raise NotImplementedError('Unknown regularization') # solve dual alpha, res = solve_semi_dual(a, b, M, regul, max_iter=numItermax, tol=stopThr, verbose=verbose) # reconstruct transport matrix G = get_plan_from_semi_dual(alpha, b, M, regul) if log: log = {'alpha': alpha, 'res': res} return G, log else: return G python-pot-0.9.3+dfsg/ot/solvers.py000066400000000000000000001464541455713015700173020ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ General OT solvers with unified API """ # Author: Remi Flamary # # License: MIT License from .utils import OTResult, dist from .lp import emd2, wasserstein_1d from .backend import get_backend from .unbalanced import mm_unbalanced, sinkhorn_knopp_unbalanced, lbfgsb_unbalanced from .bregman import sinkhorn_log, empirical_sinkhorn2, empirical_sinkhorn2_geomloss from .partial import partial_wasserstein_lagrange from .smooth import smooth_ot_dual from .gromov import (gromov_wasserstein2, fused_gromov_wasserstein2, entropic_gromov_wasserstein2, entropic_fused_gromov_wasserstein2, semirelaxed_gromov_wasserstein2, semirelaxed_fused_gromov_wasserstein2, entropic_semirelaxed_fused_gromov_wasserstein2, entropic_semirelaxed_gromov_wasserstein2) from .partial import partial_gromov_wasserstein2, entropic_partial_gromov_wasserstein2 from .gaussian import empirical_bures_wasserstein_distance from .factored import factored_optimal_transport from .lowrank import lowrank_sinkhorn lst_method_lazy = ['1d', 'gaussian', 'lowrank', 'factored', 'geomloss', 'geomloss_auto', 'geomloss_tensorized', 'geomloss_online', 'geomloss_multiscale'] def solve(M, a=None, b=None, reg=None, reg_type="KL", unbalanced=None, unbalanced_type='KL', method=None, n_threads=1, max_iter=None, plan_init=None, potentials_init=None, tol=None, verbose=False): r"""Solve the discrete optimal transport problem and return :any:`OTResult` object The function solves the following general optimal transport problem .. math:: \min_{\mathbf{T}\geq 0} \quad \sum_{i,j} T_{i,j}M_{i,j} + \lambda_r R(\mathbf{T}) + \lambda_u U(\mathbf{T}\mathbf{1},\mathbf{a}) + \lambda_u U(\mathbf{T}^T\mathbf{1},\mathbf{b}) The regularization is selected with `reg` (:math:`\lambda_r`) and `reg_type`. By default ``reg=None`` and there is no regularization. The unbalanced marginal penalization can be selected with `unbalanced` (:math:`\lambda_u`) and `unbalanced_type`. By default ``unbalanced=None`` and the function solves the exact optimal transport problem (respecting the marginals). 
Parameters ---------- M : array_like, shape (dim_a, dim_b) Loss matrix a : array-like, shape (dim_a,), optional Samples weights in the source domain (default is uniform) b : array-like, shape (dim_b,), optional Samples weights in the target domain (default is uniform) reg : float, optional Regularization weight :math:`\lambda_r`, by default None (no reg., exact OT) reg_type : str, optional Type of regularization :math:`R` either "KL", "L2", "entropy", by default "KL" unbalanced : float, optional Unbalanced penalization weight :math:`\lambda_u`, by default None (balanced OT) unbalanced_type : str, optional Type of unbalanced penalization function :math:`U` either "KL", "L2", "TV", by default "KL" method : str, optional Method for solving the problem when multiple algorithms are available, default None for automatic selection. n_threads : int, optional Number of OMP threads for exact OT solver, by default 1 max_iter : int, optional Maximum number of iterations, by default None (default values in each solver) plan_init : array_like, shape (dim_a, dim_b), optional Initialization of the OT plan for iterative methods, by default None potentials_init : (array_like(dim_a,),array_like(dim_b,)), optional Initialization of the OT dual potentials for iterative methods, by default None tol : float, optional Tolerance for solution precision, by default None (default values in each solver) verbose : bool, optional Print information in the solver, by default False Returns ------- res : OTResult() Result of the optimization problem. The information can be obtained as follows: - res.plan : OT plan :math:`\mathbf{T}` - res.potentials : OT dual potentials - res.value : Optimal value of the optimization problem - res.value_linear : Linear OT loss with the optimal OT plan See :any:`OTResult` for more information. Notes ----- The following methods are available for solving the OT problems: - **Classical exact OT problem [1]** (default parameters) : .. math:: \min_\mathbf{T} \quad \langle \mathbf{T}, \mathbf{M} \rangle_F s.t. \ \mathbf{T} \mathbf{1} = \mathbf{a} \mathbf{T}^T \mathbf{1} = \mathbf{b} \mathbf{T} \geq 0 can be solved with the following code: .. code-block:: python res = ot.solve(M, a, b) - **Entropic regularized OT [2]** (when ``reg!=None``): .. math:: \min_\mathbf{T} \quad \langle \mathbf{T}, \mathbf{M} \rangle_F + \lambda R(\mathbf{T}) s.t. \ \mathbf{T} \mathbf{1} = \mathbf{a} \mathbf{T}^T \mathbf{1} = \mathbf{b} \mathbf{T} \geq 0 can be solved with the following code: .. code-block:: python # default is ``"KL"`` regularization (``reg_type="KL"``) res = ot.solve(M, a, b, reg=1.0) # or for original Sinkhorn paper formulation [2] res = ot.solve(M, a, b, reg=1.0, reg_type='entropy') - **Quadratic regularized OT [17]** (when ``reg!=None`` and ``reg_type="L2"``): .. math:: \min_\mathbf{T} \quad \langle \mathbf{T}, \mathbf{M} \rangle_F + \lambda R(\mathbf{T}) s.t. \ \mathbf{T} \mathbf{1} = \mathbf{a} \mathbf{T}^T \mathbf{1} = \mathbf{b} \mathbf{T} \geq 0 can be solved with the following code: .. code-block:: python res = ot.solve(M,a,b,reg=1.0,reg_type='L2') - **Unbalanced OT [41]** (when ``unbalanced!=None``): .. math:: \min_{\mathbf{T}\geq 0} \quad \sum_{i,j} T_{i,j}M_{i,j} + \lambda_u U(\mathbf{T}\mathbf{1},\mathbf{a}) + \lambda_u U(\mathbf{T}^T\mathbf{1},\mathbf{b}) can be solved with the following code: ..
code-block:: python # default is ``"KL"`` res = ot.solve(M,a,b,unbalanced=1.0) # quadratic unbalanced OT res = ot.solve(M,a,b,unbalanced=1.0,unbalanced_type='L2') # TV = partial OT res = ot.solve(M,a,b,unbalanced=1.0,unbalanced_type='TV') - **Regularized unbalanced OT [34]** (when ``unbalanced!=None`` and ``reg!=None``): .. math:: \min_{\mathbf{T}\geq 0} \quad \sum_{i,j} T_{i,j}M_{i,j} + \lambda_r R(\mathbf{T}) + \lambda_u U(\mathbf{T}\mathbf{1},\mathbf{a}) + \lambda_u U(\mathbf{T}^T\mathbf{1},\mathbf{b}) can be solved with the following code: .. code-block:: python # default is ``"KL"`` for both res = ot.solve(M,a,b,reg=1.0,unbalanced=1.0) # quadratic unbalanced OT with KL regularization res = ot.solve(M,a,b,reg=1.0,unbalanced=1.0,unbalanced_type='L2') # both quadratic res = ot.solve(M,a,b,reg=1.0, reg_type='L2',unbalanced=1.0,unbalanced_type='L2') .. _references-solve: References ---------- .. [1] Bonneel, N., Van De Panne, M., Paris, S., & Heidrich, W. (2011, December). Displacement interpolation using Lagrangian mass transport. In ACM Transactions on Graphics (TOG) (Vol. 30, No. 6, p. 158). ACM. .. [2] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013 .. [10] Chizat, L., Peyré, G., Schmitzer, B., & Vialard, F. X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816. .. [17] Blondel, M., Seguy, V., & Rolet, A. (2018). Smooth and Sparse Optimal Transport. Proceedings of the Twenty-First International Conference on Artificial Intelligence and Statistics (AISTATS). .. [34] Feydy, J., Séjourné, T., Vialard, F. X., Amari, S. I., Trouvé, A., & Peyré, G. (2019, April). Interpolating between optimal transport and MMD using Sinkhorn divergences. In The 22nd International Conference on Artificial Intelligence and Statistics (pp. 2681-2690). PMLR. .. [41] Chapel, L., Flamary, R., Wu, H., Févotte, C., and Gasso, G. (2021). Unbalanced optimal transport through non-negative penalized linear regression. NeurIPS.
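As a quick end-to-end illustration, the following minimal sketch (the toy data and variable names are hypothetical, not part of the library) compares the exact and the entropic solver through this unified API:

.. code-block:: python

    import numpy as np
    import ot

    # toy cost matrix between 4 source and 5 target points
    rng = np.random.RandomState(0)
    M = rng.rand(4, 5)

    res_exact = ot.solve(M)         # exact OT with uniform marginals
    res_ent = ot.solve(M, reg=1.0)  # entropic (KL) regularized OT

    # both plans have unit total mass; the entropic plan is denser
    print(res_exact.plan.sum(), res_ent.plan.sum())
    print(res_exact.value, res_ent.value)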
""" # detect backend arr = [M] if a is not None: arr.append(a) if b is not None: arr.append(b) nx = get_backend(*arr) # create uniform weights if not given if a is None: a = nx.ones(M.shape[0], type_as=M) / M.shape[0] if b is None: b = nx.ones(M.shape[1], type_as=M) / M.shape[1] # default values for solutions potentials = None value = None value_linear = None plan = None status = None if reg is None or reg == 0: # exact OT if unbalanced is None: # Exact balanced OT # default values for EMD solver if max_iter is None: max_iter = 1000000 value_linear, log = emd2(a, b, M, numItermax=max_iter, log=True, return_matrix=True, numThreads=n_threads) value = value_linear potentials = (log['u'], log['v']) plan = log['G'] status = log["warning"] if log["warning"] is not None else 'Converged' elif unbalanced_type.lower() in ['kl', 'l2']: # unbalanced exact OT # default values for exact unbalanced OT if max_iter is None: max_iter = 1000 if tol is None: tol = 1e-12 plan, log = mm_unbalanced(a, b, M, reg_m=unbalanced, div=unbalanced_type.lower(), numItermax=max_iter, stopThr=tol, log=True, verbose=verbose, G0=plan_init) value_linear = log['cost'] if unbalanced_type.lower() == 'kl': value = value_linear + unbalanced * (nx.kl_div(nx.sum(plan, 1), a) + nx.kl_div(nx.sum(plan, 0), b)) else: err_a = nx.sum(plan, 1) - a err_b = nx.sum(plan, 0) - b value = value_linear + unbalanced * nx.sum(err_a**2) + unbalanced * nx.sum(err_b**2) elif unbalanced_type.lower() == 'tv': if max_iter is None: max_iter = 1000000 plan, log = partial_wasserstein_lagrange(a, b, M, reg_m=unbalanced**2, log=True, numItermax=max_iter) value_linear = nx.sum(M * plan) err_a = nx.sum(plan, 1) - a err_b = nx.sum(plan, 0) - b value = value_linear + nx.sqrt(unbalanced**2 / 2.0 * (nx.sum(nx.abs(err_a)) + nx.sum(nx.abs(err_b)))) else: raise (NotImplementedError('Unknown unbalanced_type="{}"'.format(unbalanced_type))) else: # regularized OT if unbalanced is None: # Balanced regularized OT if reg_type.lower() in ['entropy', 'kl']: # default values for sinkhorn if max_iter is None: max_iter = 1000 if tol is None: tol = 1e-9 plan, log = sinkhorn_log(a, b, M, reg=reg, numItermax=max_iter, stopThr=tol, log=True, verbose=verbose) value_linear = nx.sum(M * plan) if reg_type.lower() == 'entropy': value = value_linear + reg * nx.sum(plan * nx.log(plan + 1e-16)) else: value = value_linear + reg * nx.kl_div(plan, a[:, None] * b[None, :]) potentials = (log['log_u'], log['log_v']) elif reg_type.lower() == 'l2': if max_iter is None: max_iter = 1000 if tol is None: tol = 1e-9 plan, log = smooth_ot_dual(a, b, M, reg=reg, numItermax=max_iter, stopThr=tol, log=True, verbose=verbose) value_linear = nx.sum(M * plan) value = value_linear + reg * nx.sum(plan**2) potentials = (log['alpha'], log['beta']) else: raise (NotImplementedError('Not implemented reg_type="{}"'.format(reg_type))) else: # unbalanced AND regularized OT if reg_type.lower() in ['kl'] and unbalanced_type.lower() == 'kl': if max_iter is None: max_iter = 1000 if tol is None: tol = 1e-9 plan, log = sinkhorn_knopp_unbalanced(a, b, M, reg=reg, reg_m=unbalanced, numItermax=max_iter, stopThr=tol, verbose=verbose, log=True) value_linear = nx.sum(M * plan) value = value_linear + reg * nx.kl_div(plan, a[:, None] * b[None, :]) + unbalanced * (nx.kl_div(nx.sum(plan, 1), a) + nx.kl_div(nx.sum(plan, 0), b)) potentials = (log['logu'], log['logv']) elif reg_type.lower() in ['kl', 'l2', 'entropy'] and unbalanced_type.lower() in ['kl', 'l2']: if max_iter is None: max_iter = 1000 if tol is None: tol = 1e-12 plan, log = 
lbfgsb_unbalanced(a, b, M, reg=reg, reg_m=unbalanced, reg_div=reg_type.lower(), regm_div=unbalanced_type.lower(), numItermax=max_iter, stopThr=tol, verbose=verbose, log=True) value_linear = nx.sum(M * plan) value = log['loss'] else: raise (NotImplementedError('Not implemented reg_type="{}" and unbalanced_type="{}"'.format(reg_type, unbalanced_type))) res = OTResult(potentials=potentials, value=value, value_linear=value_linear, plan=plan, status=status, backend=nx) return res def solve_gromov(Ca, Cb, M=None, a=None, b=None, loss='L2', symmetric=None, alpha=0.5, reg=None, reg_type="entropy", unbalanced=None, unbalanced_type='KL', n_threads=1, method=None, max_iter=None, plan_init=None, tol=None, verbose=False): r""" Solve the discrete (Fused) Gromov-Wasserstein problem and return :any:`OTResult` object The function solves the following optimization problem: .. math:: \min_{\mathbf{T}\geq 0} \quad (1 - \alpha) \langle \mathbf{T}, \mathbf{M} \rangle_F + \alpha \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j}\mathbf{T}_{k,l} + \lambda_r R(\mathbf{T}) + \lambda_u U(\mathbf{T}\mathbf{1},\mathbf{a}) + \lambda_u U(\mathbf{T}^T\mathbf{1},\mathbf{b}) The regularization is selected with `reg` (:math:`\lambda_r`) and `reg_type`. By default ``reg=None`` and there is no regularization. The unbalanced marginal penalization can be selected with `unbalanced` (:math:`\lambda_u`) and `unbalanced_type`. By default ``unbalanced=None`` and the function solves the exact optimal transport problem (respecting the marginals). Parameters ---------- Ca : array_like, shape (dim_a, dim_a) Cost matrix in the source domain Cb : array_like, shape (dim_b, dim_b) Cost matrix in the target domain M : array_like, shape (dim_a, dim_b), optional Linear cost matrix for Fused Gromov-Wasserstein (default is None). a : array-like, shape (dim_a,), optional Samples weights in the source domain (default is uniform) b : array-like, shape (dim_b,), optional Samples weights in the target domain (default is uniform) loss : str, optional Type of loss function, either ``"L2"`` or ``"KL"``, by default ``"L2"`` symmetric : bool, optional Use symmetric version of the Gromov-Wasserstein problem, by default None tests whether the matrices are symmetric or True/False to avoid the test. reg : float, optional Regularization weight :math:`\lambda_r`, by default None (no reg., exact OT) reg_type : str, optional Type of regularization :math:`R`, by default "entropy" (only used when ``reg!=None``) alpha : float, optional Weight of the quadratic term (alpha*Gromov) and the linear term ((1-alpha)*Wass) in the Fused Gromov-Wasserstein problem. Not used for the Gromov problem (when M is not provided). By default ``alpha=0.5``, weighting the quadratic (Gromov) and linear (Wasserstein) terms equally in the Fused Gromov-Wasserstein problem (``M!=None``) unbalanced : float, optional Unbalanced penalization weight :math:`\lambda_u`, by default None (balanced OT), Not implemented yet unbalanced_type : str, optional Type of unbalanced penalization function :math:`U` either "KL", "semirelaxed", "partial", by default "KL" but note that it is not implemented yet. n_threads : int, optional Number of OMP threads for exact OT solver, by default 1 method : str, optional Method for solving the problem when multiple algorithms are available, default None for automatic selection.
max_iter : int, optional Maximum number of iterations, by default None (default values in each solver) plan_init : array_like, shape (dim_a, dim_b), optional Initialization of the OT plan for iterative methods, by default None tol : float, optional Tolerance for solution precision, by default None (default values in each solver) verbose : bool, optional Print information in the solver, by default False Returns ------- res : OTResult() Result of the optimization problem. The information can be obtained as follows: - res.plan : OT plan :math:`\mathbf{T}` - res.potentials : OT dual potentials - res.value : Optimal value of the optimization problem - res.value_linear : Linear OT loss with the optimal OT plan - res.value_quad : Quadratic (GW) part of the OT loss with the optimal OT plan See :any:`OTResult` for more information. Notes ----- The following methods are available for solving the Gromov-Wasserstein problem: - **Classical Gromov-Wasserstein (GW) problem [3]** (default parameters): .. math:: \min_{\mathbf{T}\geq 0} \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j}\mathbf{T}_{k,l} s.t. \ \mathbf{T} \mathbf{1} = \mathbf{a} \mathbf{T}^T \mathbf{1} = \mathbf{b} \mathbf{T} \geq 0 can be solved with the following code: .. code-block:: python res = ot.solve_gromov(Ca, Cb) # uniform weights res = ot.solve_gromov(Ca, Cb, a=a, b=b) # given weights res = ot.solve_gromov(Ca, Cb, loss='KL') # KL loss plan = res.plan # GW plan value = res.value # GW value - **Fused Gromov-Wasserstein (FGW) problem [24]** (when ``M!=None``): .. math:: \min_{\mathbf{T}\geq 0} \quad (1 - \alpha) \langle \mathbf{T}, \mathbf{M} \rangle_F + \alpha \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j}\mathbf{T}_{k,l} s.t. \ \mathbf{T} \mathbf{1} = \mathbf{a} \mathbf{T}^T \mathbf{1} = \mathbf{b} \mathbf{T} \geq 0 can be solved with the following code: .. code-block:: python res = ot.solve_gromov(Ca, Cb, M) # uniform weights, alpha=0.5 (default) res = ot.solve_gromov(Ca, Cb, M, a=a, b=b, alpha=0.1) # given weights and alpha plan = res.plan # FGW plan loss_linear_term = res.value_linear # Wasserstein part of the loss loss_quad_term = res.value_quad # Gromov part of the loss loss = res.value # FGW value - **Regularized (Fused) Gromov-Wasserstein (GW) problem [12]** (when ``reg!=None``): .. math:: \min_{\mathbf{T}\geq 0} \quad (1 - \alpha) \langle \mathbf{T}, \mathbf{M} \rangle_F + \alpha \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j}\mathbf{T}_{k,l} + \lambda_r R(\mathbf{T}) s.t. \ \mathbf{T} \mathbf{1} = \mathbf{a} \mathbf{T}^T \mathbf{1} = \mathbf{b} \mathbf{T} \geq 0 can be solved with the following code: .. code-block:: python res = ot.solve_gromov(Ca, Cb, reg=1.0) # GW entropy regularization (default) res = ot.solve_gromov(Ca, Cb, M, a=a, b=b, reg=10, alpha=0.1) # FGW with entropy plan = res.plan # FGW plan loss_linear_term = res.value_linear # Wasserstein part of the loss loss_quad_term = res.value_quad # Gromov part of the loss loss = res.value # FGW value (including regularization) - **Semi-relaxed (Fused) Gromov-Wasserstein (GW) [48]** (when ``unbalanced_type='semirelaxed'``): .. math:: \min_{\mathbf{T}\geq 0} \quad (1 - \alpha) \langle \mathbf{T}, \mathbf{M} \rangle_F + \alpha \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j}\mathbf{T}_{k,l} s.t. \ \mathbf{T} \mathbf{1} = \mathbf{a} \mathbf{T} \geq 0 can be solved with the following code: ..
code-block:: python res = ot.solve_gromov(Ca, Cb, unbalanced_type='semirelaxed') # semirelaxed GW res = ot.solve_gromov(Ca, Cb, unbalanced_type='semirelaxed', reg=1) # entropic semirelaxed GW res = ot.solve_gromov(Ca, Cb, M, unbalanced_type='semirelaxed', alpha=0.1) # semirelaxed FGW plan = res.plan # FGW plan right_marginal = res.marginal_b # right marginal of the plan - **Partial (Fused) Gromov-Wasserstein (GW) problem [29]** (when ``unbalanced_type='partial'``): .. math:: \min_{\mathbf{T}\geq 0} \quad (1 - \alpha) \langle \mathbf{T}, \mathbf{M} \rangle_F + \alpha \sum_{i,j,k,l} L(\mathbf{C_1}_{i,k}, \mathbf{C_2}_{j,l}) \mathbf{T}_{i,j}\mathbf{T}_{k,l} s.t. \ \mathbf{T} \mathbf{1} \leq \mathbf{a} \mathbf{T}^T \mathbf{1} \leq \mathbf{b} \mathbf{T} \geq 0 \mathbf{1}^T\mathbf{T}\mathbf{1} = m can be solved with the following code: .. code-block:: python res = ot.solve_gromov(Ca, Cb, unbalanced_type='partial', unbalanced=0.8) # partial GW with m=0.8 .. _references-solve-gromov: References ---------- .. [3] Mémoli, F. (2011). Gromov–Wasserstein distances and the metric approach to object matching. Foundations of computational mathematics, 11(4), 417-487. .. [12] Gabriel Peyré, Marco Cuturi, and Justin Solomon (2016), Gromov-Wasserstein averaging of kernel and distance matrices International Conference on Machine Learning (ICML). .. [24] Vayer, T., Chapel, L., Flamary, R., Tavenard, R. and Courty, N. (2019). Optimal Transport for structured data with application on graphs Proceedings of the 36th International Conference on Machine Learning (ICML). .. [48] Cédric Vincent-Cuaz, Rémi Flamary, Marco Corneli, Titouan Vayer, Nicolas Courty (2022). Semi-relaxed Gromov-Wasserstein divergence and applications on graphs. International Conference on Learning Representations (ICLR), 2022. .. [29] Chapel, L., Alaya, M., Gasso, G. (2020). Partial Optimal Transport with Applications on Positive-Unlabeled Learning, Advances in Neural Information Processing Systems (NeurIPS), 2020.
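For orientation, here is a small usage sketch (toy data and hypothetical variable names; it relies only on the calls documented above):

.. code-block:: python

    import numpy as np
    import ot

    # two toy point clouds and their intra-domain cost matrices
    rng = np.random.RandomState(0)
    xa, xb = rng.rand(5, 2), rng.rand(6, 2)
    Ca, Cb = ot.dist(xa, xa), ot.dist(xb, xb)

    res = ot.solve_gromov(Ca, Cb)  # exact GW with uniform weights
    res_sr = ot.solve_gromov(Ca, Cb, unbalanced_type='semirelaxed')  # semi-relaxed GW

    print(res.value, res.plan.shape)  # GW value and (5, 6) plan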
""" # detect backend nx = get_backend(Ca, Cb, M, a, b) # create uniform weights if not given if a is None: a = nx.ones(Ca.shape[0], type_as=Ca) / Ca.shape[0] if b is None: b = nx.ones(Cb.shape[1], type_as=Cb) / Cb.shape[1] # default values for solutions potentials = None value = None value_linear = None value_quad = None plan = None status = None log = None loss_dict = {'l2': 'square_loss', 'kl': 'kl_loss'} if loss.lower() not in loss_dict.keys(): raise (NotImplementedError('Not implemented GW loss="{}"'.format(loss))) loss_fun = loss_dict[loss.lower()] if reg is None or reg == 0: # exact OT if unbalanced is None and unbalanced_type.lower() not in ['semirelaxed']: # Exact balanced OT if M is None or alpha == 1: # Gromov-Wasserstein problem # default values for solver if max_iter is None: max_iter = 10000 if tol is None: tol = 1e-9 value, log = gromov_wasserstein2(Ca, Cb, a, b, loss_fun=loss_fun, log=True, symmetric=symmetric, max_iter=max_iter, G0=plan_init, tol_rel=tol, tol_abs=tol, verbose=verbose) value_quad = value if alpha == 1: # set to 0 for FGW with alpha=1 value_linear = 0 plan = log['T'] potentials = (log['u'], log['v']) elif alpha == 0: # Wasserstein problem # default values for EMD solver if max_iter is None: max_iter = 1000000 value_linear, log = emd2(a, b, M, numItermax=max_iter, log=True, return_matrix=True, numThreads=n_threads) value = value_linear potentials = (log['u'], log['v']) plan = log['G'] status = log["warning"] if log["warning"] is not None else 'Converged' value_quad = 0 else: # Fused Gromov-Wasserstein problem # default values for solver if max_iter is None: max_iter = 10000 if tol is None: tol = 1e-9 value, log = fused_gromov_wasserstein2(M, Ca, Cb, a, b, loss_fun=loss_fun, alpha=alpha, log=True, symmetric=symmetric, max_iter=max_iter, G0=plan_init, tol_rel=tol, tol_abs=tol, verbose=verbose) value_linear = log['lin_loss'] value_quad = log['quad_loss'] plan = log['T'] potentials = (log['u'], log['v']) elif unbalanced_type.lower() in ['semirelaxed']: # Semi-relaxed OT if M is None or alpha == 1: # Semi relaxed Gromov-Wasserstein problem # default values for solver if max_iter is None: max_iter = 10000 if tol is None: tol = 1e-9 value, log = semirelaxed_gromov_wasserstein2(Ca, Cb, a, loss_fun=loss_fun, log=True, symmetric=symmetric, max_iter=max_iter, G0=plan_init, tol_rel=tol, tol_abs=tol, verbose=verbose) value_quad = value if alpha == 1: # set to 0 for FGW with alpha=1 value_linear = 0 plan = log['T'] # potentials = (log['u'], log['v']) TODO else: # Semi relaxed Fused Gromov-Wasserstein problem # default values for solver if max_iter is None: max_iter = 10000 if tol is None: tol = 1e-9 value, log = semirelaxed_fused_gromov_wasserstein2(M, Ca, Cb, a, loss_fun=loss_fun, alpha=alpha, log=True, symmetric=symmetric, max_iter=max_iter, G0=plan_init, tol_rel=tol, tol_abs=tol, verbose=verbose) value_linear = log['lin_loss'] value_quad = log['quad_loss'] plan = log['T'] # potentials = (log['u'], log['v']) TODO elif unbalanced_type.lower() in ['partial']: # Partial OT if M is None: # Partial Gromov-Wasserstein problem if unbalanced > nx.sum(a) or unbalanced > nx.sum(b): raise (ValueError('Partial GW mass given in reg is too large')) if loss.lower() != 'l2': raise (NotImplementedError('Partial GW only implemented with L2 loss')) if symmetric is not None: raise (NotImplementedError('Partial GW only implemented with symmetric=True')) # default values for solver if max_iter is None: max_iter = 1000 if tol is None: tol = 1e-7 value, log = partial_gromov_wasserstein2(Ca, Cb, 
a, b, m=unbalanced, log=True, numItermax=max_iter, G0=plan_init, tol=tol, verbose=verbose) value_quad = value plan = log['T'] # potentials = (log['u'], log['v']) TODO else: # partial FGW raise (NotImplementedError('Partial FGW not implemented yet')) elif unbalanced_type.lower() in ['kl', 'l2']: # unbalanced exact OT raise (NotImplementedError('Unbalanced_type="{}"'.format(unbalanced_type))) else: raise (NotImplementedError('Unknown unbalanced_type="{}"'.format(unbalanced_type))) else: # regularized OT if unbalanced is None and unbalanced_type.lower() not in ['semirelaxed']: # Balanced regularized OT if reg_type.lower() in ['entropy'] and (M is None or alpha == 1): # Entropic Gromov-Wasserstein problem # default values for solver if max_iter is None: max_iter = 1000 if tol is None: tol = 1e-9 if method is None: method = 'PGD' value_quad, log = entropic_gromov_wasserstein2(Ca, Cb, a, b, epsilon=reg, loss_fun=loss_fun, log=True, symmetric=symmetric, solver=method, max_iter=max_iter, G0=plan_init, tol_rel=tol, tol_abs=tol, verbose=verbose) plan = log['T'] value_linear = 0 value = value_quad + reg * nx.sum(plan * nx.log(plan + 1e-16)) # potentials = (log['log_u'], log['log_v']) #TODO elif reg_type.lower() in ['entropy'] and M is not None and alpha == 0: # Entropic Wasserstein problem # default values for solver if max_iter is None: max_iter = 1000 if tol is None: tol = 1e-9 plan, log = sinkhorn_log(a, b, M, reg=reg, numItermax=max_iter, stopThr=tol, log=True, verbose=verbose) value_linear = nx.sum(M * plan) value = value_linear + reg * nx.sum(plan * nx.log(plan + 1e-16)) potentials = (log['log_u'], log['log_v']) elif reg_type.lower() in ['entropy'] and M is not None: # Entropic Fused Gromov-Wasserstein problem # default values for solver if max_iter is None: max_iter = 1000 if tol is None: tol = 1e-9 if method is None: method = 'PGD' value_noreg, log = entropic_fused_gromov_wasserstein2(M, Ca, Cb, a, b, loss_fun=loss_fun, alpha=alpha, log=True, symmetric=symmetric, solver=method, max_iter=max_iter, G0=plan_init, tol_rel=tol, tol_abs=tol, verbose=verbose) value_linear = log['lin_loss'] value_quad = log['quad_loss'] plan = log['T'] # potentials = (log['u'], log['v']) value = value_noreg + reg * nx.sum(plan * nx.log(plan + 1e-16)) else: raise (NotImplementedError('Not implemented reg_type="{}"'.format(reg_type))) elif unbalanced_type.lower() in ['semirelaxed']: # Semi-relaxed OT if reg_type.lower() in ['entropy'] and (M is None or alpha == 1): # Entropic Semi-relaxed Gromov-Wasserstein problem # default values for solver if max_iter is None: max_iter = 1000 if tol is None: tol = 1e-9 value_quad, log = entropic_semirelaxed_gromov_wasserstein2(Ca, Cb, a, epsilon=reg, loss_fun=loss_fun, log=True, symmetric=symmetric, max_iter=max_iter, G0=plan_init, tol_rel=tol, tol_abs=tol, verbose=verbose) plan = log['T'] value_linear = 0 value = value_quad + reg * nx.sum(plan * nx.log(plan + 1e-16)) else: # Entropic Semi-relaxed FGW problem # default values for solver if max_iter is None: max_iter = 1000 if tol is None: tol = 1e-9 value_noreg, log = entropic_semirelaxed_fused_gromov_wasserstein2(M, Ca, Cb, a, loss_fun=loss_fun, alpha=alpha, log=True, symmetric=symmetric, max_iter=max_iter, G0=plan_init, tol_rel=tol, tol_abs=tol, verbose=verbose) value_linear = log['lin_loss'] value_quad = log['quad_loss'] plan = log['T'] value = value_noreg + reg * nx.sum(plan * nx.log(plan + 1e-16)) elif unbalanced_type.lower() in ['partial']: # Partial OT if M is None: # Partial Gromov-Wasserstein problem if unbalanced > 
nx.sum(a) or unbalanced > nx.sum(b): raise (ValueError('Partial GW mass given in reg is too large')) if loss.lower() != 'l2': raise (NotImplementedError('Partial GW only implemented with L2 loss')) if symmetric is not None: raise (NotImplementedError('Partial GW only implemented with symmetric=True')) # default values for solver if max_iter is None: max_iter = 1000 if tol is None: tol = 1e-7 value_quad, log = entropic_partial_gromov_wasserstein2(Ca, Cb, a, b, reg=reg, m=unbalanced, log=True, numItermax=max_iter, G0=plan_init, tol=tol, verbose=verbose) value = value_quad plan = log['T'] # potentials = (log['u'], log['v']) TODO else: # partial FGW raise (NotImplementedError('Partial entropic FGW not implemented yet')) else: # unbalanced AND regularized OT raise (NotImplementedError('Not implemented reg_type="{}" and unbalanced_type="{}"'.format(reg_type, unbalanced_type))) res = OTResult(potentials=potentials, value=value, value_linear=value_linear, value_quad=value_quad, plan=plan, status=status, backend=nx, log=log) return res def solve_sample(X_a, X_b, a=None, b=None, metric='sqeuclidean', reg=None, reg_type="KL", unbalanced=None, unbalanced_type='KL', lazy=False, batch_size=None, method=None, n_threads=1, max_iter=None, plan_init=None, rank=100, scaling=0.95, potentials_init=None, X_init=None, tol=None, verbose=False): r"""Solve the discrete optimal transport problem using the samples in the source and target domains. The function solves the following general optimal transport problem .. math:: \min_{\mathbf{T}\geq 0} \quad \sum_{i,j} T_{i,j}M_{i,j} + \lambda_r R(\mathbf{T}) + \lambda_u U(\mathbf{T}\mathbf{1},\mathbf{a}) + \lambda_u U(\mathbf{T}^T\mathbf{1},\mathbf{b}) where the cost matrix :math:`\mathbf{M}` is computed from the samples in the source and target domains such that :math:`M_{i,j} = d(x_i,y_j)` where :math:`d` is a metric (by default the squared Euclidean distance). The regularization is selected with `reg` (:math:`\lambda_r`) and `reg_type`. By default ``reg=None`` and there is no regularization. The unbalanced marginal penalization can be selected with `unbalanced` (:math:`\lambda_u`) and `unbalanced_type`. By default ``unbalanced=None`` and the function solves the exact optimal transport problem (respecting the marginals). Parameters ---------- X_a : array-like, shape (n_samples_a, dim) samples in the source domain X_b : array-like, shape (n_samples_b, dim) samples in the target domain a : array-like, shape (dim_a,), optional Samples weights in the source domain (default is uniform) b : array-like, shape (dim_b,), optional Samples weights in the target domain (default is uniform) reg : float, optional Regularization weight :math:`\lambda_r`, by default None (no reg., exact OT) reg_type : str, optional Type of regularization :math:`R` either "KL", "L2", "entropy", by default "KL" unbalanced : float, optional Unbalanced penalization weight :math:`\lambda_u`, by default None (balanced OT) unbalanced_type : str, optional Type of unbalanced penalization function :math:`U` either "KL", "L2", "TV", by default "KL" lazy : bool, optional Return an :any:`OTResult` object with a lazily evaluated plan (``res.lazy_plan``) to reduce memory cost when True, by default False batch_size : int, optional Batch size for lazy solver, by default None (default values in each solver) method : str, optional Method for solving the problem, this can be used to select the solver for unbalanced problems (see :any:`ot.solve`), or to select a specific large scale solver.
n_threads : int, optional Number of OMP threads for exact OT solver, by default 1 max_iter : int, optional Maximum number of iterations, by default None (default values in each solver) plan_init : array_like, shape (dim_a, dim_b), optional Initialization of the OT plan for iterative methods, by default None rank : int, optional Rank of the OT matrix for lazy solvers (method='factored' or method='lowrank'), by default 100 scaling : float, optional Scaling factor for the epsilon scaling lazy solvers (method='geomloss'), by default 0.95 potentials_init : (array_like(dim_a,),array_like(dim_b,)), optional Initialization of the OT dual potentials for iterative methods, by default None tol : float, optional Tolerance for solution precision, by default None (default values in each solver) verbose : bool, optional Print information in the solver, by default False Returns ------- res : OTResult() Result of the optimization problem. The information can be obtained as follows: - res.plan : OT plan :math:`\mathbf{T}` - res.potentials : OT dual potentials - res.value : Optimal value of the optimization problem - res.value_linear : Linear OT loss with the optimal OT plan - res.lazy_plan : Lazy OT plan (when ``lazy=True`` or lazy method) See :any:`OTResult` for more information. Notes ----- The following methods are available for solving the OT problems: - **Classical exact OT problem [1]** (default parameters) : .. math:: \min_\mathbf{T} \quad \langle \mathbf{T}, \mathbf{M} \rangle_F s.t. \ \mathbf{T} \mathbf{1} = \mathbf{a} \mathbf{T}^T \mathbf{1} = \mathbf{b} \mathbf{T} \geq 0, M_{i,j} = d(x_i,y_j) can be solved with the following code: .. code-block:: python res = ot.solve_sample(xa, xb, a, b) # or for uniform weights res = ot.solve_sample(xa, xb) - **Entropic regularized OT [2]** (when ``reg!=None``): .. math:: \min_\mathbf{T} \quad \langle \mathbf{T}, \mathbf{M} \rangle_F + \lambda R(\mathbf{T}) s.t. \ \mathbf{T} \mathbf{1} = \mathbf{a} \mathbf{T}^T \mathbf{1} = \mathbf{b} \mathbf{T} \geq 0, M_{i,j} = d(x_i,y_j) can be solved with the following code: .. code-block:: python # default is ``"KL"`` regularization (``reg_type="KL"``) res = ot.solve_sample(xa, xb, a, b, reg=1.0) # or for original Sinkhorn paper formulation [2] res = ot.solve_sample(xa, xb, a, b, reg=1.0, reg_type='entropy') # lazy solver of memory complexity O(n) res = ot.solve_sample(xa, xb, a, b, reg=1.0, lazy=True, batch_size=100) # lazy OT plan lazy_plan = res.lazy_plan We also have a very efficient solver with compiled CPU/CUDA code using geomloss/PyKeOps that can be used with the following code: .. code-block:: python # automatic solver res = ot.solve_sample(xa, xb, a, b, reg=1.0, method='geomloss') # force O(n) memory efficient solver res = ot.solve_sample(xa, xb, a, b, reg=1.0, method='geomloss_online') # force pre-computed cost matrix res = ot.solve_sample(xa, xb, a, b, reg=1.0, method='geomloss_tensorized') # use multiscale solver res = ot.solve_sample(xa, xb, a, b, reg=1.0, method='geomloss_multiscale') # One can play with speed (small scaling factor) and precision (scaling close to 1) res = ot.solve_sample(xa, xb, a, b, reg=1.0, method='geomloss', scaling=0.5) - **Quadratic regularized OT [17]** (when ``reg!=None`` and ``reg_type="L2"``): .. math:: \min_\mathbf{T} \quad \langle \mathbf{T}, \mathbf{M} \rangle_F + \lambda R(\mathbf{T}) s.t. \ \mathbf{T} \mathbf{1} = \mathbf{a} \mathbf{T}^T \mathbf{1} = \mathbf{b} \mathbf{T} \geq 0, M_{i,j} = d(x_i,y_j) can be solved with the following code: ..
code-block:: python res = ot.solve_sample(xa, xb, a, b, reg=1.0, reg_type='L2') - **Unbalanced OT [41]** (when ``unbalanced!=None``): .. math:: \min_{\mathbf{T}\geq 0} \quad \sum_{i,j} T_{i,j}M_{i,j} + \lambda_u U(\mathbf{T}\mathbf{1},\mathbf{a}) + \lambda_u U(\mathbf{T}^T\mathbf{1},\mathbf{b}) with M_{i,j} = d(x_i,y_j) can be solved with the following code: .. code-block:: python # default is ``"KL"`` res = ot.solve_sample(xa, xb, a, b, unbalanced=1.0) # quadratic unbalanced OT res = ot.solve_sample(xa, xb, a, b, unbalanced=1.0,unbalanced_type='L2') # TV = partial OT res = ot.solve_sample(xa, xb, a, b, unbalanced=1.0,unbalanced_type='TV') - **Regularized unbalanced OT [34]** (when ``unbalanced!=None`` and ``reg!=None``): .. math:: \min_{\mathbf{T}\geq 0} \quad \sum_{i,j} T_{i,j}M_{i,j} + \lambda_r R(\mathbf{T}) + \lambda_u U(\mathbf{T}\mathbf{1},\mathbf{a}) + \lambda_u U(\mathbf{T}^T\mathbf{1},\mathbf{b}) with M_{i,j} = d(x_i,y_j) can be solved with the following code: .. code-block:: python # default is ``"KL"`` for both res = ot.solve_sample(xa, xb, a, b, reg=1.0, unbalanced=1.0) # quadratic unbalanced OT with KL regularization res = ot.solve_sample(xa, xb, a, b, reg=1.0, unbalanced=1.0,unbalanced_type='L2') # both quadratic res = ot.solve_sample(xa, xb, a, b, reg=1.0, reg_type='L2', unbalanced=1.0, unbalanced_type='L2') - **Factored OT [40]** (when ``method='factored'``): This method solves the following OT problem [40]_ .. math:: \mathop{\arg \min}_\mu \quad W_2^2(\mu_a,\mu) + W_2^2(\mu,\mu_b) where :math:`\mu` is a uniformly weighted empirical distribution, :math:`\mu_a` and :math:`\mu_b` are the empirical measures associated to the samples in the source and target domains, and :math:`W_2` is the Wasserstein distance. This problem is solved using exact OT solvers for `reg=None` and the Sinkhorn solver for `reg!=None`. The solution provides two transport plans that can be used to recover a low rank OT plan between the two distributions. .. code-block:: python res = ot.solve_sample(xa, xb, method='factored', rank=10) # recover the lazy low rank plan factored_solution_lazy = res.lazy_plan # recover the full low rank plan factored_solution = factored_solution_lazy[:] - **Gaussian Bures-Wasserstein [2]** (when ``method='gaussian'``): This method computes the Gaussian Bures-Wasserstein distance between two Gaussian distributions estimated from the empirical distributions .. math:: \mathcal{W}(\mu_s, \mu_t)_2^2= \left\lVert \mathbf{m}_s - \mathbf{m}_t \right\rVert^2 + \mathcal{B}(\Sigma_s, \Sigma_t)^{2} where : .. math:: \mathcal{B}(\Sigma_s, \Sigma_t)^{2} = \text{Tr}\left(\Sigma_s + \Sigma_t - 2 \sqrt{\Sigma_s^{1/2}\Sigma_t\Sigma_s^{1/2}} \right) The covariances and means are estimated from the data. .. code-block:: python res = ot.solve_sample(xa, xb, method='gaussian') # recover the squared Gaussian Bures-Wasserstein distance BW_dist = res.value - **Wasserstein 1d [1]** (when ``method='1D'``): This method computes the Wasserstein distance between two 1d distributions estimated from the empirical distributions. For multivariate data the distances are computed independently for each dimension. .. code-block:: python res = ot.solve_sample(xa, xb, method='1D') # recover the squared Wasserstein distances W_dists = res.value .. _references-solve-sample: References ---------- .. [1] Bonneel, N., Van De Panne, M., Paris, S., & Heidrich, W. (2011, December). Displacement interpolation using Lagrangian mass transport. In ACM Transactions on Graphics (TOG) (Vol. 30, No. 6, p. 158). ACM. .. [2] M.
Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013 .. [10] Chizat, L., Peyré, G., Schmitzer, B., & Vialard, F. X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816. .. [17] Blondel, M., Seguy, V., & Rolet, A. (2018). Smooth and Sparse Optimal Transport. Proceedings of the Twenty-First International Conference on Artificial Intelligence and Statistics (AISTATS). .. [34] Feydy, J., Séjourné, T., Vialard, F. X., Amari, S. I., Trouvé, A., & Peyré, G. (2019, April). Interpolating between optimal transport and MMD using Sinkhorn divergences. In The 22nd International Conference on Artificial Intelligence and Statistics (pp. 2681-2690). PMLR. .. [40] Forrow, A., Hütter, J. C., Nitzan, M., Rigollet, P., Schiebinger, G., & Weed, J. (2019, April). Statistical optimal transport via factored couplings. In The 22nd International Conference on Artificial Intelligence and Statistics (pp. 2454-2465). PMLR. .. [41] Chapel, L., Flamary, R., Wu, H., Févotte, C., and Gasso, G. (2021). Unbalanced optimal transport through non-negative penalized linear regression. NeurIPS. .. [65] Scetbon, M., Cuturi, M., & Peyré, G. (2021). Low-rank Sinkhorn Factorization. In International Conference on Machine Learning. """ if method is not None and method.lower() in lst_method_lazy: lazy0 = lazy lazy = True if not lazy: # default non lazy solver calls ot.solve # compute cost matrix M and use solve function M = dist(X_a, X_b, metric) res = solve(M, a, b, reg, reg_type, unbalanced, unbalanced_type, method, n_threads, max_iter, plan_init, potentials_init, tol, verbose) return res else: # Detect backend nx = get_backend(X_a, X_b, a, b) # default values for solutions potentials = None value = None value_linear = None plan = None lazy_plan = None status = None log = None method = method.lower() if method is not None else '' if method == '1d': # Wasserstein 1d (parallel on all dimensions) if metric == 'sqeuclidean': p = 2 elif metric in ['euclidean', 'cityblock']: p = 1 else: raise (NotImplementedError('Not implemented metric="{}"'.format(metric))) value = wasserstein_1d(X_a, X_b, a, b, p=p) value_linear = value elif method == 'gaussian': # Gaussian Bures-Wasserstein if not metric.lower() in ['sqeuclidean']: raise (NotImplementedError('Not implemented metric="{}"'.format(metric))) if reg is None: reg = 1e-6 value, log = empirical_bures_wasserstein_distance(X_a, X_b, reg=reg, log=True) value = value**2 # return the value (squared bures distance) value_linear = value # return the value elif method == 'factored': # Factored OT if not metric.lower() in ['sqeuclidean']: raise (NotImplementedError('Not implemented metric="{}"'.format(metric))) if max_iter is None: max_iter = 100 if tol is None: tol = 1e-7 if reg is None: reg = 0 Q, R, X, log = factored_optimal_transport(X_a, X_b, reg=reg, r=rank, log=True, stopThr=tol, numItermax=max_iter, verbose=verbose) log['X'] = X value_linear = log['costa'] + log['costb'] value = value_linear # TODO add reg term lazy_plan = log['lazy_plan'] if not lazy0: # store plan if not lazy plan = lazy_plan[:] elif method == "lowrank": if not metric.lower() in ['sqeuclidean']: raise (NotImplementedError('Not implemented metric="{}"'.format(metric))) if max_iter is None: max_iter = 2000 if tol is None: tol = 1e-7 if reg is None: reg = 0 Q, R, g, log = lowrank_sinkhorn(X_a, X_b, rank=rank, reg=reg, a=a, b=b, numItermax=max_iter, stopThr=tol, log=True) value = log['value']
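# note: the low-rank factors (Q, R, g) define the plan as Q @ diag(1/g) @ R.T (see [65]); the lazy plan below exposes it without allocating the full (ns, nt) matrix until it is indexed (e.g. lazy_plan[:])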
value_linear = log['value_linear'] lazy_plan = log['lazy_plan'] if not lazy0: # store plan if not lazy plan = lazy_plan[:] elif method.startswith('geomloss'): # Geomloss solver for entropic OT split_method = method.split('_') if len(split_method) == 2: backend = split_method[1] else: if lazy0 is None: backend = 'auto' elif lazy0: backend = 'online' else: backend = 'tensorized' value, log = empirical_sinkhorn2_geomloss(X_a, X_b, reg=reg, a=a, b=b, metric=metric, log=True, verbose=verbose, scaling=scaling, backend=backend) lazy_plan = log['lazy_plan'] if not lazy0: # store plan if not lazy plan = lazy_plan[:] # return scaled potentials (to be consistent with other solvers) potentials = (log['f'] / (lazy_plan.blur**2), log['g'] / (lazy_plan.blur**2)) elif reg is None or reg == 0: # exact OT if unbalanced is None: # balanced EMD solver not available for lazy raise (NotImplementedError('Exact OT solver with lazy=True not implemented')) else: raise (NotImplementedError('Non regularized solver with unbalanced_type="{}" not implemented'.format(unbalanced_type))) else: if unbalanced is None: if max_iter is None: max_iter = 1000 if tol is None: tol = 1e-9 if batch_size is None: batch_size = 100 value_linear, log = empirical_sinkhorn2(X_a, X_b, reg, a, b, metric=metric, numIterMax=max_iter, stopThr=tol, isLazy=True, batchSize=batch_size, verbose=verbose, log=True) # compute potentials potentials = (log["u"], log["v"]) lazy_plan = log['lazy_plan'] else: raise (NotImplementedError('Not implemented unbalanced_type="{}" with regularization'.format(unbalanced_type))) res = OTResult(potentials=potentials, value=value, lazy_plan=lazy_plan, value_linear=value_linear, plan=plan, status=status, backend=nx, log=log) return res python-pot-0.9.3+dfsg/ot/stochastic.py000066400000000000000000000631171455713015700177430ustar00rootroot00000000000000""" Stochastic solvers for regularized OT. """ # Authors: Kilian Fatras # Rémi Flamary # # License: MIT License import numpy as np from .utils import dist, check_random_state from .backend import get_backend ############################################################################## # Optimization toolbox for SEMI-DUAL problems ############################################################################## def coordinate_grad_semi_dual(b, M, reg, beta, i): r''' Compute the coordinate gradient update for regularized discrete distributions for :math:`(i, :)` The function computes the gradient of the semi-dual problem: .. math:: \max_\mathbf{v} \ \sum_i \mathbf{a}_i \left[ \sum_j \mathbf{v}_j \mathbf{b}_j - \mathrm{reg} \cdot \log \left( \sum_j \mathbf{b}_j \exp \left( \frac{\mathbf{v}_j - \mathbf{M}_{i,j}}{\mathrm{reg}} \right) \right) \right] Where : - :math:`\mathbf{M}` is the (`ns`, `nt`) metric cost matrix - :math:`\mathbf{v}` is a dual variable in :math:`\mathbb{R}^{nt}` - reg is the regularization term - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target weights (sum to 1) The algorithms used for solving the problem are the ASGD and SAG algorithms as proposed in :ref:`[18] ` [alg.1 & alg.2] Parameters ---------- b : ndarray, shape (nt,) Target measure. M : ndarray, shape (ns, nt) Cost matrix. reg : float Regularization term > 0. beta : ndarray, shape (nt,) Dual variable. i : int Index of the picked source coordinate `i`. Returns ------- coordinate gradient : ndarray, shape (nt,) .. _references-coordinate-grad-semi-dual: References ---------- .. [18] Genevay, A., Cuturi, M., Peyré, G. & Bach, F. (2016) Stochastic Optimization for Large-scale Optimal Transport.
Advances in Neural Information Processing Systems (2016). ''' r = M[i, :] - beta exp_beta = np.exp(-r / reg) * b khi = exp_beta / (np.sum(exp_beta)) return b - khi def sag_entropic_transport(a, b, M, reg, numItermax=10000, lr=None, random_state=None): r""" Use the SAG algorithm to solve the regularized discrete measures optimal transport max problem The function solves the following optimization problem: .. math:: \gamma = \mathop{\arg \min}_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F + \mathrm{reg} \cdot\Omega(\gamma) s.t. \ \gamma \mathbf{1} = \mathbf{a} \gamma^T \mathbf{1} = \mathbf{b} \gamma \geq 0 Where : - :math:`\mathbf{M}` is the (`ns`, `nt`) metric cost matrix - :math:`\Omega` is the entropic regularization term with :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target weights (sum to 1) The algorithm used for solving the problem is the SAG algorithm as proposed in :ref:`[18] ` [alg.1] Parameters ---------- a : ndarray, shape (ns,), Source measure. b : ndarray, shape (nt,), Target measure. M : ndarray, shape (ns, nt), Cost matrix. reg : float Regularization term > 0 numItermax : int Number of iterations. lr : float Learning rate. random_state : int, RandomState instance or None, default=None Determines random number generation. Pass an int for reproducible output across multiple function calls. Returns ------- v : ndarray, shape (`nt`,) Dual variable. .. _references-sag-entropic-transport: References ---------- .. [18] Genevay, A., Cuturi, M., Peyré, G. & Bach, F. (2016) Stochastic Optimization for Large-scale Optimal Transport. Advances in Neural Information Processing Systems (2016). """ if lr is None: lr = 1. / max(a / reg) n_source = np.shape(M)[0] n_target = np.shape(M)[1] cur_beta = np.zeros(n_target) stored_gradient = np.zeros((n_source, n_target)) sum_stored_gradient = np.zeros(n_target) rng = check_random_state(random_state) for _ in range(numItermax): i = rng.randint(n_source) cur_coord_grad = a[i] * coordinate_grad_semi_dual(b, M, reg, cur_beta, i) sum_stored_gradient += (cur_coord_grad - stored_gradient[i]) stored_gradient[i] = cur_coord_grad cur_beta += lr * (1. / n_source) * sum_stored_gradient return cur_beta def averaged_sgd_entropic_transport(a, b, M, reg, numItermax=300000, lr=None, random_state=None): r''' Use the ASGD algorithm to solve the regularized semi-continuous measures optimal transport max problem The function solves the following optimization problem: .. math:: \gamma = \mathop{\arg \min}_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F + \mathrm{reg}\cdot\Omega(\gamma) s.t. \gamma \mathbf{1} = \mathbf{a} \gamma^T \mathbf{1} = \mathbf{b} \gamma \geq 0 Where : - :math:`\mathbf{M}` is the (`ns`, `nt`) metric cost matrix - :math:`\Omega` is the entropic regularization term with :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target weights (sum to 1) The algorithm used for solving the problem is the ASGD algorithm as proposed in :ref:`[18] ` [alg.2] Parameters ---------- a : ndarray, shape (ns,) source measure b : ndarray, shape (nt,) target measure M : ndarray, shape (ns, nt) cost matrix reg : float Regularization term > 0 numItermax : int Number of iterations. lr : float Learning rate. random_state : int, RandomState instance or None, default=None Determines random number generation. Pass an int for reproducible output across multiple function calls. Returns ------- ave_v : ndarray, shape (`nt`,) dual variable ..
_references-averaged-sgd-entropic-transport: References ---------- .. [18] Genevay, A., Cuturi, M., Peyré, G. & Bach, F. (2016) Stochastic Optimization for Large-scale Optimal Transport. Advances in Neural Information Processing Systems (2016). ''' if lr is None: lr = 1. / max(a / reg) n_source = np.shape(M)[0] n_target = np.shape(M)[1] cur_beta = np.zeros(n_target) ave_beta = np.zeros(n_target) rng = check_random_state(random_state) for cur_iter in range(numItermax): k = cur_iter + 1 i = rng.randint(n_source) cur_coord_grad = coordinate_grad_semi_dual(b, M, reg, cur_beta, i) cur_beta += (lr / np.sqrt(k)) * cur_coord_grad ave_beta = (1. / k) * cur_beta + (1 - 1. / k) * ave_beta return ave_beta def c_transform_entropic(b, M, reg, beta): r''' The goal is to recover u from the c-transform. The function computes the c-transform of a dual variable from the other dual variable: .. math:: \mathbf{u}_i = \mathbf{v}^{c,reg}_i = - \mathrm{reg} \cdot \log \left( \sum_j \mathbf{b}_j \exp\left( \frac{\mathbf{v}_j - \mathbf{M}_{i,j}}{\mathrm{reg}} \right) \right) Where : - :math:`\mathbf{M}` is the (`ns`, `nt`) metric cost matrix - :math:`\mathbf{u}`, :math:`\mathbf{v}` are dual variables in :math:`\mathbb{R}^{ns} \times \mathbb{R}^{nt}` - reg is the regularization term It is used to recover an optimal u from optimal v solving the semi-dual problem, see Proposition 2.1 of :ref:`[18] ` Parameters ---------- b : ndarray, shape (nt,) Target measure M : ndarray, shape (ns, nt) Cost matrix reg : float Regularization term > 0 beta : ndarray, shape (nt,) Dual variable. Returns ------- u : ndarray, shape (`ns`,) Dual variable. .. _references-c-transform-entropic: References ---------- .. [18] Genevay, A., Cuturi, M., Peyré, G. & Bach, F. (2016) Stochastic Optimization for Large-scale Optimal Transport. Advances in Neural Information Processing Systems (2016). ''' n_source = np.shape(M)[0] alpha = np.zeros(n_source) for i in range(n_source): r = M[i, :] - beta min_r = np.min(r) exp_beta = np.exp(-(r - min_r) / reg) * b alpha[i] = min_r - reg * np.log(np.sum(exp_beta)) return alpha def solve_semi_dual_entropic(a, b, M, reg, method, numItermax=10000, lr=None, log=False): r''' Compute the transportation matrix to solve the regularized discrete measures optimal transport max problem The function solves the following optimization problem: .. math:: \gamma = \mathop{\arg \min}_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F + \mathrm{reg} \cdot\Omega(\gamma) s.t. \ \gamma \mathbf{1} = \mathbf{a} \gamma^T \mathbf{1} = \mathbf{b} \gamma \geq 0 Where : - :math:`\mathbf{M}` is the (`ns`, `nt`) metric cost matrix - :math:`\Omega` is the entropic regularization term with :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target weights (sum to 1) The algorithm used for solving the problem is either the SAG or the ASGD algorithm, as proposed in :ref:`[18] ` Parameters ---------- a : ndarray, shape (ns,) source measure b : ndarray, shape (nt,) target measure M : ndarray, shape (ns, nt) cost matrix reg : float Regularization term > 0 method : str used method (SAG or ASGD) numItermax : int number of iterations lr : float learning rate log : bool, optional record log if True Returns ------- pi : ndarray, shape (ns, nt) transportation matrix log : dict log dictionary returned only if log==True in parameters .. _references-solve-semi-dual-entropic: References ---------- .. [18] Genevay, A., Cuturi, M., Peyré, G. & Bach, F.
(2016) Stochastic Optimization for Large-scale Optimal Transport. Advances in Neural Information Processing Systems (2016). ''' if method.lower() == "sag": opt_beta = sag_entropic_transport(a, b, M, reg, numItermax, lr) elif method.lower() == "asgd": opt_beta = averaged_sgd_entropic_transport(a, b, M, reg, numItermax, lr) else: print("Please, select your method between SAG and ASGD") return None opt_alpha = c_transform_entropic(b, M, reg, opt_beta) pi = (np.exp((opt_alpha[:, None] + opt_beta[None, :] - M[:, :]) / reg) * a[:, None] * b[None, :]) if log: log = {} log['alpha'] = opt_alpha log['beta'] = opt_beta return pi, log else: return pi ############################################################################## # Optimization toolbox for DUAL problems ############################################################################## def batch_grad_dual(a, b, M, reg, alpha, beta, batch_size, batch_alpha, batch_beta): r''' Computes the partial gradient of the dual optimal transport problem. For each :math:`(i,j)` in a batch of coordinates, the partial gradients are : .. math:: \partial_{\mathbf{u}_i} F = \frac{b_s}{l_v} \mathbf{u}_i - \sum_{j \in B_v} \mathbf{a}_i \mathbf{b}_j \exp\left( \frac{\mathbf{u}_i + \mathbf{v}_j - \mathbf{M}_{i,j}}{\mathrm{reg}} \right) \partial_{\mathbf{v}_j} F = \frac{b_s}{l_u} \mathbf{v}_j - \sum_{i \in B_u} \mathbf{a}_i \mathbf{b}_j \exp\left( \frac{\mathbf{u}_i + \mathbf{v}_j - \mathbf{M}_{i,j}}{\mathrm{reg}} \right) Where : - :math:`\mathbf{M}` is the (`ns`, `nt`) metric cost matrix - :math:`\mathbf{u}`, :math:`\mathbf{v}` are dual variables in :math:`\mathbb{R}^{ns} \times \mathbb{R}^{nt}` - reg is the regularization term - :math:`B_u` and :math:`B_v` are lists of index - :math:`b_s` is the size of the batches :math:`B_u` and :math:`B_v` - :math:`l_u` and :math:`l_v` are the lengths of :math:`B_u` and :math:`B_v` - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target weights (sum to 1) The algorithm used for solving the dual problem is the SGD algorithm as proposed in :ref:`[19] ` [alg.1] Parameters ---------- a : ndarray, shape (ns,) source measure b : ndarray, shape (nt,) target measure M : ndarray, shape (ns, nt) cost matrix reg : float Regularization term > 0 alpha : ndarray, shape (ns,) dual variable beta : ndarray, shape (nt,) dual variable batch_size : int size of the batch batch_alpha : ndarray, shape (bs,) batch of index of alpha batch_beta : ndarray, shape (bs,) batch of index of beta Returns ------- grad : ndarray, shape (`ns`,) partial grad F .. _references-batch-grad-dual: References ---------- .. [19] Seguy, V., Bhushan Damodaran, B., Flamary, R., Courty, N., Rolet, A.& Blondel, M. Large-scale Optimal Transport and Mapping Estimation. International Conference on Learning Representation (2018) ''' G = - (np.exp((alpha[batch_alpha, None] + beta[None, batch_beta] - M[batch_alpha, :][:, batch_beta]) / reg) * a[batch_alpha, None] * b[None, batch_beta]) grad_beta = np.zeros(np.shape(M)[1]) grad_alpha = np.zeros(np.shape(M)[0]) grad_beta[batch_beta] = (b[batch_beta] * len(batch_alpha) / np.shape(M)[0] + G.sum(0)) grad_alpha[batch_alpha] = (a[batch_alpha] * len(batch_beta) / np.shape(M)[1] + G.sum(1)) return grad_alpha, grad_beta def sgd_entropic_regularization(a, b, M, reg, batch_size, numItermax, lr, random_state=None): r''' Compute the sgd algorithm to solve the regularized discrete measures optimal transport dual problem The function solves the following optimization problem: .. 
math:: \gamma = \mathop{\arg \min}_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F + \mathrm{reg} \cdot\Omega(\gamma) s.t. \ \gamma \mathbf{1} = \mathbf{a} \gamma^T \mathbf{1} = \mathbf{b} \gamma \geq 0 Where : - :math:`\mathbf{M}` is the (`ns`, `nt`) metric cost matrix - :math:`\Omega` is the entropic regularization term with :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target weights (sum to 1) Parameters ---------- a : ndarray, shape (ns,) source measure b : ndarray, shape (nt,) target measure M : ndarray, shape (ns, nt) cost matrix reg : float Regularization term > 0 batch_size : int size of the batch numItermax : int number of iteration lr : float learning rate random_state : int, RandomState instance or None, default=None Determines random number generation. Pass an int for reproducible output across multiple function calls. Returns ------- alpha : ndarray, shape (ns,) dual variable beta : ndarray, shape (nt,) dual variable References ---------- .. [19] Seguy, V., Bhushan Damodaran, B., Flamary, R., Courty, N., Rolet, A.& Blondel, M. Large-scale Optimal Transport and Mapping Estimation. International Conference on Learning Representation (2018) ''' n_source = np.shape(M)[0] n_target = np.shape(M)[1] cur_alpha = np.zeros(n_source) cur_beta = np.zeros(n_target) rng = check_random_state(random_state) for cur_iter in range(numItermax): k = np.sqrt(cur_iter + 1) batch_alpha = rng.choice(n_source, batch_size, replace=False) batch_beta = rng.choice(n_target, batch_size, replace=False) update_alpha, update_beta = batch_grad_dual(a, b, M, reg, cur_alpha, cur_beta, batch_size, batch_alpha, batch_beta) cur_alpha[batch_alpha] += (lr / k) * update_alpha[batch_alpha] cur_beta[batch_beta] += (lr / k) * update_beta[batch_beta] return cur_alpha, cur_beta def solve_dual_entropic(a, b, M, reg, batch_size, numItermax=10000, lr=1, log=False): r''' Compute the transportation matrix to solve the regularized discrete measures optimal transport dual problem The function solves the following optimization problem: .. math:: \gamma = \mathop{\arg \min}_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F + \mathrm{reg} \cdot\Omega(\gamma) s.t. \ \gamma \mathbf{1} = \mathbf{a} \gamma^T \mathbf{1} = \mathbf{b} \gamma \geq 0 Where : - :math:`\mathbf{M}` is the (`ns`, `nt`) metric cost matrix - :math:`\Omega` is the entropic regularization term with :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target weights (sum to 1) Parameters ---------- a : ndarray, shape (ns,) source measure b : ndarray, shape (nt,) target measure M : ndarray, shape (ns, nt) cost matrix reg : float Regularization term > 0 batch_size : int size of the batch numItermax : int number of iteration lr : float learning rate log : bool, optional record log if True Returns ------- pi : ndarray, shape (ns, nt) transportation matrix log : dict log dictionary return only if log==True in parameters References ---------- .. [19] Seguy, V., Bhushan Damodaran, B., Flamary, R., Courty, N., Rolet, A.& Blondel, M. Large-scale Optimal Transport and Mapping Estimation. 
International Conference on Learning Representations (2018) ''' opt_alpha, opt_beta = sgd_entropic_regularization(a, b, M, reg, batch_size, numItermax, lr) pi = (np.exp((opt_alpha[:, None] + opt_beta[None, :] - M[:, :]) / reg) * a[:, None] * b[None, :]) if log: log = {} log['alpha'] = opt_alpha log['beta'] = opt_beta return pi, log else: return pi ################################################################################ # Losses for stochastic optimization ################################################################################ def loss_dual_entropic(u, v, xs, xt, reg=1, ws=None, wt=None, metric='sqeuclidean'): r""" Compute the dual loss of the entropic OT as in equation (6)-(7) of [19] This loss is backend compatible and can be used for stochastic optimization of the dual potentials. It can be used on the full dataset (beware of memory) or on minibatches. Parameters ---------- u : array-like, shape (ns,) Source dual potential v : array-like, shape (nt,) Target dual potential xs : array-like, shape (ns,d) Source samples xt : array-like, shape (nt,d) Target samples reg : float Regularization term > 0 (default=1) ws : array-like, shape (ns,), optional Source sample weights (default unif) wt : array-like, shape (nt,), optional Target sample weights (default unif) metric : string, callable Ground metric for OT (default quadratic). Can be given as a callable function taking (xs,xt) as parameters. Returns ------- dual_loss : array-like Dual loss (to maximize) References ---------- .. [19] Seguy, V., Bhushan Damodaran, B., Flamary, R., Courty, N., Rolet, A. & Blondel, M. Large-scale Optimal Transport and Mapping Estimation. International Conference on Learning Representations (2018) """ nx = get_backend(u, v, xs, xt) if ws is None: ws = nx.ones(xs.shape[0], type_as=xs) / xs.shape[0] if wt is None: wt = nx.ones(xt.shape[0], type_as=xt) / xt.shape[0] if callable(metric): M = metric(xs, xt) else: M = dist(xs, xt, metric=metric) F = -reg * nx.exp((u[:, None] + v[None, :] - M) / reg) return nx.sum(u * ws) + nx.sum(v * wt) + nx.sum(ws[:, None] * F * wt[None, :]) def plan_dual_entropic(u, v, xs, xt, reg=1, ws=None, wt=None, metric='sqeuclidean'): r""" Compute the primal OT plan of the entropic OT as in equation (8) of [19] This function is backend compatible and can be used for stochastic optimization of the dual potentials. It can be used on the full dataset (beware of memory) or on minibatches. Parameters ---------- u : array-like, shape (ns,) Source dual potential v : array-like, shape (nt,) Target dual potential xs : array-like, shape (ns,d) Source samples xt : array-like, shape (nt,d) Target samples reg : float Regularization term > 0 (default=1) ws : array-like, shape (ns,), optional Source sample weights (default unif) wt : array-like, shape (nt,), optional Target sample weights (default unif) metric : string, callable Ground metric for OT (default quadratic). Can be given as a callable function taking (xs,xt) as parameters. Returns ------- G : array-like Primal OT plan References ---------- .. [19] Seguy, V., Bhushan Damodaran, B., Flamary, R., Courty, N., Rolet, A. & Blondel, M. Large-scale Optimal Transport and Mapping Estimation.
International Conference on Learning Representation (2018) """ nx = get_backend(u, v, xs, xt) if ws is None: ws = nx.ones(xs.shape[0], type_as=xs) / xs.shape[0] if wt is None: wt = nx.ones(xt.shape[0], type_as=xt) / xt.shape[0] if callable(metric): M = metric(xs, xt) else: M = dist(xs, xt, metric=metric) H = nx.exp((u[:, None] + v[None, :] - M) / reg) return ws[:, None] * H * wt[None, :] def loss_dual_quadratic(u, v, xs, xt, reg=1, ws=None, wt=None, metric='sqeuclidean'): r""" Compute the dual loss of the quadratic regularized OT as in equation (6)-(7) of [19] This loss is backend compatible and can be used for stochastic optimization of the dual potentials. It can be used on the full dataset (beware of memory) or on minibatches. Parameters ---------- u : array-like, shape (ns,) Source dual potential v : array-like, shape (nt,) Target dual potential xs : array-like, shape (ns,d) Source samples xt : array-like, shape (nt,d) Target samples reg : float Regularization term > 0 (default=1) ws : array-like, shape (ns,), optional Source sample weights (default unif) wt : array-like, shape (nt,), optional Target sample weights (default unif) metric : string, callable Ground metric for OT (default quadratic). Can be given as a callable function taking (xs,xt) as parameters. Returns ------- dual_loss : array-like Dual loss (to maximize) References ---------- .. [19] Seguy, V., Bhushan Damodaran, B., Flamary, R., Courty, N., Rolet, A.& Blondel, M. Large-scale Optimal Transport and Mapping Estimation. International Conference on Learning Representation (2018) """ nx = get_backend(u, v, xs, xt) if ws is None: ws = nx.ones(xs.shape[0], type_as=xs) / xs.shape[0] if wt is None: wt = nx.ones(xt.shape[0], type_as=xt) / xt.shape[0] if callable(metric): M = metric(xs, xt) else: M = dist(xs, xt, metric=metric) F = -1.0 / (4 * reg) * nx.maximum(u[:, None] + v[None, :] - M, 0.0)**2 return nx.sum(u * ws) + nx.sum(v * wt) + nx.sum(ws[:, None] * F * wt[None, :]) def plan_dual_quadratic(u, v, xs, xt, reg=1, ws=None, wt=None, metric='sqeuclidean'): r""" Compute the primal OT plan of the quadratic regularized OT as in equation (8) of [19] This function is backend compatible and can be used for stochastic optimization of the dual potentials. It can be used on the full dataset (beware of memory) or on minibatches. Parameters ---------- u : array-like, shape (ns,) Source dual potential v : array-like, shape (nt,) Target dual potential xs : array-like, shape (ns,d) Source samples xt : array-like, shape (nt,d) Target samples reg : float Regularization term > 0 (default=1) ws : array-like, shape (ns,), optional Source sample weights (default unif) wt : array-like, shape (nt,), optional Target sample weights (default unif) metric : string, callable Ground metric for OT (default quadratic). Can be given as a callable function taking (xs,xt) as parameters. Returns ------- G : array-like Primal OT plan References ---------- .. [19] Seguy, V., Bhushan Damodaran, B., Flamary, R., Courty, N., Rolet, A.& Blondel, M. Large-scale Optimal Transport and Mapping Estimation.
International Conference on Learning Representation (2018) """ nx = get_backend(u, v, xs, xt) if ws is None: ws = nx.ones(xs.shape[0], type_as=xs) / xs.shape[0] if wt is None: wt = nx.ones(xt.shape[0], type_as=xt) / xt.shape[0] if callable(metric): M = metric(xs, xt) else: M = dist(xs, xt, metric=metric) H = 1.0 / (2 * reg) * nx.maximum(u[:, None] + v[None, :] - M, 0.0) return ws[:, None] * H * wt[None, :] python-pot-0.9.3+dfsg/ot/unbalanced.py000066400000000000000000001625161455713015700176650ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Regularized Unbalanced OT solvers """ # Author: Hicham Janati # Laetitia Chapel # Quang Huy Tran # # License: MIT License from __future__ import division import warnings import numpy as np from scipy.optimize import minimize, Bounds from .backend import get_backend from .utils import list_to_array, get_parameter_pair def sinkhorn_unbalanced(a, b, M, reg, reg_m, method='sinkhorn', reg_type="entropy", warmstart=None, numItermax=1000, stopThr=1e-6, verbose=False, log=False, **kwargs): r""" Solve the unbalanced entropic regularization optimal transport problem and return the OT plan The function solves the following optimization problem: .. math:: W = \min_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F + \mathrm{reg} \cdot \Omega(\gamma) + \mathrm{reg_{m1}} \cdot \mathrm{KL}(\gamma \mathbf{1}, \mathbf{a}) + \mathrm{reg_{m2}} \cdot \mathrm{KL}(\gamma^T \mathbf{1}, \mathbf{b}) s.t. \gamma \geq 0 where : - :math:`\mathbf{M}` is the (`dim_a`, `dim_b`) metric cost matrix - :math:`\Omega` is the entropic regularization term, can be either KL divergence or negative entropy - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target unbalanced distributions - KL is the Kullback-Leibler divergence The algorithm used for solving the problem is the generalized Sinkhorn-Knopp matrix scaling algorithm as proposed in :ref:`[10, 25] ` Parameters ---------- a : array-like (dim_a,) Unnormalized histogram of dimension `dim_a` b : array-like (dim_b,) or array-like (dim_b, n_hists) One or multiple unnormalized histograms of dimension `dim_b`. If many, compute all the OT distances :math:`(\mathbf{a}, \mathbf{b}_i)_i` M : array-like (dim_a, dim_b) loss matrix reg : float Entropy regularization term > 0 reg_m: float or indexable object of length 1 or 2 Marginal relaxation term. If reg_m is a scalar or an indexable object of length 1, then the same reg_m is applied to both marginal relaxations. The entropic balanced OT can be recovered using `reg_m=float("inf")`. For semi-relaxed case, use either `reg_m=(float("inf"), scalar)` or `reg_m=(scalar, float("inf"))`. If reg_m is an array, it must have the same backend as input arrays (a, b, M). method : str method used for the solver either 'sinkhorn', 'sinkhorn_stabilized' or 'sinkhorn_reg_scaling', see those functions for specific parameters reg_type : string, optional Regularizer term. Can take two values: 'entropy' (negative entropy) :math:`\Omega(\gamma) = \sum_{i,j} \gamma_{i,j} \log(\gamma_{i,j}) - \sum_{i,j} \gamma_{i,j}`, or 'kl' (Kullback-Leibler) :math:`\Omega(\gamma) = \text{KL}(\gamma, \mathbf{a} \mathbf{b}^T)`. warmstart: tuple of arrays, shape (dim_a, dim_b), optional Initialization of dual potentials. If provided, the dual potentials should be given (that is the logarithm of the u,v sinkhorn scaling vectors).
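When ``log=True``, the entries ``log['logu']`` and ``log['logv']`` returned by a previous call have exactly this form and can be reused as a warmstart here.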
numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on error (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True Returns ------- if n_hists == 1: - gamma : (dim_a, dim_b) array-like Optimal transportation matrix for the given parameters - log : dict log dictionary returned only if `log` is `True` else: - ot_distance : (n_hists,) array-like the OT distance between :math:`\mathbf{a}` and each of the histograms :math:`\mathbf{b}_i` - log : dict log dictionary returned only if `log` is `True` Examples -------- >>> import ot >>> a=[.5, .5] >>> b=[.5, .5] >>> M=[[0., 1.], [1., 0.]] >>> ot.sinkhorn_unbalanced(a, b, M, 1, 1) array([[0.51122814, 0.18807032], [0.18807032, 0.51122814]]) .. _references-sinkhorn-unbalanced: References ---------- .. [2] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013 .. [9] Schmitzer, B. (2016). Stabilized Sparse Scaling Algorithms for Entropy Regularized Transport Problems. arXiv preprint arXiv:1610.06519. .. [10] Chizat, L., Peyré, G., Schmitzer, B., & Vialard, F. X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816. .. [25] Frogner C., Zhang C., Mobahi H., Araya-Polo M., Poggio T. : Learning with a Wasserstein Loss, Advances in Neural Information Processing Systems (NIPS) 2015 See Also -------- ot.unbalanced.sinkhorn_knopp_unbalanced : Unbalanced Classic Sinkhorn :ref:`[10] ` ot.unbalanced.sinkhorn_stabilized_unbalanced: Unbalanced Stabilized sinkhorn :ref:`[9, 10] ` ot.unbalanced.sinkhorn_reg_scaling_unbalanced: Unbalanced Sinkhorn with epsilon scaling :ref:`[9, 10] ` """ if method.lower() == 'sinkhorn': return sinkhorn_knopp_unbalanced(a, b, M, reg, reg_m, reg_type, warmstart, numItermax=numItermax, stopThr=stopThr, verbose=verbose, log=log, **kwargs) elif method.lower() == 'sinkhorn_stabilized': return sinkhorn_stabilized_unbalanced(a, b, M, reg, reg_m, reg_type, warmstart, numItermax=numItermax, stopThr=stopThr, verbose=verbose, log=log, **kwargs) elif method.lower() in ['sinkhorn_reg_scaling']: warnings.warn('Method not implemented yet. Using classic Sinkhorn-Knopp') return sinkhorn_knopp_unbalanced(a, b, M, reg, reg_m, reg_type, warmstart, numItermax=numItermax, stopThr=stopThr, verbose=verbose, log=log, **kwargs) else: raise ValueError("Unknown method '%s'." % method) def sinkhorn_unbalanced2(a, b, M, reg, reg_m, method='sinkhorn', reg_type="entropy", warmstart=None, numItermax=1000, stopThr=1e-6, verbose=False, log=False, **kwargs): r""" Solve the entropic regularization unbalanced optimal transport problem and return the loss The function solves the following optimization problem: .. math:: W = \min_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F + \mathrm{reg} \cdot \Omega(\gamma) + \mathrm{reg_{m1}} \cdot \mathrm{KL}(\gamma \mathbf{1}, \mathbf{a}) + \mathrm{reg_{m2}} \cdot \mathrm{KL}(\gamma^T \mathbf{1}, \mathbf{b}) s.t.
\gamma\geq 0 where : - :math:`\mathbf{M}` is the (`dim_a`, `dim_b`) metric cost matrix - :math:`\Omega` is the entropic regularization term, can be either KL divergence or negative entropy - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target unbalanced distributions - KL is the Kullback-Leibler divergence The algorithm used for solving the problem is the generalized Sinkhorn-Knopp matrix scaling algorithm as proposed in :ref:`[10, 25] ` Parameters ---------- a : array-like (dim_a,) Unnormalized histogram of dimension `dim_a` b : array-like (dim_b,) or array-like (dim_b, n_hists) One or multiple unnormalized histograms of dimension `dim_b`. If many, compute all the OT distances :math:`(\mathbf{a}, \mathbf{b}_i)_i` M : array-like (dim_a, dim_b) loss matrix reg : float Entropy regularization term > 0 reg_m: float or indexable object of length 1 or 2 Marginal relaxation term. If reg_m is a scalar or an indexable object of length 1, then the same reg_m is applied to both marginal relaxations. The entropic balanced OT can be recovered using `reg_m=float("inf")`. For semi-relaxed case, use either `reg_m=(float("inf"), scalar)` or `reg_m=(scalar, float("inf"))`. If reg_m is an array, it must have the same backend as input arrays (a, b, M). method : str method used for the solver either 'sinkhorn', 'sinkhorn_stabilized' or 'sinkhorn_reg_scaling', see those functions for specific parameters reg_type : string, optional Regularizer term. Can take two values: 'entropy' (negative entropy) :math:`\Omega(\gamma) = \sum_{i,j} \gamma_{i,j} \log(\gamma_{i,j}) - \sum_{i,j} \gamma_{i,j}`, or 'kl' (Kullback-Leibler) :math:`\Omega(\gamma) = \text{KL}(\gamma, \mathbf{a} \mathbf{b}^T)`. warmstart: tuple of arrays, shape (dim_a, dim_b), optional Initialization of dual potentials. If provided, the dual potentials should be given (that is the logarithm of the u,v sinkhorn scaling vectors). numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on error (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True Returns ------- ot_distance : (n_hists,) array-like the OT distance between :math:`\mathbf{a}` and each of the histograms :math:`\mathbf{b}_i` log : dict log dictionary returned only if `log` is `True` Examples -------- >>> import ot >>> import numpy as np >>> a=[.5, .10] >>> b=[.5, .5] >>> M=[[0., 1.],[1., 0.]] >>> np.round(ot.unbalanced.sinkhorn_unbalanced2(a, b, M, 1., 1.), 8) 0.31912858 .. _references-sinkhorn-unbalanced2: References ---------- .. [2] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013 .. [9] Schmitzer, B. (2016). Stabilized Sparse Scaling Algorithms for Entropy Regularized Transport Problems. arXiv preprint arXiv:1610.06519. .. [10] Chizat, L., Peyré, G., Schmitzer, B., & Vialard, F. X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816. .. [25] Frogner C., Zhang C., Mobahi H., Araya-Polo M., Poggio T.
: Learning with a Wasserstein Loss, Advances in Neural Information Processing Systems (NIPS) 2015 See Also -------- ot.unbalanced.sinkhorn_knopp : Unbalanced Classic Sinkhorn :ref:`[10] ` ot.unbalanced.sinkhorn_stabilized: Unbalanced Stabilized sinkhorn :ref:`[9, 10] ` ot.unbalanced.sinkhorn_reg_scaling: Unbalanced Sinkhorn with epsilon scaling :ref:`[9, 10] ` """ M, a, b = list_to_array(M, a, b) nx = get_backend(M, a, b) if len(b.shape) < 2: if method.lower() == 'sinkhorn': res = sinkhorn_knopp_unbalanced(a, b, M, reg, reg_m, reg_type, warmstart, numItermax=numItermax, stopThr=stopThr, verbose=verbose, log=log, **kwargs) elif method.lower() == 'sinkhorn_stabilized': res = sinkhorn_stabilized_unbalanced(a, b, M, reg, reg_m, reg_type, warmstart, numItermax=numItermax, stopThr=stopThr, verbose=verbose, log=log, **kwargs) elif method.lower() in ['sinkhorn_reg_scaling']: warnings.warn('Method not implemented yet. Using classic Sinkhorn-Knopp') res = sinkhorn_knopp_unbalanced(a, b, M, reg, reg_m, reg_type, warmstart, numItermax=numItermax, stopThr=stopThr, verbose=verbose, log=log, **kwargs) else: raise ValueError('Unknown method %s.' % method) if log: return nx.sum(M * res[0]), res[1] else: return nx.sum(M * res) else: if method.lower() == 'sinkhorn': return sinkhorn_knopp_unbalanced(a, b, M, reg, reg_m, reg_type, warmstart, numItermax=numItermax, stopThr=stopThr, verbose=verbose, log=log, **kwargs) elif method.lower() == 'sinkhorn_stabilized': return sinkhorn_stabilized_unbalanced(a, b, M, reg, reg_m, reg_type, warmstart, numItermax=numItermax, stopThr=stopThr, verbose=verbose, log=log, **kwargs) elif method.lower() in ['sinkhorn_reg_scaling']: warnings.warn('Method not implemented yet. Using classic Sinkhorn-Knopp') return sinkhorn_knopp_unbalanced(a, b, M, reg, reg_m, reg_type, warmstart, numItermax=numItermax, stopThr=stopThr, verbose=verbose, log=log, **kwargs) else: raise ValueError('Unknown method %s.' % method) def sinkhorn_knopp_unbalanced(a, b, M, reg, reg_m, reg_type="entropy", warmstart=None, numItermax=1000, stopThr=1e-6, verbose=False, log=False, **kwargs): r""" Solve the entropic regularization unbalanced optimal transport problem and return the OT plan The function solves the following optimization problem: .. math:: W = \min_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F + \mathrm{reg} \cdot \Omega(\gamma) + \mathrm{reg_{m1}} \cdot \mathrm{KL}(\gamma \mathbf{1}, \mathbf{a}) + \mathrm{reg_{m2}} \cdot \mathrm{KL}(\gamma^T \mathbf{1}, \mathbf{b}) s.t. \gamma \geq 0 where : - :math:`\mathbf{M}` is the (`dim_a`, `dim_b`) metric cost matrix - :math:`\Omega` is the entropic regularization term, can be either KL divergence or negative entropy - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target unbalanced distributions - KL is the Kullback-Leibler divergence The algorithm used for solving the problem is the generalized Sinkhorn-Knopp matrix scaling algorithm as proposed in :ref:`[10, 25] ` Parameters ---------- a : array-like (dim_a,) Unnormalized histogram of dimension `dim_a` b : array-like (dim_b,) or array-like (dim_b, n_hists) One or multiple unnormalized histograms of dimension `dim_b`. If many, compute all the OT distances (a, b_i) M : array-like (dim_a, dim_b) loss matrix reg : float Entropy regularization term > 0 reg_m: float or indexable object of length 1 or 2 Marginal relaxation term. If reg_m is a scalar or an indexable object of length 1, then the same reg_m is applied to both marginal relaxations.
The entropic balanced OT can be recovered using `reg_m=float("inf")`. For semi-relaxed case, use either `reg_m=(float("inf"), scalar)` or `reg_m=(scalar, float("inf"))`. If reg_m is an array, it must have the same backend as input arrays (a, b, M). reg_type : string, optional Regularizer term. Can take two values: 'entropy' (negative entropy) :math:`\Omega(\gamma) = \sum_{i,j} \gamma_{i,j} \log(\gamma_{i,j}) - \sum_{i,j} \gamma_{i,j}`, or 'kl' (Kullback-Leibler) :math:`\Omega(\gamma) = \text{KL}(\gamma, \mathbf{a} \mathbf{b}^T)`. warmstart: tuple of arrays, shape (dim_a, dim_b), optional Initialization of dual potentials. If provided, the dual potentials should be given (that is the logarithm of the u,v sinkhorn scaling vectors). numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on error (> 0) verbose : bool, optional Print information along iterations log : bool, optional record log if True Returns ------- if n_hists == 1: - gamma : (dim_a, dim_b) array-like Optimal transportation matrix for the given parameters - log : dict log dictionary returned only if `log` is `True` else: - ot_distance : (n_hists,) array-like the OT distance between :math:`\mathbf{a}` and each of the histograms :math:`\mathbf{b}_i` - log : dict log dictionary returned only if `log` is `True` Examples -------- >>> import ot >>> a=[.5, .5] >>> b=[.5, .5] >>> M=[[0., 1.],[1., 0.]] >>> ot.unbalanced.sinkhorn_knopp_unbalanced(a, b, M, 1., 1.) array([[0.51122814, 0.18807032], [0.18807032, 0.51122814]]) .. _references-sinkhorn-knopp-unbalanced: References ---------- .. [10] Chizat, L., Peyré, G., Schmitzer, B., & Vialard, F. X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816. .. [25] Frogner C., Zhang C., Mobahi H., Araya-Polo M., Poggio T. : Learning with a Wasserstein Loss, Advances in Neural Information Processing Systems (NIPS) 2015 See Also -------- ot.lp.emd : Unregularized OT ot.optim.cg : General regularized OT """ M, a, b = list_to_array(M, a, b) nx = get_backend(M, a, b) dim_a, dim_b = M.shape if len(a) == 0: a = nx.ones(dim_a, type_as=M) / dim_a if len(b) == 0: b = nx.ones(dim_b, type_as=M) / dim_b if len(b.shape) > 1: n_hists = b.shape[1] else: n_hists = 0 reg_m1, reg_m2 = get_parameter_pair(reg_m) if log: log = {'err': []} # we assume that no distances are null except those of the diagonal of # distances if warmstart is None: if n_hists: u = nx.ones((dim_a, 1), type_as=M) v = nx.ones((dim_b, n_hists), type_as=M) a = a.reshape(dim_a, 1) else: u = nx.ones(dim_a, type_as=M) v = nx.ones(dim_b, type_as=M) else: u, v = nx.exp(warmstart[0]), nx.exp(warmstart[1]) if reg_type == "kl": K = nx.exp(-M / reg) * a.reshape(-1)[:, None] * b.reshape(-1)[None, :] elif reg_type == "entropy": K = nx.exp(-M / reg) fi_1 = reg_m1 / (reg_m1 + reg) if reg_m1 != float("inf") else 1 fi_2 = reg_m2 / (reg_m2 + reg) if reg_m2 != float("inf") else 1 err = 1. for i in range(numItermax): uprev = u vprev = v Kv = nx.dot(K, v) u = (a / Kv) ** fi_1 Ktu = nx.dot(K.T, u) v = (b / Ktu) ** fi_2 if (nx.any(Ktu == 0.) or nx.any(nx.isnan(u)) or nx.any(nx.isnan(v)) or nx.any(nx.isinf(u)) or nx.any(nx.isinf(v))): # we have reached the machine precision # come back to previous solution and quit loop warnings.warn('Numerical errors at iteration %s' % i) u = uprev v = vprev break err_u = nx.max(nx.abs(u - uprev)) / max( nx.max(nx.abs(u)), nx.max(nx.abs(uprev)), 1. ) err_v = nx.max(nx.abs(v - vprev)) / max( nx.max(nx.abs(v)), nx.max(nx.abs(vprev)), 1.
) err = 0.5 * (err_u + err_v) if log: log['err'].append(err) if verbose: if i % 50 == 0: print( '{:5s}|{:12s}'.format('It.', 'Err') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(i, err)) if err < stopThr: break if log: log['logu'] = nx.log(u + 1e-300) log['logv'] = nx.log(v + 1e-300) if n_hists: # return only loss res = nx.einsum('ik,ij,jk,ij->k', u, K, v, M) if log: return res, log else: return res else: # return OT matrix if log: return u[:, None] * K * v[None, :], log else: return u[:, None] * K * v[None, :] def sinkhorn_stabilized_unbalanced(a, b, M, reg, reg_m, reg_type="entropy", warmstart=None, tau=1e5, numItermax=1000, stopThr=1e-6, verbose=False, log=False, **kwargs): r""" Solve the entropic regularization unbalanced optimal transport problem and return the OT plan The function solves the following optimization problem using log-domain stabilization as proposed in :ref:`[10] `: .. math:: W = \min_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F + \mathrm{reg} \cdot \Omega(\gamma) + \mathrm{reg_{m1}} \cdot \mathrm{KL}(\gamma \mathbf{1}, \mathbf{a}) + \mathrm{reg_{m2}} \cdot \mathrm{KL}(\gamma^T \mathbf{1}, \mathbf{b}) s.t. \gamma \geq 0 where : - :math:`\mathbf{M}` is the (`dim_a`, `dim_b`) metric cost matrix - :math:`\Omega` is the entropic regularization term, can be either KL divergence or negative entropy - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target unbalanced distributions - KL is the Kullback-Leibler divergence The algorithm used for solving the problem is the generalized Sinkhorn-Knopp matrix scaling algorithm as proposed in :ref:`[10, 25] ` Parameters ---------- a : array-like (dim_a,) Unnormalized histogram of dimension `dim_a` b : array-like (dim_b,) or array-like (dim_b, n_hists) One or multiple unnormalized histograms of dimension `dim_b`. If many, compute all the OT distances :math:`(\mathbf{a}, \mathbf{b}_i)_i` M : array-like (dim_a, dim_b) loss matrix reg : float Entropy regularization term > 0 reg_m: float or indexable object of length 1 or 2 Marginal relaxation term. If reg_m is a scalar or an indexable object of length 1, then the same reg_m is applied to both marginal relaxations. The entropic balanced OT can be recovered using `reg_m=float("inf")`. For semi-relaxed case, use either `reg_m=(float("inf"), scalar)` or `reg_m=(scalar, float("inf"))`. If reg_m is an array, it must have the same backend as input arrays (a, b, M). reg_type : string, optional Regularizer term. Can take two values: 'entropy' (negative entropy) :math:`\Omega(\gamma) = \sum_{i,j} \gamma_{i,j} \log(\gamma_{i,j}) - \sum_{i,j} \gamma_{i,j}`, or 'kl' (Kullback-Leibler) :math:`\Omega(\gamma) = \text{KL}(\gamma, \mathbf{a} \mathbf{b}^T)`. warmstart: tuple of arrays, shape (dim_a, dim_b), optional Initialization of dual potentials. If provided, the dual potentials should be given (that is the logarithm of the u,v sinkhorn scaling vectors).
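When ``log=True``, the returned ``log['logu']`` and ``log['logv']`` already include the absorbed stabilization potentials, so they can be passed back here as a warmstart.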
tau : float threshold for max value in u or v for log scaling numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on error (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True Returns ------- if n_hists == 1: - gamma : (dim_a, dim_b) array-like Optimal transportation matrix for the given parameters - log : dict log dictionary returned only if `log` is `True` else: - ot_distance : (n_hists,) array-like the OT distance between :math:`\mathbf{a}` and each of the histograms :math:`\mathbf{b}_i` - log : dict log dictionary returned only if `log` is `True` Examples -------- >>> import ot >>> a=[.5, .5] >>> b=[.5, .5] >>> M=[[0., 1.],[1., 0.]] >>> ot.unbalanced.sinkhorn_stabilized_unbalanced(a, b, M, 1., 1.) array([[0.51122814, 0.18807032], [0.18807032, 0.51122814]]) .. _references-sinkhorn-stabilized-unbalanced: References ---------- .. [10] Chizat, L., Peyré, G., Schmitzer, B., & Vialard, F. X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816. .. [25] Frogner C., Zhang C., Mobahi H., Araya-Polo M., Poggio T. : Learning with a Wasserstein Loss, Advances in Neural Information Processing Systems (NIPS) 2015 See Also -------- ot.lp.emd : Unregularized OT ot.optim.cg : General regularized OT """ a, b, M = list_to_array(a, b, M) nx = get_backend(M, a, b) dim_a, dim_b = M.shape if len(a) == 0: a = nx.ones(dim_a, type_as=M) / dim_a if len(b) == 0: b = nx.ones(dim_b, type_as=M) / dim_b if len(b.shape) > 1: n_hists = b.shape[1] else: n_hists = 0 reg_m1, reg_m2 = get_parameter_pair(reg_m) if log: log = {'err': []} # we assume that no distances are null except those of the diagonal of # distances if warmstart is None: if n_hists: u = nx.ones((dim_a, n_hists), type_as=M) v = nx.ones((dim_b, n_hists), type_as=M) a = a.reshape(dim_a, 1) else: u = nx.ones(dim_a, type_as=M) v = nx.ones(dim_b, type_as=M) else: u, v = nx.exp(warmstart[0]), nx.exp(warmstart[1]) if reg_type == "kl": log_ab = nx.log(a + 1e-16).reshape(-1)[:, None] + nx.log(b + 1e-16).reshape(-1)[None, :] M0 = M - reg * log_ab else: M0 = M K = nx.exp(-M0 / reg) fi_1 = reg_m1 / (reg_m1 + reg) if reg_m1 != float("inf") else 1 fi_2 = reg_m2 / (reg_m2 + reg) if reg_m2 != float("inf") else 1 cpt = 0 err = 1. alpha = nx.zeros(dim_a, type_as=M) beta = nx.zeros(dim_b, type_as=M) ones_a = nx.ones(dim_a, type_as=M) ones_b = nx.ones(dim_b, type_as=M) while (err > stopThr and cpt < numItermax): uprev = u vprev = v Kv = nx.dot(K, v) f_alpha = nx.exp(- alpha / (reg + reg_m1)) if reg_m1 != float("inf") else ones_a f_beta = nx.exp(- beta / (reg + reg_m2)) if reg_m2 != float("inf") else ones_b if n_hists: f_alpha = f_alpha[:, None] f_beta = f_beta[:, None] u = ((a / (Kv + 1e-16)) ** fi_1) * f_alpha Ktu = nx.dot(K.T, u) v = ((b / (Ktu + 1e-16)) ** fi_2) * f_beta absorbing = False if nx.any(u > tau) or nx.any(v > tau): absorbing = True if n_hists: alpha = alpha + reg * nx.log(nx.max(u, 1)) beta = beta + reg * nx.log(nx.max(v, 1)) else: alpha = alpha + reg * nx.log(nx.max(u)) beta = beta + reg * nx.log(nx.max(v)) K = nx.exp((alpha[:, None] + beta[None, :] - M0) / reg) v = nx.ones(v.shape, type_as=v) Kv = nx.dot(K, v) if (nx.any(Ktu == 0.)
or nx.any(nx.isnan(u)) or nx.any(nx.isnan(v)) or nx.any(nx.isinf(u)) or nx.any(nx.isinf(v))): # we have reached the machine precision # come back to previous solution and quit loop warnings.warn('Numerical errors at iteration %s' % cpt) u = uprev v = vprev break if (cpt % 10 == 0 and not absorbing) or cpt == 0: # we can speed up the process by checking for the error only every # 10th iteration err = nx.max(nx.abs(u - uprev)) / max( nx.max(nx.abs(u)), nx.max(nx.abs(uprev)), 1. ) if log: log['err'].append(err) if verbose: if cpt % 200 == 0: print( '{:5s}|{:12s}'.format('It.', 'Err') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(cpt, err)) cpt = cpt + 1 if err > stopThr: warnings.warn("Stabilized Unbalanced Sinkhorn did not converge. " + "Try a larger entropy `reg` or a lower mass `reg_m`. " + "Or a larger absorption threshold `tau`.") if n_hists: logu = alpha[:, None] / reg + nx.log(u) logv = beta[:, None] / reg + nx.log(v) else: logu = alpha / reg + nx.log(u) logv = beta / reg + nx.log(v) if log: log['logu'] = logu log['logv'] = logv if n_hists: # return only loss res = nx.logsumexp( nx.log(M + 1e-100)[:, :, None] + logu[:, None, :] + logv[None, :, :] - M0[:, :, None] / reg, axis=(0, 1) ) res = nx.exp(res) if log: return res, log else: return res else: # return OT matrix ot_matrix = nx.exp(logu[:, None] + logv[None, :] - M0 / reg) if log: return ot_matrix, log else: return ot_matrix def barycenter_unbalanced_stabilized(A, M, reg, reg_m, weights=None, tau=1e3, numItermax=1000, stopThr=1e-6, verbose=False, log=False): r"""Compute the entropic unbalanced Wasserstein barycenter of :math:`\mathbf{A}` with stabilization. The function solves the following optimization problem: .. math:: \mathbf{a} = \mathop{\arg \min}_\mathbf{a} \quad \sum_i W_{u_{reg}}(\mathbf{a},\mathbf{a}_i) where : - :math:`W_{u_{reg}}(\cdot,\cdot)` is the unbalanced entropic regularized Wasserstein distance (see :py:func:`ot.unbalanced.sinkhorn_unbalanced`) - :math:`\mathbf{a}_i` are training distributions in the columns of matrix :math:`\mathbf{A}` - reg and :math:`\mathbf{M}` are respectively the regularization term and the cost matrix for OT - reg_m is the marginal relaxation hyperparameter The algorithm used for solving the problem is the generalized Sinkhorn-Knopp matrix scaling algorithm as proposed in :ref:`[10] ` Parameters ---------- A : array-like (dim, n_hists) `n_hists` training distributions :math:`\mathbf{a}_i` of dimension `dim` M : array-like (dim, dim) ground metric matrix for OT. reg : float Entropy regularization term > 0 reg_m : float Marginal relaxation term > 0 tau : float Stabilization threshold for log domain absorption. weights : array-like (n_hists,) optional Weight of each distribution (barycentric coordinates). If None, uniform weights are used. numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on error (> 0) verbose : bool, optional Print information along iterations log : bool, optional record log if True Returns ------- a : (dim,) array-like Unbalanced Wasserstein barycenter log : dict log dictionary returned only if log==True in parameters .. _references-barycenter-unbalanced-stabilized: References ---------- .. [3] Benamou, J. D., Carlier, G., Cuturi, M., Nenna, L., & Peyré, G. (2015). Iterative Bregman projections for regularized transportation problems. SIAM Journal on Scientific Computing, 37(2), A1111-A1138. .. [10] Chizat, L., Peyré, G., Schmitzer, B., & Vialard, F. X. (2016). Scaling algorithms for unbalanced transport problems.
arXiv preprint arXiv:1607.05816. """ A, M = list_to_array(A, M) nx = get_backend(A, M) dim, n_hists = A.shape if weights is None: weights = nx.ones(n_hists, type_as=A) / n_hists else: assert (len(weights) == A.shape[1]) if log: log = {'err': []} fi = reg_m / (reg_m + reg) u = nx.ones((dim, n_hists), type_as=A) / dim v = nx.ones((dim, n_hists), type_as=A) / dim K = nx.exp(-M / reg) cpt = 0 err = 1. alpha = nx.zeros(dim, type_as=A) beta = nx.zeros(dim, type_as=A) q = nx.ones(dim, type_as=A) / dim for i in range(numItermax): qprev = nx.copy(q) Kv = nx.dot(K, v) f_alpha = nx.exp(- alpha / (reg + reg_m)) f_beta = nx.exp(- beta / (reg + reg_m)) f_alpha = f_alpha[:, None] f_beta = f_beta[:, None] u = ((A / (Kv + 1e-16)) ** fi) * f_alpha Ktu = nx.dot(K.T, u) q = (Ktu ** (1 - fi)) * f_beta q = nx.dot(q, weights) ** (1 / (1 - fi)) Q = q[:, None] v = ((Q / (Ktu + 1e-16)) ** fi) * f_beta absorbing = False if nx.any(u > tau) or nx.any(v > tau): absorbing = True alpha = alpha + reg * nx.log(nx.max(u, 1)) beta = beta + reg * nx.log(nx.max(v, 1)) K = nx.exp((alpha[:, None] + beta[None, :] - M) / reg) v = nx.ones(v.shape, type_as=v) Kv = nx.dot(K, v) if (nx.any(Ktu == 0.) or nx.any(nx.isnan(u)) or nx.any(nx.isnan(v)) or nx.any(nx.isinf(u)) or nx.any(nx.isinf(v))): # we have reached the machine precision # come back to previous solution and quit loop warnings.warn('Numerical errors at iteration %s' % i) q = qprev break if (i % 10 == 0 and not absorbing) or i == 0: # we can speed up the process by checking for the error only every # 10th iteration err = nx.max(nx.abs(q - qprev)) / max( nx.max(nx.abs(q)), nx.max(nx.abs(qprev)), 1. ) if log: log['err'].append(err) if verbose: if i % 50 == 0: print( '{:5s}|{:12s}'.format('It.', 'Err') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(i, err)) if err < stopThr: break if err > stopThr: warnings.warn("Stabilized Unbalanced Sinkhorn did not converge. " + "Try a larger entropy `reg` or a lower mass `reg_m`. " + "Or a larger absorption threshold `tau`.") if log: log['niter'] = i log['logu'] = nx.log(u + 1e-300) log['logv'] = nx.log(v + 1e-300) return q, log else: return q def barycenter_unbalanced_sinkhorn(A, M, reg, reg_m, weights=None, numItermax=1000, stopThr=1e-6, verbose=False, log=False): r"""Compute the entropic unbalanced Wasserstein barycenter of :math:`\mathbf{A}`. The function solves the following optimization problem with respect to :math:`\mathbf{a}` .. math:: \mathbf{a} = \mathop{\arg \min}_\mathbf{a} \quad \sum_i W_{u_{reg}}(\mathbf{a},\mathbf{a}_i) where : - :math:`W_{u_{reg}}(\cdot,\cdot)` is the unbalanced entropic regularized Wasserstein distance (see :py:func:`ot.unbalanced.sinkhorn_unbalanced`) - :math:`\mathbf{a}_i` are training distributions in the columns of matrix :math:`\mathbf{A}` - reg and :math:`\mathbf{M}` are respectively the regularization term and the cost matrix for OT - reg_m is the marginal relaxation hyperparameter The algorithm used for solving the problem is the generalized Sinkhorn-Knopp matrix scaling algorithm as proposed in :ref:`[10] ` Parameters ---------- A : array-like (dim, n_hists) `n_hists` training distributions :math:`\mathbf{a}_i` of dimension `dim` M : array-like (dim, dim) ground metric matrix for OT. reg : float Entropy regularization term > 0 reg_m: float Marginal relaxation term > 0 weights : array-like (n_hists,) optional Weight of each distribution (barycentric coordinates). If None, uniform weights are used.
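With uniform weights the criterion reduces, up to a constant factor, to the unweighted sum :math:`\sum_i W_{u_{reg}}(\mathbf{a}, \mathbf{a}_i)` above.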
numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on error (> 0) verbose : bool, optional Print information along iterations log : bool, optional record log if True Returns ------- a : (dim,) array-like Unbalanced Wasserstein barycenter log : dict log dictionary returned only if log==True in parameters .. _references-barycenter-unbalanced-sinkhorn: References ---------- .. [3] Benamou, J. D., Carlier, G., Cuturi, M., Nenna, L., & Peyré, G. (2015). Iterative Bregman projections for regularized transportation problems. SIAM Journal on Scientific Computing, 37(2), A1111-A1138. .. [10] Chizat, L., Peyré, G., Schmitzer, B., & Vialard, F. X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816. """ A, M = list_to_array(A, M) nx = get_backend(A, M) dim, n_hists = A.shape if weights is None: weights = nx.ones(n_hists, type_as=A) / n_hists else: assert (len(weights) == A.shape[1]) if log: log = {'err': []} K = nx.exp(-M / reg) fi = reg_m / (reg_m + reg) v = nx.ones((dim, n_hists), type_as=A) u = nx.ones((dim, 1), type_as=A) q = nx.ones(dim, type_as=A) err = 1. for i in range(numItermax): uprev = nx.copy(u) vprev = nx.copy(v) qprev = nx.copy(q) Kv = nx.dot(K, v) u = (A / Kv) ** fi Ktu = nx.dot(K.T, u) q = nx.dot(Ktu ** (1 - fi), weights) q = q ** (1 / (1 - fi)) Q = q[:, None] v = (Q / Ktu) ** fi if (nx.any(Ktu == 0.) or nx.any(nx.isnan(u)) or nx.any(nx.isnan(v)) or nx.any(nx.isinf(u)) or nx.any(nx.isinf(v))): # we have reached the machine precision # come back to previous solution and quit loop warnings.warn('Numerical errors at iteration %s' % i) u = uprev v = vprev q = qprev break # compute change in barycenter err = nx.max(nx.abs(q - qprev)) / max( nx.max(nx.abs(q)), nx.max(nx.abs(qprev)), 1.0 ) if log: log['err'].append(err) # if barycenter did not change + at least 10 iterations - stop if err < stopThr and i > 10: break if verbose: if i % 10 == 0: print( '{:5s}|{:12s}'.format('It.', 'Err') + '\n' + '-' * 19) print('{:5d}|{:8e}|'.format(i, err)) if log: log['niter'] = i log['logu'] = nx.log(u + 1e-300) log['logv'] = nx.log(v + 1e-300) return q, log else: return q def barycenter_unbalanced(A, M, reg, reg_m, method="sinkhorn", weights=None, numItermax=1000, stopThr=1e-6, verbose=False, log=False, **kwargs): r"""Compute the entropic unbalanced Wasserstein barycenter of :math:`\mathbf{A}`. The function solves the following optimization problem with respect to :math:`\mathbf{a}` .. math:: \mathbf{a} = \mathop{\arg \min}_\mathbf{a} \quad \sum_i W_{u_{reg}}(\mathbf{a},\mathbf{a}_i) where : - :math:`W_{u_{reg}}(\cdot,\cdot)` is the unbalanced entropic regularized Wasserstein distance (see :py:func:`ot.unbalanced.sinkhorn_unbalanced`) - :math:`\mathbf{a}_i` are training distributions in the columns of matrix :math:`\mathbf{A}` - reg and :math:`\mathbf{M}` are respectively the regularization term and the cost matrix for OT - reg_m is the marginal relaxation hyperparameter The algorithm used for solving the problem is the generalized Sinkhorn-Knopp matrix scaling algorithm as proposed in :ref:`[10] ` Parameters ---------- A : array-like (dim, n_hists) `n_hists` training distributions :math:`\mathbf{a}_i` of dimension `dim` M : array-like (dim, dim) ground metric matrix for OT. reg : float Entropy regularization term > 0 reg_m: float Marginal relaxation term > 0 weights : array-like (n_hists,) optional Weight of each distribution (barycentric coordinates). If None, uniform weights are used.
numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on error (> 0) verbose : bool, optional Print information along iterations log : bool, optional record log if True Returns ------- a : (dim,) array-like Unbalanced Wasserstein barycenter log : dict log dictionary returned only if log==True in parameters .. _references-barycenter-unbalanced: References ---------- .. [3] Benamou, J. D., Carlier, G., Cuturi, M., Nenna, L., & Peyré, G. (2015). Iterative Bregman projections for regularized transportation problems. SIAM Journal on Scientific Computing, 37(2), A1111-A1138. .. [10] Chizat, L., Peyré, G., Schmitzer, B., & Vialard, F. X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816. """ if method.lower() == 'sinkhorn': return barycenter_unbalanced_sinkhorn(A, M, reg, reg_m, weights=weights, numItermax=numItermax, stopThr=stopThr, verbose=verbose, log=log, **kwargs) elif method.lower() == 'sinkhorn_stabilized': return barycenter_unbalanced_stabilized(A, M, reg, reg_m, weights=weights, numItermax=numItermax, stopThr=stopThr, verbose=verbose, log=log, **kwargs) elif method.lower() in ['sinkhorn_reg_scaling']: warnings.warn('Method not implemented yet. Using classic Sinkhorn Knopp') return barycenter_unbalanced(A, M, reg, reg_m, weights=weights, numItermax=numItermax, stopThr=stopThr, verbose=verbose, log=log, **kwargs) else: raise ValueError("Unknown method '%s'." % method) def mm_unbalanced(a, b, M, reg_m, c=None, reg=0, div='kl', G0=None, numItermax=1000, stopThr=1e-15, verbose=False, log=False): r""" Solve the unbalanced optimal transport problem and return the OT plan. The function solves the following optimization problem: .. math:: W = \min_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F + \mathrm{reg_{m1}} \cdot \mathrm{div}(\gamma \mathbf{1}, \mathbf{a}) + \mathrm{reg_{m2}} \cdot \mathrm{div}(\gamma^T \mathbf{1}, \mathbf{b}) + \mathrm{reg} \cdot \mathrm{div}(\gamma, \mathbf{c}) s.t. \gamma \geq 0 where: - :math:`\mathbf{M}` is the (`dim_a`, `dim_b`) metric cost matrix - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target unbalanced distributions - :math:`\mathbf{c}` is a reference distribution for the regularization - :math:`\mathrm{div}` is a divergence, either Kullback-Leibler or :math:`\ell_2` divergence The algorithm used for solving the problem is a maximization-minimization algorithm as proposed in :ref:`[41] ` Parameters ---------- a : array-like (dim_a,) Unnormalized histogram of dimension `dim_a` b : array-like (dim_b,) Unnormalized histogram of dimension `dim_b` M : array-like (dim_a, dim_b) loss matrix reg_m: float or indexable object of length 1 or 2 Marginal relaxation term >= 0, but cannot be infinity. If reg_m is a scalar or an indexable object of length 1, then the same reg_m is applied to both marginal relaxations. If reg_m is an array, it must have the same backend as input arrays (a, b, M). reg : float, optional (default = 0) Regularization term >= 0. By default, solve the unregularized problem c : array-like (dim_a, dim_b), optional (default = None) Reference measure for the regularization. If None, then use `\mathbf{c} = \mathbf{a} \mathbf{b}^T`. div: string, optional Divergence to quantify the difference between the marginals.
Can take two values: 'kl' (Kullback-Leibler) or 'l2' (quadratic) G0: array-like (dim_a, dim_b) Initialization of the transport matrix numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on error (> 0) verbose : bool, optional Print information along iterations log : bool, optional record log if True Returns ------- gamma : (dim_a, dim_b) array-like Optimal transportation matrix for the given parameters log : dict log dictionary returned only if `log` is `True` Examples -------- >>> import ot >>> import numpy as np >>> a=[.5, .5] >>> b=[.5, .5] >>> M=[[1., 36.],[9., 4.]] >>> np.round(ot.unbalanced.mm_unbalanced(a, b, M, 5, div='kl'), 2) array([[0.45, 0. ], [0. , 0.34]]) >>> np.round(ot.unbalanced.mm_unbalanced(a, b, M, 5, div='l2'), 2) array([[0.4, 0. ], [0. , 0.1]]) .. _references-mm-unbalanced: References ---------- .. [41] Chapel, L., Flamary, R., Wu, H., Févotte, C., and Gasso, G. (2021). Unbalanced optimal transport through non-negative penalized linear regression. NeurIPS. See Also -------- ot.lp.emd : Unregularized OT ot.unbalanced.sinkhorn_unbalanced : Entropic regularized OT """ M, a, b = list_to_array(M, a, b) nx = get_backend(M, a, b) dim_a, dim_b = M.shape if len(a) == 0: a = nx.ones(dim_a, type_as=M) / dim_a if len(b) == 0: b = nx.ones(dim_b, type_as=M) / dim_b G = a[:, None] * b[None, :] if G0 is None else G0 c = a[:, None] * b[None, :] if c is None else c reg_m1, reg_m2 = get_parameter_pair(reg_m) if log: log = {'err': [], 'G': []} if div not in ["kl", "l2"]: warnings.warn("The div parameter should be either equal to 'kl' or \ 'l2': it has been set to 'kl'.") div = 'kl' if div == 'kl': sum_r = reg + reg_m1 + reg_m2 r1, r2, r = reg_m1 / sum_r, reg_m2 / sum_r, reg / sum_r K = (a[:, None]**r1) * (b[None, :]**r2) * (c**r) * nx.exp(- M / sum_r) elif div == 'l2': K = reg_m1 * a[:, None] + reg_m2 * b[None, :] + reg * c - M K = nx.maximum(K, nx.zeros((dim_a, dim_b), type_as=M)) for i in range(numItermax): Gprev = G if div == 'kl': Gd = (nx.sum(G, 1, keepdims=True)**r1) * (nx.sum(G, 0, keepdims=True)**r2) + 1e-16 G = K * G**(r1 + r2) / Gd elif div == 'l2': Gd = reg_m1 * nx.sum(G, 1, keepdims=True) + \ reg_m2 * nx.sum(G, 0, keepdims=True) + reg * G + 1e-16 G = K * G / Gd err = nx.sqrt(nx.sum((G - Gprev) ** 2)) if log: log['err'].append(err) log['G'].append(G) if verbose: print('{:5d}|{:8e}|'.format(i, err)) if err < stopThr: break if log: log['cost'] = nx.sum(G * M) return G, log else: return G def mm_unbalanced2(a, b, M, reg_m, c=None, reg=0, div='kl', G0=None, numItermax=1000, stopThr=1e-15, verbose=False, log=False): r""" Solve the unbalanced optimal transport problem and return the OT loss. The function solves the following optimization problem: .. math:: W = \min_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F + \mathrm{reg_{m1}} \cdot \mathrm{div}(\gamma \mathbf{1}, \mathbf{a}) + \mathrm{reg_{m2}} \cdot \mathrm{div}(\gamma^T \mathbf{1}, \mathbf{b}) + \mathrm{reg} \cdot \mathrm{div}(\gamma, \mathbf{c}) s.t.
\gamma \geq 0 where: - :math:`\mathbf{M}` is the (`dim_a`, `dim_b`) metric cost matrix - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target unbalanced distributions - :math:`\mathbf{c}` is a reference distribution for the regularization - :math:`\mathrm{div}` is a divergence, either Kullback-Leibler or :math:`\ell_2` divergence The algorithm used for solving the problem is a maximization-minimization algorithm as proposed in :ref:`[41] ` Parameters ---------- a : array-like (dim_a,) Unnormalized histogram of dimension `dim_a` b : array-like (dim_b,) Unnormalized histogram of dimension `dim_b` M : array-like (dim_a, dim_b) loss matrix reg_m: float or indexable object of length 1 or 2 Marginal relaxation term >= 0, but cannot be infinity. If reg_m is a scalar or an indexable object of length 1, then the same reg_m is applied to both marginal relaxations. If reg_m is an array, it must have the same backend as input arrays (a, b, M). reg : float, optional (default = 0) Entropy regularization term >= 0. By default, solve the unregularized problem c : array-like (dim_a, dim_b), optional (default = None) Reference measure for the regularization. If None, then use `\mathbf{c} = \mathbf{a} \mathbf{b}^T`. div: string, optional Divergence to quantify the difference between the marginals. Can take two values: 'kl' (Kullback-Leibler) or 'l2' (quadratic) G0: array-like (dim_a, dim_b) Initialization of the transport matrix numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on error (> 0) verbose : bool, optional Print information along iterations log : bool, optional record log if True Returns ------- ot_distance : array-like the OT distance between :math:`\mathbf{a}` and :math:`\mathbf{b}` log : dict log dictionary returned only if `log` is `True` Examples -------- >>> import ot >>> import numpy as np >>> a=[.5, .5] >>> b=[.5, .5] >>> M=[[1., 36.],[9., 4.]] >>> np.round(ot.unbalanced.mm_unbalanced2(a, b, M, 5, div='l2'), 2) 0.8 >>> np.round(ot.unbalanced.mm_unbalanced2(a, b, M, 5, div='kl'), 2) 1.79 References ---------- .. [41] Chapel, L., Flamary, R., Wu, H., Févotte, C., and Gasso, G. (2021). Unbalanced optimal transport through non-negative penalized linear regression. NeurIPS.
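Notes ----- This function calls :any:`ot.unbalanced.mm_unbalanced` internally with ``log=True`` and returns the ``'cost'`` entry of its log dictionary; pass ``log=True`` to also retrieve the full log (error and iterate history).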
See Also -------- ot.lp.emd2 : Unregularized OT loss ot.unbalanced.sinkhorn_unbalanced2 : Entropic regularized OT loss """ _, log_mm = mm_unbalanced(a, b, M, reg_m, c=c, reg=reg, div=div, G0=G0, numItermax=numItermax, stopThr=stopThr, verbose=verbose, log=True) if log: return log_mm['cost'], log_mm else: return log_mm['cost'] def _get_loss_unbalanced(a, b, c, M, reg, reg_m1, reg_m2, reg_div='kl', regm_div='kl'): """ Return the loss function (scipy.optimize compatible) for regularized unbalanced OT """ m, n = M.shape def kl(p, q): return np.sum(p * np.log(p / q + 1e-16)) - np.sum(p) + np.sum(q) def reg_l2(G): return np.sum((G - c)**2) / 2 def grad_l2(G): return G - c def reg_kl(G): return kl(G, c) def grad_kl(G): return np.log(G / c + 1e-16) def reg_entropy(G): return np.sum(G * np.log(G + 1e-16)) - np.sum(G) def grad_entropy(G): return np.log(G + 1e-16) if reg_div == 'kl': reg_fun = reg_kl grad_reg_fun = grad_kl elif reg_div == 'entropy': reg_fun = reg_entropy grad_reg_fun = grad_entropy else: reg_fun = reg_l2 grad_reg_fun = grad_l2 def marg_l2(G): return reg_m1 * 0.5 * np.sum((G.sum(1) - a)**2) + \ reg_m2 * 0.5 * np.sum((G.sum(0) - b)**2) def grad_marg_l2(G): return reg_m1 * np.outer((G.sum(1) - a), np.ones(n)) + \ reg_m2 * np.outer(np.ones(m), (G.sum(0) - b)) def marg_kl(G): return reg_m1 * kl(G.sum(1), a) + reg_m2 * kl(G.sum(0), b) def grad_marg_kl(G): return reg_m1 * np.outer(np.log(G.sum(1) / a + 1e-16), np.ones(n)) + \ reg_m2 * np.outer(np.ones(m), np.log(G.sum(0) / b + 1e-16)) if regm_div == 'kl': regm_fun = marg_kl grad_regm_fun = grad_marg_kl else: regm_fun = marg_l2 grad_regm_fun = grad_marg_l2 def _func(G): G = G.reshape((m, n)) # compute loss val = np.sum(G * M) + reg * reg_fun(G) + regm_fun(G) # compute gradient grad = M + reg * grad_reg_fun(G) + grad_regm_fun(G) return val, grad.ravel() return _func def lbfgsb_unbalanced(a, b, M, reg, reg_m, c=None, reg_div='kl', regm_div='kl', G0=None, numItermax=1000, stopThr=1e-15, method='L-BFGS-B', verbose=False, log=False): r""" Solve the unbalanced optimal transport problem and return the OT plan using L-BFGS-B. The function solves the following optimization problem: .. math:: W = \min_\gamma \quad \langle \gamma, \mathbf{M} \rangle_F + \mathrm{reg} \cdot \mathrm{div}(\gamma, \mathbf{c}) + \mathrm{reg_{m1}} \cdot \mathrm{div}(\gamma \mathbf{1}, \mathbf{a}) + \mathrm{reg_{m2}} \cdot \mathrm{div}(\gamma^T \mathbf{1}, \mathbf{b}) s.t. \gamma \geq 0 where: - :math:`\mathbf{M}` is the (`dim_a`, `dim_b`) metric cost matrix - :math:`\mathbf{a}` and :math:`\mathbf{b}` are source and target unbalanced distributions - :math:`\mathbf{c}` is a reference distribution for the regularization - :math:`\mathrm{div}` is a divergence, either Kullback-Leibler or :math:`\ell_2` divergence The algorithm used for solving the problem is L-BFGS-B from scipy.optimize Parameters ---------- a : array-like (dim_a,) Unnormalized histogram of dimension `dim_a` b : array-like (dim_b,) Unnormalized histogram of dimension `dim_b` M : array-like (dim_a, dim_b) loss matrix reg: float regularization term >=0 c : array-like (dim_a, dim_b), optional (default = None) Reference measure for the regularization. If None, then use `\mathbf{c} = \mathbf{a} \mathbf{b}^T`. reg_m: float or indexable object of length 1 or 2 Marginal relaxation term >= 0, but cannot be infinity. If reg_m is a scalar or an indexable object of length 1, then the same reg_m is applied to both marginal relaxations. If reg_m is an array, it must be a Numpy array.
reg_div: string, optional Divergence used for regularization. Can take three values: 'entropy' (negative entropy), 'kl' (Kullback-Leibler), or 'l2' (quadratic). regm_div: string, optional Divergence to quantify the difference between the marginals. Can take two values: 'kl' (Kullback-Leibler) or 'l2' (quadratic) G0: array-like (dim_a, dim_b) Initialization of the transport matrix numItermax : int, optional Max number of iterations stopThr : float, optional Stop threshold on error (> 0) verbose : bool, optional Print information along iterations log : bool, optional record log if True Returns ------- gamma : (dim_a, dim_b) array-like Optimal transportation matrix for the given parameters log : dict log dictionary returned only if `log` is `True` Examples -------- >>> import ot >>> import numpy as np >>> a=[.5, .5] >>> b=[.5, .5] >>> M=[[1., 36.],[9., 4.]] >>> np.round(ot.unbalanced.lbfgsb_unbalanced(a, b, M, reg=0, reg_m=5, reg_div='kl', regm_div='kl'), 2) array([[0.45, 0. ], [0. , 0.34]]) >>> np.round(ot.unbalanced.lbfgsb_unbalanced(a, b, M, reg=0, reg_m=5, reg_div='l2', regm_div='l2'), 2) array([[0.4, 0. ], [0. , 0.1]]) References ---------- .. [41] Chapel, L., Flamary, R., Wu, H., Févotte, C., and Gasso, G. (2021). Unbalanced optimal transport through non-negative penalized linear regression. NeurIPS. See Also -------- ot.lp.emd2 : Unregularized OT loss ot.unbalanced.sinkhorn_unbalanced2 : Entropic regularized OT loss """ M, a, b = list_to_array(M, a, b) nx = get_backend(M, a, b) M0 = M # convert to numpy a, b, M = nx.to_numpy(a, b, M) G0 = np.zeros(M.shape) if G0 is None else nx.to_numpy(G0) c = a[:, None] * b[None, :] if c is None else nx.to_numpy(c) reg_m1, reg_m2 = get_parameter_pair(reg_m) _func = _get_loss_unbalanced(a, b, c, M, reg, reg_m1, reg_m2, reg_div, regm_div) res = minimize(_func, G0.ravel(), method=method, jac=True, bounds=Bounds(0, np.inf), tol=stopThr, options=dict(maxiter=numItermax, disp=verbose)) G = nx.from_numpy(res.x.reshape(M.shape), type_as=M0) if log: log = {'loss': nx.from_numpy(res.fun, type_as=M0), 'res': res} return G, log else: return G python-pot-0.9.3+dfsg/ot/utils.py000066400000000000000000001216501455713015700167340ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Various useful functions """ # Author: Remi Flamary # # License: MIT License from functools import reduce import time import numpy as np from scipy.spatial.distance import cdist import sys import warnings from inspect import signature from .backend import get_backend, Backend, NumpyBackend, JaxBackend __time_tic_toc = time.time() def tic(): r""" Python implementation of Matlab tic() function """ global __time_tic_toc __time_tic_toc = time.time() def toc(message='Elapsed time : {} s'): r""" Python implementation of Matlab toc() function """ t = time.time() print(message.format(t - __time_tic_toc)) return t - __time_tic_toc def toq(): r""" Python implementation of Julia toq() function """ t = time.time() return t - __time_tic_toc def kernel(x1, x2, method='gaussian', sigma=1, **kwargs): r"""Compute kernel matrix""" nx = get_backend(x1, x2) if method.lower() in ['gaussian', 'gauss', 'rbf']: K = nx.exp(-dist(x1, x2) / (2 * sigma**2)) return K def laplacian(x): r"""Compute Laplacian matrix""" nx = get_backend(x) L = nx.diag(nx.sum(x, axis=0)) - x return L def list_to_array(*lst): r""" Convert lists in the arguments to numpy arrays (other types are left unchanged) """ if len(lst) > 1: return [np.array(a) if isinstance(a, list) else a for a in lst] else: return np.array(lst[0]) if isinstance(lst[0], list) else lst[0] def
proj_simplex(v, z=1): r"""Compute the closest point (orthogonal projection) on the generalized `(n-1)`-simplex of a vector :math:`\mathbf{v}` wrt. the Euclidean distance, thus solving: .. math:: \mathcal{P}(\mathbf{v}) \in \mathop{\arg \min}_\gamma \| \gamma - \mathbf{v} \|_2 s.t. \ \gamma^T \mathbf{1} = z \gamma \geq 0 If :math:`\mathbf{v}` is a 2d array, compute all the projections wrt. axis 0 .. note:: This function is backend-compatible and will work on arrays from all compatible backends. Parameters ---------- v : {array-like}, shape (n, d) z : int, optional 'size' of the simplex (each vector sums to z, 1 by default) Returns ------- h : ndarray, shape (`n`, `d`) Array of projections on the simplex """ nx = get_backend(v) n = v.shape[0] if v.ndim == 1: d1 = 1 v = v[:, None] else: d1 = 0 d = v.shape[1] # sort u in ascending order u = nx.sort(v, axis=0) # take the descending order u = nx.flip(u, 0) cssv = nx.cumsum(u, axis=0) - z ind = nx.arange(n, type_as=v)[:, None] + 1 cond = u - cssv / ind > 0 rho = nx.sum(cond, 0) theta = cssv[rho - 1, nx.arange(d)] / rho w = nx.maximum(v - theta[None, :], nx.zeros(v.shape, type_as=v)) if d1: return w[:, 0] else: return w def projection_sparse_simplex(V, max_nz, z=1, axis=None, nx=None): r"""Projection of :math:`\mathbf{V}` onto the simplex with cardinality constraint (maximum number of non-zero elements) and then scaled by `z`. .. math:: P\left(\mathbf{V}, max_nz, z\right) = \mathop{\arg \min}_{\substack{\mathbf{y} \geq 0 \\ \sum_i \mathbf{y}_i = z \\ \|\mathbf{y}\|_0 \le \text{max_nz}}} \quad \|\mathbf{y} - \mathbf{V}\|^2 Parameters ---------- V: 1-dim or 2-dim ndarray z: float or array If array, len(z) must be compatible with :math:`\mathbf{V}` axis: None or int - axis=None: project :math:`\mathbf{V}` by :math:`P(\mathbf{V}.\mathrm{ravel}(), max_nz, z)` - axis=1: project each :math:`\mathbf{V}_i` by :math:`P(\mathbf{V}_i, max_nz, z_i)` - axis=0: project each :math:`\mathbf{V}_{:, j}` by :math:`P(\mathbf{V}_{:, j}, max_nz, z_j)` Returns ------- projection: ndarray, shape :math:`\mathbf{V}`.shape References: Sparse projections onto the simplex Anastasios Kyrillidis, Stephen Becker, Volkan Cevher and, Christoph Koch ICML 2013 https://arxiv.org/abs/1206.1529 """ if nx is None: nx = get_backend(V) if V.ndim == 1: return projection_sparse_simplex( V[None, :], max_nz, z, axis=1).ravel() if V.ndim > 2: raise ValueError('V.ndim must be <= 2') if axis == 1: # For each row of V, find top max_nz values; arrange the # corresponding column indices such that their values are # in a descending order. max_nz_indices = nx.argsort(V, axis=1)[:, -max_nz:] max_nz_indices = nx.flip(max_nz_indices, axis=1) row_indices = nx.arange(V.shape[0]) row_indices = row_indices.reshape(-1, 1) # Extract the top max_nz values for each row # and then project to simplex. U = V[row_indices, max_nz_indices] z = nx.ones(len(U)) * z cssv = nx.cumsum(U, axis=1) - z[:, None] ind = nx.arange(max_nz) + 1 cond = U - cssv / ind > 0 rho = nx.sum(cond, axis=1) theta = cssv[nx.arange(len(U)), rho - 1] / rho nz_projection = nx.maximum(U - theta[:, None], 0) # Put the projection of max_nz_values to their original column indices # while keeping other values zero. sparse_projection = nx.zeros(V.shape, type_as=nz_projection) if isinstance(nx, JaxBackend): # in Jax, we need to use the `at` property of `jax.numpy.ndarray` # to do in-place array modifications.
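# For example (illustrative values only): ``znew = z.at[0].set(1.)`` returns # a new array equal to ``z`` with entry 0 set to 1., leaving ``z`` itself # unchanged, since jax arrays are immutable.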
sparse_projection = sparse_projection.at[ row_indices, max_nz_indices].set(nz_projection) else: sparse_projection[row_indices, max_nz_indices] = nz_projection return sparse_projection elif axis == 0: return projection_sparse_simplex(V.T, max_nz, z, axis=1).T else: V = V.ravel().reshape(1, -1) return projection_sparse_simplex(V, max_nz, z, axis=1).ravel() def unif(n, type_as=None): r""" Return a uniform histogram of length `n` (simplex). Parameters ---------- n : int number of bins in the histogram type_as : array_like array of the same type of the expected output (numpy/pytorch/jax) Returns ------- h : array_like (`n`,) histogram of length `n` such that :math:`\forall i, \mathbf{h}_i = \frac{1}{n}` """ if type_as is None: return np.ones((n,)) / n else: nx = get_backend(type_as) return nx.ones((n,), type_as=type_as) / n def clean_zeros(a, b, M): r""" Remove all components with zero weights in :math:`\mathbf{a}` and :math:`\mathbf{b}` """ M2 = M[a > 0, :][:, b > 0].copy() # copy forces a C-style matrix (for emd) a2 = a[a > 0] b2 = b[b > 0] return a2, b2, M2 def euclidean_distances(X, Y, squared=False): r""" Considering the rows of :math:`\mathbf{X}` (and :math:`\mathbf{Y} = \mathbf{X}`) as vectors, compute the distance matrix between each pair of vectors. .. note:: This function is backend-compatible and will work on arrays from all compatible backends. Parameters ---------- X : array-like, shape (n_samples_1, n_features) Y : array-like, shape (n_samples_2, n_features) squared : boolean, optional Return squared Euclidean distances. Returns ------- distances : array-like, shape (`n_samples_1`, `n_samples_2`) """ nx = get_backend(X, Y) a2 = nx.einsum('ij,ij->i', X, X) b2 = nx.einsum('ij,ij->i', Y, Y) c = -2 * nx.dot(X, Y.T) c += a2[:, None] c += b2[None, :] c = nx.maximum(c, 0) if not squared: c = nx.sqrt(c) if X is Y: c = c * (1 - nx.eye(X.shape[0], type_as=c)) return c def dist(x1, x2=None, metric='sqeuclidean', p=2, w=None): r"""Compute distance between samples in :math:`\mathbf{x_1}` and :math:`\mathbf{x_2}` .. note:: This function is backend-compatible and will work on arrays from all compatible backends. Parameters ---------- x1 : array-like, shape (n1,d) matrix with `n1` samples of size `d` x2 : array-like, shape (n2,d), optional matrix with `n2` samples of size `d` (if None then :math:`\mathbf{x_2} = \mathbf{x_1}`) metric : str | callable, optional 'sqeuclidean' or 'euclidean' on all backends. On numpy the function also accepts from the scipy.spatial.distance.cdist function : 'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'kulczynski1', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'wminkowski', 'yule'. p : float, optional p-norm for the Minkowski and the Weighted Minkowski metrics. Default value is 2. w : array-like, rank 1 Weights for the weighted metrics.
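Only used on the numpy backend, where the weights are forwarded to :py:func:`scipy.spatial.distance.cdist`; the other backends only support the 'sqeuclidean' and 'euclidean' metrics above.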
Returns ------- M : array-like, shape (`n1`, `n2`) distance matrix computed with given metric """ if x2 is None: x2 = x1 if metric == "sqeuclidean": return euclidean_distances(x1, x2, squared=True) elif metric == "euclidean": return euclidean_distances(x1, x2, squared=False) else: if not get_backend(x1, x2).__name__ == 'numpy': raise NotImplementedError() else: if isinstance(metric, str) and metric.endswith("minkowski"): return cdist(x1, x2, metric=metric, p=p, w=w) if w is not None: return cdist(x1, x2, metric=metric, w=w) return cdist(x1, x2, metric=metric) def dist0(n, method='lin_square'): r"""Compute standard cost matrices of size (`n`, `n`) for OT problems Parameters ---------- n : int Size of the cost matrix. method : str, optional Type of loss matrix chosen from: * 'lin_square' : linear sampling between 0 and `n-1`, quadratic loss Returns ------- M : ndarray, shape (`n`, `n`) Distance matrix computed with given metric. """ res = 0 if method == 'lin_square': x = np.arange(n, dtype=np.float64).reshape((n, 1)) res = dist(x, x) return res def cost_normalization(C, norm=None): r""" Apply normalization to the loss matrix Parameters ---------- C : ndarray, shape (n1, n2) The cost matrix to normalize. norm : str Type of normalization from 'median', 'max', 'log', 'loglog'. Any other value raises a ValueError. Returns ------- C : ndarray, shape (`n1`, `n2`) The input cost matrix normalized according to given norm. """ nx = get_backend(C) if norm is None: pass elif norm == "median": C /= float(nx.median(C)) elif norm == "max": C /= float(nx.max(C)) elif norm == "log": C = nx.log(1 + C) elif norm == "loglog": C = nx.log(1 + nx.log(1 + C)) else: raise ValueError('Norm %s is not a valid option.\n' 'Valid options are:\n' 'median, max, log, loglog' % norm) return C def dots(*args): r""" Compute the chained dot product of several matrices: dots(A, B, C) == A.dot(B).dot(C) """ nx = get_backend(*args) return reduce(nx.dot, args) def is_all_finite(*args): r"""Tests element-wise for finiteness in all arguments.""" nx = get_backend(*args) return all(not nx.any(~nx.isfinite(arg)) for arg in args) def label_normalization(y, start=0, nx=None): r""" Transform labels to start at a given value Parameters ---------- y : array-like, shape (n, ) The vector of labels to be normalized. start : int Desired value for the smallest label in :math:`\mathbf{y}` (default=0) nx : Backend, optional Backend to perform computations on. If omitted, the backend defaults to that of `y`. Returns ------- y : array-like, shape (`n`, ) The input vector of labels normalized according to given start value. """ if nx is None: nx = get_backend(y) diff = nx.min(y) - start return y if diff == 0 else (y - diff) def labels_to_masks(y, type_as=None, nx=None): r"""Transforms (n_samples,) vector of labels into a (n_samples, n_labels) matrix of masks. Parameters ---------- y : array-like, shape (n_samples, ) The vector of labels. type_as : array_like Array of the same type of the expected output. nx : Backend, optional Backend to perform computations on. If omitted, the backend defaults to that of `y`. Returns ------- masks : array-like, shape (n_samples, n_labels) The (n_samples, n_labels) matrix of label masks. """ if nx is None: nx = get_backend(y) if type_as is None: type_as = y labels_u, labels_idx = nx.unique(y, return_inverse=True) n_labels = labels_u.shape[0] masks = nx.eye(n_labels, type_as=type_as)[labels_idx] return masks def parmap(f, X, nprocs="default"): r""" parallel map for multiprocessing. The function has been deprecated and only performs a regular (sequential) map.
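Examples -------- Since the fallback is a plain sequential map, the call below simply applies `f` to each element: >>> parmap(lambda x: x * 2, [1, 2, 3]) [2, 4, 6]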
""" return list(map(f, X)) def check_params(**kwargs): r"""check_params: check whether some parameters are missing """ missing_params = [] check = True for param in kwargs: if kwargs[param] is None: missing_params.append(param) if len(missing_params) > 0: print("POT - Warning: following necessary parameters are missing") for p in missing_params: print("\n", p) check = False return check def check_random_state(seed): r"""Turn `seed` into a np.random.RandomState instance Parameters ---------- seed : None | int | instance of RandomState If `seed` is None, return the RandomState singleton used by np.random. If `seed` is an int, return a new RandomState instance seeded with `seed`. If `seed` is already a RandomState instance, return it. Otherwise raise ValueError. """ if seed is None or seed is np.random: return np.random.mtrand._rand if isinstance(seed, (int, np.integer)): return np.random.RandomState(seed) if isinstance(seed, np.random.RandomState): return seed raise ValueError('{} cannot be used to seed a numpy.random.RandomState' ' instance'.format(seed)) def get_coordinate_circle(x): r"""For :math:`x\in S^1 \subset \mathbb{R}^2`, returns the coordinates in turn (in [0,1[). .. math:: u = \frac{\pi + \mathrm{atan2}(-x_2,-x_1)}{2\pi} Parameters ---------- x: ndarray, shape (n, 2) Samples on the circle with ambient coordinates Returns ------- x_t: ndarray, shape (n,) Coordinates on [0,1[ Examples -------- >>> u = np.array([[0.2,0.5,0.8]]) * (2 * np.pi) >>> x1, y1 = np.cos(u), np.sin(u) >>> x = np.concatenate([x1, y1]).T >>> get_coordinate_circle(x) array([0.2, 0.5, 0.8]) """ nx = get_backend(x) x_t = (nx.atan2(-x[:, 1], -x[:, 0]) + np.pi) / (2 * np.pi) return x_t def reduce_lazytensor(a, func, axis=None, nx=None, batch_size=100): """ Reduce a LazyTensor along an axis with function fun using batches. When axis=None, reduce the LazyTensor to a scalar as a sum of fun over batches taken along dim. .. warning:: This function works for tensor of any order but the reduction can be done only along the first two axis (or global). Also, in order to work, it requires that the slice of size `batch_size` along the axis to reduce (or axis 0 if `axis=None`) is can be computed and fits in memory. Parameters ---------- a : LazyTensor LazyTensor to reduce func : callable Function to apply to the LazyTensor axis : int, optional Axis along which to reduce the LazyTensor. If None, reduce the LazyTensor to a scalar as a sum of fun over batches taken along axis 0. If 0 or 1 reduce the LazyTensor to a vector/matrix as a sum of fun over batches taken along axis. 
nx : Backend, optional Backend to use for the reduction batch_size : int, optional Size of the batches to use for the reduction (default=100) Returns ------- res : array-like Result of the reduction """ if nx is None: nx = get_backend(a[0:1]) if axis is None: res = 0.0 for i in range(0, a.shape[0], batch_size): res += func(a[i:i + batch_size]) return res elif axis == 0: res = nx.zeros(a.shape[1:], type_as=a[0]) if nx.__name__ in ["jax", "tf"]: lst = [] for j in range(0, a.shape[1], batch_size): lst.append(func(a[:, j:j + batch_size], 0)) return nx.concatenate(lst, axis=0) else: for j in range(0, a.shape[1], batch_size): res[j:j + batch_size] = func(a[:, j:j + batch_size], axis=0) return res elif axis == 1: if len(a.shape) == 2: shape = (a.shape[0]) else: shape = (a.shape[0], *a.shape[2:]) res = nx.zeros(shape, type_as=a[0]) if nx.__name__ in ["jax", "tf"]: lst = [] for i in range(0, a.shape[0], batch_size): lst.append(func(a[i:i + batch_size], 1)) return nx.concatenate(lst, axis=0) else: for i in range(0, a.shape[0], batch_size): res[i:i + batch_size] = func(a[i:i + batch_size], axis=1) return res else: raise (NotImplementedError("Only axis=None, 0 or 1 is implemented for now.")) def get_lowrank_lazytensor(Q, R, d=None, nx=None): """ Get a low rank LazyTensor T=Q@R^T or T=Q@diag(d)@R^T Parameters ---------- Q : ndarray, shape (n, r) First factor of the lowrank tensor R : ndarray, shape (m, r) Second factor of the lowrank tensor d : ndarray, shape (r,), optional Diagonal of the lowrank tensor nx : Backend, optional Backend to use for the reduction Returns ------- T : LazyTensor Lowrank tensor T=Q@R^T or T=Q@diag(d)@R^T """ if nx is None: nx = get_backend(Q, R, d) shape = (Q.shape[0], R.shape[0]) if d is None: def func(i, j, Q, R): return nx.dot(Q[i], R[j].T) T = LazyTensor(shape, func, Q=Q, R=R) else: def func(i, j, Q, R, d): return nx.dot(Q[i] * d[None, :], R[j].T) T = LazyTensor(shape, func, Q=Q, R=R, d=d) return T def get_parameter_pair(parameter): r"""Extract a pair of parameters from a given parameter Used in unbalanced OT and COOT solvers to handle marginal regularization and entropic regularization. Parameters ---------- parameter : float or indexable object nx : backend object Returns ------- param_1 : float param_2 : float """ if isinstance(parameter, float) or isinstance(parameter, int): param_1, param_2 = parameter, parameter elif len(parameter) == 1: param_1, param_2 = parameter[0], parameter[0] else: if len(parameter) > 2: raise ValueError("Parameter must be either a scalar, \ or an indexable object of length 1 or 2.") else: param_1, param_2 = parameter[0], parameter[1] return param_1, param_2 class deprecated(object): r"""Decorator to mark a function or class as deprecated. deprecated class from scikit-learn package https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/deprecation.py Issue a warning when the function is called/the class is instantiated and adds a warning to the docstring. The optional extra argument will be appended to the deprecation message and the docstring. .. note:: To use this with the default value for extra, use empty parentheses: >>> from ot.deprecation import deprecated # doctest: +SKIP >>> @deprecated() # doctest: +SKIP ... def some_function(): pass # doctest: +SKIP Parameters ---------- extra : str To be added to the deprecation messages. """ # Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary, # but with many changes. 
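# Usage sketch (`old_func` and `new_func` are hypothetical names, shown for # illustration only): # @deprecated("use `new_func` instead") # def old_func(): # ... # Calling old_func() then emits a DeprecationWarning, and its docstring is # prefixed with "DEPRECATED: use `new_func` instead".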
def __init__(self, extra=''): self.extra = extra def __call__(self, obj): r"""Call method Parameters ---------- obj : object """ if isinstance(obj, type): return self._decorate_class(obj) else: return self._decorate_fun(obj) def _decorate_class(self, cls): msg = "Class %s is deprecated" % cls.__name__ if self.extra: msg += "; %s" % self.extra # FIXME: we should probably reset __new__ for full generality init = cls.__init__ def wrapped(*args, **kwargs): warnings.warn(msg, category=DeprecationWarning) return init(*args, **kwargs) cls.__init__ = wrapped wrapped.__name__ = '__init__' wrapped.__doc__ = self._update_doc(init.__doc__) wrapped.deprecated_original = init return cls def _decorate_fun(self, fun): r"""Decorate function fun""" msg = "Function %s is deprecated" % fun.__name__ if self.extra: msg += "; %s" % self.extra def wrapped(*args, **kwargs): warnings.warn(msg, category=DeprecationWarning) return fun(*args, **kwargs) wrapped.__name__ = fun.__name__ wrapped.__dict__ = fun.__dict__ wrapped.__doc__ = self._update_doc(fun.__doc__) return wrapped def _update_doc(self, olddoc): newdoc = "DEPRECATED" if self.extra: newdoc = "%s: %s" % (newdoc, self.extra) if olddoc: newdoc = "%s\n\n%s" % (newdoc, olddoc) return newdoc def _is_deprecated(func): r"""Helper to check if func is wraped by our deprecated decorator""" if sys.version_info < (3, 5): raise NotImplementedError("This is only available for python3.5 " "or above") closures = getattr(func, '__closure__', []) if closures is None: closures = [] is_deprecated = ('deprecated' in ''.join([c.cell_contents for c in closures if isinstance(c.cell_contents, str)])) return is_deprecated class BaseEstimator(object): r"""Base class for most objects in POT Code adapted from sklearn BaseEstimator class Notes ----- All estimators should specify all the parameters that can be set at the class level in their ``__init__`` as explicit keyword arguments (no ``*args`` or ``**kwargs``). """ nx: Backend = None def _get_backend(self, *arrays): nx = get_backend( *[input_ for input_ in arrays if input_ is not None] ) if nx.__name__ in ("tf",): raise TypeError("Domain adaptation does not support TF backend.") self.nx = nx return nx @classmethod def _get_param_names(cls): r"""Get parameter names for the estimator""" # fetch the constructor or the original constructor before # deprecation wrapping if any init = getattr(cls.__init__, 'deprecated_original', cls.__init__) if init is object.__init__: # No explicit constructor to introspect return [] # introspect the constructor arguments to find the model parameters # to represent init_signature = signature(init) # Consider the constructor parameters excluding 'self' parameters = [p for p in init_signature.parameters.values() if p.name != 'self' and p.kind != p.VAR_KEYWORD] for p in parameters: if p.kind == p.VAR_POSITIONAL: raise RuntimeError("POT estimators should always " "specify their parameters in the signature" " of their __init__ (no varargs)." " %s with constructor %s doesn't " " follow this convention." % (cls, init_signature)) # Extract and sort argument names excluding 'self' return sorted([p.name for p in parameters]) def get_params(self, deep=True): r"""Get parameters for this estimator. Parameters ---------- deep : bool, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : mapping of string to any Parameter names mapped to their values. 
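Examples -------- A minimal sketch, assuming a hypothetical estimator subclass: >>> class MyEstimator(BaseEstimator): ... def __init__(self, reg=1.0): ... self.reg = reg >>> MyEstimator(reg=0.5).get_params() {'reg': 0.5}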
""" out = dict() for key in self._get_param_names(): # We need deprecation warnings to always be on in order to # catch deprecated param values. # This is set in utils/__init__.py but it gets overwritten # when running under python3 somehow. warnings.simplefilter("always", DeprecationWarning) try: with warnings.catch_warnings(record=True) as w: value = getattr(self, key, None) if len(w) and w[0].category == DeprecationWarning: # if the parameter is deprecated, don't show it continue finally: warnings.filters.pop(0) # XXX: should we rather test if instance of estimator? if deep and hasattr(value, 'get_params'): deep_items = value.get_params().items() out.update((key + '__' + k, val) for k, val in deep_items) out[key] = value return out def set_params(self, **params): r"""Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form ``__`` so that it's possible to update each component of a nested object. Returns ------- self """ if not params: # Simple optimisation to gain speed (inspect is slow) return self valid_params = self.get_params(deep=True) # for key, value in iteritems(params): for key, value in params.items(): split = key.split('__', 1) if len(split) > 1: # nested objects case name, sub_name = split if name not in valid_params: raise ValueError('Invalid parameter %s for estimator %s. ' 'Check the list of available parameters ' 'with `estimator.get_params().keys()`.' % (name, self)) sub_object = valid_params[name] sub_object.set_params(**{sub_name: value}) else: # simple objects case if key not in valid_params: raise ValueError('Invalid parameter %s for estimator %s. ' 'Check the list of available parameters ' 'with `estimator.get_params().keys()`.' % (key, self.__class__.__name__)) setattr(self, key, value) return self class UndefinedParameter(Exception): r""" Aim at raising an Exception when a undefined parameter is called """ pass class OTResult: """ Base class for OT results. Parameters ---------- potentials : tuple of array-like, shape (`n1`, `n2`) Dual potentials, i.e. Lagrange multipliers for the marginal constraints. This pair of arrays has the same shape, numerical type and properties as the input weights "a" and "b". value : float, array-like Full transport cost, including possible regularization terms and quadratic term for Gromov Wasserstein solutions. value_linear : float, array-like The linear part of the transport cost, i.e. the product between the transport plan and the cost. value_quad : float, array-like The quadratic part of the transport cost for Gromov-Wasserstein solutions. plan : array-like, shape (`n1`, `n2`) Transport plan, encoded as a dense array. log : dict Dictionary containing potential information about the solver. backend : Backend Backend used to compute the results. sparse_plan : array-like, shape (`n1`, `n2`) Transport plan, encoded as a sparse array. lazy_plan : LazyTensor Transport plan, encoded as a symbolic POT or KeOps LazyTensor. status : int or str Status of the solver. batch_size : int Batch size used to compute the results/marginals for LazyTensor. Attributes ---------- potentials : tuple of array-like, shape (`n1`, `n2`) Dual potentials, i.e. Lagrange multipliers for the marginal constraints. This pair of arrays has the same shape, numerical type and properties as the input weights "a" and "b". potential_a : array-like, shape (`n1`,) First dual potential, associated to the "source" measure "a". 
potential_b : array-like, shape (`n2`,) Second dual potential, associated to the "target" measure "b". value : float, array-like Full transport cost, including possible regularization terms and quadratic term for Gromov Wasserstein solutions. value_linear : float, array-like The linear part of the transport cost, i.e. the product between the transport plan and the cost. value_quad : float, array-like The quadratic part of the transport cost for Gromov-Wasserstein solutions. plan : array-like, shape (`n1`, `n2`) Transport plan, encoded as a dense array. sparse_plan : array-like, shape (`n1`, `n2`) Transport plan, encoded as a sparse array. lazy_plan : LazyTensor Transport plan, encoded as a symbolic POT or KeOps LazyTensor. marginals : tuple of array-like, shape (`n1`,), (`n2`,) Marginals of the transport plan: should be very close to "a" and "b" for balanced OT. marginal_a : array-like, shape (`n1`,) Marginal of the transport plan for the "source" measure "a". marginal_b : array-like, shape (`n2`,) Marginal of the transport plan for the "target" measure "b". """ def __init__(self, potentials=None, value=None, value_linear=None, value_quad=None, plan=None, log=None, backend=None, sparse_plan=None, lazy_plan=None, status=None, batch_size=100): self._potentials = potentials self._value = value self._value_linear = value_linear self._value_quad = value_quad self._plan = plan self._log = log self._sparse_plan = sparse_plan self._lazy_plan = lazy_plan self._backend = backend if backend is not None else NumpyBackend() self._status = status self._batch_size = batch_size # I assume that other solvers may return directly # some primal objects? # In the code below, let's define the main quantities # that may be of interest to users. # An OT solver returns an object that inherits from OTResult # (e.g. SinkhornOTResult) and implements the relevant # methods (e.g. "plan" and "lazy_plan" but not "sparse_plan", etc.). # log is a dictionary containing potential information about the solver # Dual potentials -------------------------------------------- def __repr__(self): s = 'OTResult(' if self._value is not None: s += 'value={},'.format(self._value) if self._value_linear is not None: s += 'value_linear={},'.format(self._value_linear) if self._plan is not None: s += 'plan={}(shape={}),'.format(self._plan.__class__.__name__, self._plan.shape) if self._lazy_plan is not None: s += 'lazy_plan={}(shape={}),'.format(self._lazy_plan.__class__.__name__, self._lazy_plan.shape) if s[-1] != '(': s = s[:-1] + ')' else: s = s + ')' return s @property def potentials(self): """Dual potentials, i.e. Lagrange multipliers for the marginal constraints. This pair of arrays has the same shape, numerical type and properties as the input weights "a" and "b". """ if self._potentials is not None: return self._potentials else: raise NotImplementedError() @property def potential_a(self): """First dual potential, associated to the "source" measure "a".""" if self._potentials is not None: return self._potentials[0] else: raise NotImplementedError() @property def potential_b(self): """Second dual potential, associated to the "target" measure "b".""" if self._potentials is not None: return self._potentials[1] else: raise NotImplementedError() # Transport plan ------------------------------------------- @property def plan(self): """Transport plan, encoded as a dense array.""" # N.B.: We may catch out-of-memory errors and suggest # the use of lazy_plan or sparse_plan when appropriate. 
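# Typical access pattern, for illustration (ot.solve is the generic solver # returning an OTResult): res = ot.solve(M, a, b); res.plan is then a dense # (n1, n2) array whose marginals should match a and b.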
if self._plan is not None: return self._plan else: raise NotImplementedError() @property def sparse_plan(self): """Transport plan, encoded as a sparse array.""" if self._sparse_plan is not None: return self._sparse_plan elif self._plan is not None: return self._backend.tocsr(self._plan) else: raise NotImplementedError() @property def lazy_plan(self): """Transport plan, encoded as a symbolic KeOps LazyTensor.""" if self._lazy_plan is not None: return self._lazy_plan else: raise NotImplementedError() # Loss values -------------------------------- @property def value(self): """Full transport cost, including possible regularization terms and quadratic term for Gromov Wasserstein solutions.""" if self._value is not None: return self._value else: raise NotImplementedError() @property def value_linear(self): """The "minimal" transport cost, i.e. the product between the transport plan and the cost.""" if self._value_linear is not None: return self._value_linear else: raise NotImplementedError() @property def value_quad(self): """The quadratic part of the transport cost for Gromov-Wasserstein solutions.""" if self._value_quad is not None: return self._value_quad else: raise NotImplementedError() # Marginal constraints ------------------------- @property def marginals(self): """Marginals of the transport plan: should be very close to "a" and "b" for balanced OT.""" if self._plan is not None: return self.marginal_a, self.marginal_b else: raise NotImplementedError() @property def marginal_a(self): """First marginal of the transport plan, with the same shape as "a".""" if self._plan is not None: return self._backend.sum(self._plan, 1) elif self._lazy_plan is not None: lp = self._lazy_plan bs = self._batch_size nx = self._backend return reduce_lazytensor(lp, nx.sum, axis=1, nx=nx, batch_size=bs) else: raise NotImplementedError() @property def marginal_b(self): """Second marginal of the transport plan, with the same shape as "b".""" if self._plan is not None: return self._backend.sum(self._plan, 0) elif self._lazy_plan is not None: lp = self._lazy_plan bs = self._batch_size nx = self._backend return reduce_lazytensor(lp, nx.sum, axis=0, nx=nx, batch_size=bs) else: raise NotImplementedError() @property def status(self): """Optimization status of the solver.""" if self._status is not None: return self._status else: raise NotImplementedError() @property def log(self): """Dictionary containing potential information about the solver.""" if self._log is not None: return self._log else: raise NotImplementedError() # Barycentric mappings ------------------------- # Return the displacement vectors as an array # that has the same shape as "xa"/"xb" (for samples) # or "a"/"b" * D (for images)? 
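# A natural definition (not implemented here) would be the barycentric # projection, where each source sample x_i is displaced towards # (plan @ xb)_i / a_i, the plan-weighted mean of its target points.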
@property def a_to_b(self): """Displacement vectors from the first to the second measure.""" raise NotImplementedError() @property def b_to_a(self): """Displacement vectors from the second to the first measure.""" raise NotImplementedError() # # Wasserstein barycenters ---------------------- # @property # def masses(self): # """Masses for the Wasserstein barycenter.""" # raise NotImplementedError() # @property # def samples(self): # """Sample locations for the Wasserstein barycenter.""" # raise NotImplementedError() # Miscellaneous -------------------------------- @property def citation(self): """Appropriate citation(s) for this result, in plain text and BibTex formats.""" # The string below refers to the POT library: # successor methods may concatenate the relevant references # to the original definitions, solvers and underlying numerical backends. return """POT library: POT Python Optimal Transport library, Journal of Machine Learning Research, 22(78):1-8, 2021. Website: https://pythonot.github.io/ Rémi Flamary, Nicolas Courty, Alexandre Gramfort, Mokhtar Z. Alaya, Aurélie Boisbunon, Stanislas Chambon, Laetitia Chapel, Adrien Corenflos, Kilian Fatras, Nemo Fournier, Léo Gautheron, Nathalie T.H. Gayraud, Hicham Janati, Alain Rakotomamonjy, Ievgen Redko, Antoine Rolet, Antony Schutz, Vivien Seguy, Danica J. Sutherland, Romain Tavenard, Alexander Tong, Titouan Vayer; @article{flamary2021pot, author = {R{\'e}mi Flamary and Nicolas Courty and Alexandre Gramfort and Mokhtar Z. Alaya and Aur{\'e}lie Boisbunon and Stanislas Chambon and Laetitia Chapel and Adrien Corenflos and Kilian Fatras and Nemo Fournier and L{\'e}o Gautheron and Nathalie T.H. Gayraud and Hicham Janati and Alain Rakotomamonjy and Ievgen Redko and Antoine Rolet and Antony Schutz and Vivien Seguy and Danica J. Sutherland and Romain Tavenard and Alexander Tong and Titouan Vayer}, title = {{POT}: {Python} {Optimal} {Transport}}, journal = {Journal of Machine Learning Research}, year = {2021}, volume = {22}, number = {78}, pages = {1-8}, url = {http://jmlr.org/papers/v22/20-451.html} } """ class LazyTensor(object): """ A lazy tensor is a tensor that is not stored in memory. Instead, it is defined by a function that computes its values on the fly from slices. Parameters ---------- shape : tuple shape of the tensor getitem : callable function that computes the values of the tensor from the given indices/slices and the named arrays in `kwargs` kwargs : dict named arguments for the function, those names will be used as attributes of the LazyTensor object Examples -------- >>> import numpy as np >>> v = np.arange(5) >>> def getitem(i,j, v): ... 
return v[i,None]+v[None,j] >>> T = LazyTensor((5,5),getitem, v=v) >>> T[1,2] array([3]) >>> T[1,:] array([[1, 2, 3, 4, 5]]) >>> T[:] array([[0, 1, 2, 3, 4], [1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7], [4, 5, 6, 7, 8]]) """ def __init__(self, shape, getitem, **kwargs): self._getitem = getitem self.shape = shape self.ndim = len(shape) self.kwargs = kwargs # set attributes for named arguments/arrays for key, value in kwargs.items(): setattr(self, key, value) def __getitem__(self, key): k = [] if isinstance(key, int) or isinstance(key, slice): k.append(key) for i in range(self.ndim - 1): k.append(slice(None)) elif isinstance(key, tuple): k = list(key) for i in range(self.ndim - len(key)): k.append(slice(None)) else: raise NotImplementedError("Only integer, slice, and tuple indexing is supported") return self._getitem(*k, **self.kwargs) def __repr__(self): return "LazyTensor(shape={},attributes=({}))".format(self.shape, ','.join(self.kwargs.keys())) python-pot-0.9.3+dfsg/ot/weak.py000066400000000000000000000072551455713015700165270ustar00rootroot00000000000000""" Weak optimal transport solvers """ # Author: Remi Flamary # # License: MIT License from .backend import get_backend from .optim import cg import numpy as np __all__ = ['weak_optimal_transport'] def weak_optimal_transport(Xa, Xb, a=None, b=None, verbose=False, log=False, G0=None, **kwargs): r"""Solves the weak optimal transport problem between two empirical distributions .. math:: \gamma = \mathop{\arg \min}_\gamma \quad \sum_i \mathbf{a}_i \left(\mathbf{X^a}_i - \frac{1}{\mathbf{a}_i} \sum_j \gamma_{ij} \mathbf{X^b}_j \right)^2 s.t. \ \gamma \mathbf{1} = \mathbf{a} \gamma^T \mathbf{1} = \mathbf{b} \gamma \geq 0 where : - :math:`X^a` and :math:`X^b` are the sample matrices. - :math:`\mathbf{a}` and :math:`\mathbf{b}` are the sample weights .. note:: This function is backend-compatible and will work on arrays from all compatible backends. But the algorithm uses the C++ CPU backend which can lead to copy overhead on GPU arrays. Uses the conditional gradient algorithm to solve the problem proposed in :ref:`[39] <references-weak>`. Parameters ---------- Xa : (ns,d) array-like, float Source samples Xb : (nt,d) array-like, float Target samples a : (ns,) array-like, float Source histogram (uniform weights if empty list) b : (nt,) array-like, float Target histogram (uniform weights if empty list) G0 : (ns,nt) array-like, float initial guess (default is indep joint density) numItermax : int, optional Max number of iterations numItermaxEmd : int, optional Max number of iterations for emd stopThr : float, optional Stop threshold on the relative variation (>0) stopThr2 : float, optional Stop threshold on the absolute variation (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True Returns ------- gamma: array-like, shape (ns, nt) Optimal transportation matrix for the given parameters log: dict, optional If input log is true, a dictionary containing the cost and dual variables and exit status .. _references-weak: References ---------- .. [39] Gozlan, N., Roberto, C., Samson, P. M., & Tetali, P. (2017). Kantorovich duality for general transport costs and applications. Journal of Functional Analysis, 273(11), 3327-3405.
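Examples -------- A small usage sketch with random samples and uniform weights (shapes and data are arbitrary): >>> import numpy as np >>> rng = np.random.RandomState(0) >>> Xa, Xb = rng.randn(10, 2), rng.randn(12, 2) >>> G = weak_optimal_transport(Xa, Xb) >>> G.shape (10, 12)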
See Also -------- ot.bregman.sinkhorn : Entropic regularized OT ot.optim.cg : General regularized OT """ nx = get_backend(Xa, Xb) Xa2 = nx.to_numpy(Xa) Xb2 = nx.to_numpy(Xb) if a is None: a2 = np.ones((Xa.shape[0])) / Xa.shape[0] else: a2 = nx.to_numpy(a) if b is None: b2 = np.ones((Xb.shape[0])) / Xb.shape[0] else: b2 = nx.to_numpy(b) # init uniform if G0 is None: T0 = a2[:, None] * b2[None, :] else: T0 = nx.to_numpy(G0) # weak OT loss def f(T): return np.dot(a2, np.sum((Xa2 - np.dot(T, Xb2) / a2[:, None])**2, 1)) # weak OT gradient def df(T): return -2 * np.dot(Xa2 - np.dot(T, Xb2) / a2[:, None], Xb2.T) # solve with conditional gradient and return solution if log: res, log = cg(a2, b2, 0, 1, f, df, T0, log=log, verbose=verbose, **kwargs) log['u'] = nx.from_numpy(log['u'], type_as=Xa) log['v'] = nx.from_numpy(log['v'], type_as=Xb) return nx.from_numpy(res, type_as=Xa), log else: return nx.from_numpy(cg(a2, b2, 0, 1, f, df, T0, log=log, verbose=verbose, **kwargs), type_as=Xa) python-pot-0.9.3+dfsg/pyproject.toml000066400000000000000000000002031455713015700175020ustar00rootroot00000000000000[build-system] requires = ["setuptools", "wheel", "oldest-supported-numpy", "cython>=0.23"] build-backend = "setuptools.build_meta"python-pot-0.9.3+dfsg/pytest.ini000066400000000000000000000000001455713015700166120ustar00rootroot00000000000000python-pot-0.9.3+dfsg/requirements.txt000066400000000000000000000002211455713015700200520ustar00rootroot00000000000000numpy>=1.20 scipy>=1.6 matplotlib autograd pymanopt cvxopt scikit-learn torch jax jaxlib tensorflow pytest torch_geometric cvxpy geomloss pykeopspython-pot-0.9.3+dfsg/setup.cfg000066400000000000000000000013231455713015700164130ustar00rootroot00000000000000[metadata] description_file = README.md [flake8] exclude = __init__.py ignore = E265,E501,W605,W503,W504 [tool:pytest] addopts = --showlocals --durations=20 --doctest-modules -ra --cov-report= --cov=ot --doctest-ignore-import-errors --junit-xml=junit-results.xml --ignore=docs --ignore=examples --ignore=notebooks [pycodestyle] exclude = __init__.py,constants.py,fixes.py ignore = E241,E305,W504 [pydocstyle] convention = pep257 match_dir = ^(?!\.|docs|examples).*$ match = (?!tests/__init__\.py|fixes).*\.py add-ignore = D100,D104,D107,D413 add-select = D214,D215,D404,D405,D406,D407,D408,D409,D410,D411 ignore-decorators = ^(copy_.*_doc_to_|on_trait_change|cached_property|deprecated|property|.*setter).* python-pot-0.9.3+dfsg/setup.py000066400000000000000000000072001455713015700163040ustar00rootroot00000000000000#!/usr/bin/env python import os import re import subprocess import sys from setuptools import find_packages, setup from setuptools.extension import Extension import numpy from Cython.Build import cythonize sys.path.append(os.path.join("ot", "helpers")) from openmp_helpers import check_openmp_support # dirty but working __version__ = re.search( r'__version__\s*=\s*[\'"]([^\'"]*)[\'"]', # It excludes inline comment too open('ot/__init__.py').read()).group(1) # The beautiful part is, I don't even need to check exceptions here. # If something messes up, let the build process fail noisy, BEFORE my release! 
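# For illustration, the regex above turns a line such as # __version__ = "0.9.3" # into the captured group "0.9.3", stopping at the closing quote so any # trailing inline comment is excluded.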
# thanks PyPI for handling markdown now ROOT = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(ROOT, 'README.md'), encoding="utf-8") as f: README = f.read() # clean cython output is clean is called if 'clean' in sys.argv[1:]: if os.path.isfile('ot/lp/emd_wrap.cpp'): os.remove('ot/lp/emd_wrap.cpp') # add platform dependant optional compilation argument openmp_supported, flags = check_openmp_support() compile_args = ["/O2" if sys.platform == "win32" else "-O3"] link_args = [] if openmp_supported: compile_args += flags link_args += flags if sys.platform.startswith('darwin'): compile_args.append("-stdlib=libc++") sdk_path = subprocess.check_output(['xcrun', '--show-sdk-path']) os.environ['CFLAGS'] = '-isysroot "{}"'.format(sdk_path.rstrip().decode("utf-8")) setup( name='POT', version=__version__, description='Python Optimal Transport Library', long_description=README, long_description_content_type='text/markdown', author=u'Remi Flamary, Nicolas Courty, POT Contributors', author_email='remi.flamary@gmail.com, ncourty@gmail.com', url='https://github.com/PythonOT/POT', packages=find_packages(exclude=["benchmarks"]), ext_modules=cythonize(Extension( name="ot.lp.emd_wrap", sources=["ot/lp/emd_wrap.pyx", "ot/lp/EMD_wrapper.cpp"], # cython/c++ src files language="c++", include_dirs=[numpy.get_include(), os.path.join(ROOT, 'ot/lp')], extra_compile_args=compile_args, extra_link_args=link_args )), platforms=['linux', 'macosx', 'windows'], download_url='https://github.com/PythonOT/POT/archive/{}.tar.gz'.format(__version__), license='MIT', scripts=[], data_files=[], setup_requires=["oldest-supported-numpy", "cython>=0.23"], install_requires=["numpy>=1.16", "scipy>=1.6"], python_requires=">=3.6", classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Intended Audience :: Education', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: MIT License', 'Environment :: Console', 'Operating System :: OS Independent', 'Operating System :: POSIX :: Linux', 'Operating System :: MacOS', 'Operating System :: POSIX', 'Operating System :: Microsoft :: Windows', 'Programming Language :: Python', 'Programming Language :: C++', 'Programming Language :: C', 'Programming Language :: Cython', 'Topic :: Utilities', 'Topic :: Scientific/Engineering :: Artificial Intelligence', 'Topic :: Scientific/Engineering :: Mathematics', 'Topic :: Scientific/Engineering :: Information Analysis', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', ] ) python-pot-0.9.3+dfsg/test/000077500000000000000000000000001455713015700155525ustar00rootroot00000000000000python-pot-0.9.3+dfsg/test/conftest.py000066400000000000000000000034761455713015700177630ustar00rootroot00000000000000# -*- coding: utf-8 -*- # Configuration file for pytest # License: MIT License import functools import os import pytest from ot.backend import get_backend_list, jax, tf if jax: os.environ['XLA_PYTHON_CLIENT_PREALLOCATE'] = 'false' from jax.config import config config.update("jax_enable_x64", True) if tf: # make sure TF doesn't allocate entire GPU import tensorflow as tf physical_devices = tf.config.list_physical_devices('GPU') for device in physical_devices: try: tf.config.experimental.set_memory_growth(device, True) except Exception: pass # allow numpy API for TF from tensorflow.python.ops.numpy_ops import 
np_config np_config.enable_numpy_behavior() backend_list = get_backend_list() @pytest.fixture(params=backend_list) def nx(request): backend = request.param yield backend def skip_arg(arg, value, reason=None, getter=lambda x: x): if isinstance(arg, (tuple, list)): n = len(arg) else: arg = (arg, ) n = 1 if n != 1 and isinstance(value, (tuple, list)): pass else: value = (value, ) if isinstance(getter, (tuple, list)): pass else: getter = [getter] * n if reason is None: reason = f"Param {arg} should be skipped for value {value}" def wrapper(function): @functools.wraps(function) def wrapped(*args, **kwargs): if all( arg[i] in kwargs.keys() and getter[i](kwargs[arg[i]]) == value[i] for i in range(n) ): pytest.skip(reason) return function(*args, **kwargs) return wrapped return wrapper def pytest_configure(config): pytest.skip_arg = skip_arg pytest.skip_backend = functools.partial(skip_arg, "nx", getter=str) python-pot-0.9.3+dfsg/test/test_1d_solver.py000066400000000000000000000235301455713015700210640ustar00rootroot00000000000000"""Tests for module 1d Wasserstein solver""" # Author: Adrien Corenflos # Nicolas Courty # # License: MIT License import numpy as np import pytest import ot from ot.backend import tf from ot.lp import wasserstein_1d from scipy.stats import wasserstein_distance def test_emd_1d_emd2_1d_with_weights(): # test emd1d gives similar results as emd n = 20 m = 30 rng = np.random.RandomState(0) u = rng.randn(n, 1) v = rng.randn(m, 1) w_u = rng.uniform(0., 1., n) w_u = w_u / w_u.sum() w_v = rng.uniform(0., 1., m) w_v = w_v / w_v.sum() M = ot.dist(u, v, metric='sqeuclidean') G, log = ot.emd(w_u, w_v, M, log=True) wass = log["cost"] G_1d, log = ot.emd_1d(u, v, w_u, w_v, metric='sqeuclidean', log=True) wass1d = log["cost"] wass1d_emd2 = ot.emd2_1d(u, v, w_u, w_v, metric='sqeuclidean', log=False) wass1d_euc = ot.emd2_1d(u, v, w_u, w_v, metric='euclidean', log=False) # check loss is similar np.testing.assert_allclose(wass, wass1d) np.testing.assert_allclose(wass, wass1d_emd2) # check loss is similar to scipy's implementation for Euclidean metric wass_sp = wasserstein_distance(u.reshape((-1,)), v.reshape((-1,)), w_u, w_v) np.testing.assert_allclose(wass_sp, wass1d_euc) # check constraints np.testing.assert_allclose(w_u, G.sum(1)) np.testing.assert_allclose(w_v, G.sum(0)) def test_wasserstein_1d(nx): rng = np.random.RandomState(0) n = 100 x = np.linspace(0, 5, n) rho_u = np.abs(rng.randn(n)) rho_u /= rho_u.sum() rho_v = np.abs(rng.randn(n)) rho_v /= rho_v.sum() xb, rho_ub, rho_vb = nx.from_numpy(x, rho_u, rho_v) # test 1 : wasserstein_1d should be close to scipy W_1 implementation np.testing.assert_almost_equal(wasserstein_1d(xb, xb, rho_ub, rho_vb, p=1), wasserstein_distance(x, x, rho_u, rho_v)) # test 2 : wasserstein_1d should be close to one when only translating the support np.testing.assert_almost_equal(wasserstein_1d(xb, xb + 1, p=2), 1.) 
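# (test 2 holds because under a unit translation of the support every unit # of mass moves a distance of exactly 1, so W_p = 1 for any p)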
# test 3 : arrays test X = np.stack((np.linspace(0, 5, n), np.linspace(0, 5, n) * 10), -1) Xb = nx.from_numpy(X) res = wasserstein_1d(Xb, Xb, rho_ub, rho_vb, p=2) np.testing.assert_almost_equal(100 * res[0], res[1], decimal=4) def test_wasserstein_1d_type_devices(nx): rng = np.random.RandomState(0) n = 10 x = np.linspace(0, 5, n) rho_u = np.abs(rng.randn(n)) rho_u /= rho_u.sum() rho_v = np.abs(rng.randn(n)) rho_v /= rho_v.sum() for tp in nx.__type_list__: print(nx.dtype_device(tp)) xb, rho_ub, rho_vb = nx.from_numpy(x, rho_u, rho_v, type_as=tp) res = wasserstein_1d(xb, xb, rho_ub, rho_vb, p=1) nx.assert_same_dtype_device(xb, res) @pytest.mark.skipif(not tf, reason="tf not installed") def test_wasserstein_1d_device_tf(): nx = ot.backend.TensorflowBackend() rng = np.random.RandomState(0) n = 10 x = np.linspace(0, 5, n) rho_u = np.abs(rng.randn(n)) rho_u /= rho_u.sum() rho_v = np.abs(rng.randn(n)) rho_v /= rho_v.sum() # Check that everything stays on the CPU with tf.device("/CPU:0"): xb, rho_ub, rho_vb = nx.from_numpy(x, rho_u, rho_v) res = wasserstein_1d(xb, xb, rho_ub, rho_vb, p=1) nx.assert_same_dtype_device(xb, res) if len(tf.config.list_physical_devices('GPU')) > 0: # Check that everything happens on the GPU xb, rho_ub, rho_vb = nx.from_numpy(x, rho_u, rho_v) res = wasserstein_1d(xb, xb, rho_ub, rho_vb, p=1) nx.assert_same_dtype_device(xb, res) assert nx.dtype_device(res)[1].startswith("GPU") def test_emd_1d_emd2_1d(): # test emd1d gives similar results as emd n = 20 m = 30 rng = np.random.RandomState(0) u = rng.randn(n, 1) v = rng.randn(m, 1) M = ot.dist(u, v, metric='sqeuclidean') G, log = ot.emd([], [], M, log=True) wass = log["cost"] G_1d, log = ot.emd_1d(u, v, [], [], metric='sqeuclidean', log=True) wass1d = log["cost"] wass1d_emd2 = ot.emd2_1d(u, v, [], [], metric='sqeuclidean', log=False) wass1d_euc = ot.emd2_1d(u, v, [], [], metric='euclidean', log=False) # check loss is similar np.testing.assert_allclose(wass, wass1d) np.testing.assert_allclose(wass, wass1d_emd2) # check loss is similar to scipy's implementation for Euclidean metric wass_sp = wasserstein_distance(u.reshape((-1,)), v.reshape((-1,))) np.testing.assert_allclose(wass_sp, wass1d_euc) # check constraints np.testing.assert_allclose(np.ones((n,)) / n, G.sum(1)) np.testing.assert_allclose(np.ones((m,)) / m, G.sum(0)) # check G is similar np.testing.assert_allclose(G, G_1d, atol=1e-15) # check AssertionError is raised if called on non 1d arrays u = rng.randn(n, 2) v = rng.randn(m, 2) with pytest.raises(AssertionError): ot.emd_1d(u, v, [], []) def test_emd1d_type_devices(nx): rng = np.random.RandomState(0) n = 10 x = np.linspace(0, 5, n) rho_u = np.abs(rng.randn(n)) rho_u /= rho_u.sum() rho_v = np.abs(rng.randn(n)) rho_v /= rho_v.sum() for tp in nx.__type_list__: print(nx.dtype_device(tp)) xb, rho_ub, rho_vb = nx.from_numpy(x, rho_u, rho_v, type_as=tp) emd = ot.emd_1d(xb, xb, rho_ub, rho_vb) emd2 = ot.emd2_1d(xb, xb, rho_ub, rho_vb) nx.assert_same_dtype_device(xb, emd) nx.assert_same_dtype_device(xb, emd2) @pytest.mark.skipif(not tf, reason="tf not installed") def test_emd1d_device_tf(): nx = ot.backend.TensorflowBackend() rng = np.random.RandomState(0) n = 10 x = np.linspace(0, 5, n) rho_u = np.abs(rng.randn(n)) rho_u /= rho_u.sum() rho_v = np.abs(rng.randn(n)) rho_v /= rho_v.sum() # Check that everything stays on the CPU with tf.device("/CPU:0"): xb, rho_ub, rho_vb = nx.from_numpy(x, rho_u, rho_v) emd = ot.emd_1d(xb, xb, rho_ub, rho_vb) emd2 = ot.emd2_1d(xb, xb, rho_ub, rho_vb) nx.assert_same_dtype_device(xb, emd) 
nx.assert_same_dtype_device(xb, emd2) if len(tf.config.list_physical_devices('GPU')) > 0: # Check that everything happens on the GPU xb, rho_ub, rho_vb = nx.from_numpy(x, rho_u, rho_v) emd = ot.emd_1d(xb, xb, rho_ub, rho_vb) emd2 = ot.emd2_1d(xb, xb, rho_ub, rho_vb) nx.assert_same_dtype_device(xb, emd) nx.assert_same_dtype_device(xb, emd2) assert nx.dtype_device(emd)[1].startswith("GPU") def test_wasserstein_1d_circle(): # test binary_search_circle and wasserstein_circle give similar results as emd n = 20 m = 30 rng = np.random.RandomState(0) u = rng.rand(n,) v = rng.rand(m,) w_u = rng.uniform(0., 1., n) w_u = w_u / w_u.sum() w_v = rng.uniform(0., 1., m) w_v = w_v / w_v.sum() M1 = np.minimum(np.abs(u[:, None] - v[None]), 1 - np.abs(u[:, None] - v[None])) wass1 = ot.emd2(w_u, w_v, M1) wass1_bsc = ot.binary_search_circle(u, v, w_u, w_v, p=1) w1_circle = ot.wasserstein_circle(u, v, w_u, w_v, p=1) M2 = M1**2 wass2 = ot.emd2(w_u, w_v, M2) wass2_bsc = ot.binary_search_circle(u, v, w_u, w_v, p=2) w2_circle = ot.wasserstein_circle(u, v, w_u, w_v, p=2) # check loss is similar np.testing.assert_allclose(wass1, wass1_bsc) np.testing.assert_allclose(wass1, w1_circle, rtol=1e-2) np.testing.assert_allclose(wass2, wass2_bsc) np.testing.assert_allclose(wass2, w2_circle) @pytest.skip_backend("tf") def test_wasserstein1d_circle_devices(nx): rng = np.random.RandomState(0) n = 10 x = np.linspace(0, 1, n) rho_u = np.abs(rng.randn(n)) rho_u /= rho_u.sum() rho_v = np.abs(rng.randn(n)) rho_v /= rho_v.sum() for tp in nx.__type_list__: print(nx.dtype_device(tp)) xb, rho_ub, rho_vb = nx.from_numpy(x, rho_u, rho_v, type_as=tp) w1 = ot.wasserstein_circle(xb, xb, rho_ub, rho_vb, p=1) w2_bsc = ot.wasserstein_circle(xb, xb, rho_ub, rho_vb, p=2) nx.assert_same_dtype_device(xb, w1) nx.assert_same_dtype_device(xb, w2_bsc) def test_wasserstein_1d_unif_circle(): # test semidiscrete_wasserstein2_unif_circle versus wasserstein_circle n = 20 m = 1000 rng = np.random.RandomState(0) u = rng.rand(n,) v = rng.rand(m,) # w_u = rng.uniform(0., 1., n) # w_u = w_u / w_u.sum() w_u = ot.utils.unif(n) w_v = ot.utils.unif(m) M1 = np.minimum(np.abs(u[:, None] - v[None]), 1 - np.abs(u[:, None] - v[None])) wass2 = ot.emd2(w_u, w_v, M1**2) wass2_circle = ot.wasserstein_circle(u, v, w_u, w_v, p=2, eps=1e-15) wass2_unif_circle = ot.semidiscrete_wasserstein2_unif_circle(u, w_u) # check loss is similar np.testing.assert_allclose(wass2, wass2_unif_circle, atol=1e-2) np.testing.assert_allclose(wass2_circle, wass2_unif_circle, atol=1e-2) def test_wasserstein1d_unif_circle_devices(nx): rng = np.random.RandomState(0) n = 10 x = np.linspace(0, 1, n) rho_u = np.abs(rng.randn(n)) rho_u /= rho_u.sum() for tp in nx.__type_list__: print(nx.dtype_device(tp)) xb, rho_ub = nx.from_numpy(x, rho_u, type_as=tp) w2 = ot.semidiscrete_wasserstein2_unif_circle(xb, rho_ub) nx.assert_same_dtype_device(xb, w2) def test_binary_search_circle_log(): n = 20 m = 30 rng = np.random.RandomState(0) u = rng.rand(n,) v = rng.rand(m,) wass2_bsc, log = ot.binary_search_circle(u, v, p=2, log=True) optimal_thetas = log["optimal_theta"] assert optimal_thetas.shape[0] == 1 def test_wasserstein_circle_bad_shape(): n = 20 m = 30 rng = np.random.RandomState(0) u = rng.rand(n, 2) v = rng.rand(m, 1) with pytest.raises(ValueError): _ = ot.wasserstein_circle(u, v, p=2) with pytest.raises(ValueError): _ = ot.wasserstein_circle(u, v, p=1) python-pot-0.9.3+dfsg/test/test_backend.py000066400000000000000000000535461455713015700205670ustar00rootroot00000000000000"""Tests for backend module """ # 
Author: Remi Flamary # Nicolas Courty # # # License: MIT License import numpy as np import pytest from numpy.testing import assert_array_almost_equal_nulp import ot import ot.backend from ot.backend import get_backend, get_backend_list, jax, tf, to_numpy, torch def test_get_backend_list(): lst = get_backend_list() assert len(lst) > 0 assert isinstance(lst[0], ot.backend.NumpyBackend) def test_to_numpy(nx): v = nx.zeros(10) M = nx.ones((10, 10)) v2 = to_numpy(v) assert isinstance(v2, np.ndarray) v2, M2 = to_numpy(v, M) assert isinstance(M2, np.ndarray) def test_get_backend_invalid(): # error if no parameters with pytest.raises(ValueError): get_backend() # error if unknown types with pytest.raises(ValueError): get_backend(1, 2.0) def test_get_backend(nx): A = np.zeros((3, 2)) B = np.zeros((3, 1)) nx_np = get_backend(A) assert nx_np.__name__ == 'numpy' A2, B2 = nx.from_numpy(A, B) effective_nx = get_backend(A2) assert effective_nx.__name__ == nx.__name__ effective_nx = get_backend(A2, B2) assert effective_nx.__name__ == nx.__name__ if nx.__name__ != "numpy": # test that types mathcing different backends in input raise an error with pytest.raises(ValueError): get_backend(A, B2) else: # Check that subclassing a numpy array does not break get_backend # note: This is only tested for numpy as this is hard to be consistent # with other backends class nx_subclass(nx.__type__): pass A3 = nx_subclass(0) effective_nx = get_backend(A3, B2) assert effective_nx.__name__ == nx.__name__ def test_convert_between_backends(nx): A = np.zeros((3, 2)) B = np.zeros((3, 1)) A2 = nx.from_numpy(A) B2 = nx.from_numpy(B) assert isinstance(A2, nx.__type__) assert isinstance(B2, nx.__type__) nx2 = get_backend(A2, B2) assert nx2.__name__ == nx.__name__ assert_array_almost_equal_nulp(nx.to_numpy(A2), A) assert_array_almost_equal_nulp(nx.to_numpy(B2), B) def test_empty_backend(): rnd = np.random.RandomState(0) M = rnd.randn(10, 3) v = rnd.randn(3) nx = ot.backend.Backend() with pytest.raises(NotImplementedError): nx.from_numpy(M) with pytest.raises(NotImplementedError): nx.to_numpy(M) with pytest.raises(NotImplementedError): nx.set_gradients(0, 0, 0) with pytest.raises(NotImplementedError): nx.zeros((10, 3)) with pytest.raises(NotImplementedError): nx.ones((10, 3)) with pytest.raises(NotImplementedError): nx.arange(10, 1, 2) with pytest.raises(NotImplementedError): nx.full((10, 3), 3.14) with pytest.raises(NotImplementedError): nx.eye((10, 3)) with pytest.raises(NotImplementedError): nx.sum(M) with pytest.raises(NotImplementedError): nx.cumsum(M) with pytest.raises(NotImplementedError): nx.max(M) with pytest.raises(NotImplementedError): nx.min(M) with pytest.raises(NotImplementedError): nx.maximum(v, v) with pytest.raises(NotImplementedError): nx.minimum(v, v) with pytest.raises(NotImplementedError): nx.abs(M) with pytest.raises(NotImplementedError): nx.log(M) with pytest.raises(NotImplementedError): nx.exp(M) with pytest.raises(NotImplementedError): nx.sqrt(M) with pytest.raises(NotImplementedError): nx.power(v, 2) with pytest.raises(NotImplementedError): nx.dot(v, v) with pytest.raises(NotImplementedError): nx.norm(M) with pytest.raises(NotImplementedError): nx.exp(M) with pytest.raises(NotImplementedError): nx.any(M) with pytest.raises(NotImplementedError): nx.isnan(M) with pytest.raises(NotImplementedError): nx.isinf(M) with pytest.raises(NotImplementedError): nx.einsum('ij->i', M) with pytest.raises(NotImplementedError): nx.sort(M) with pytest.raises(NotImplementedError): nx.argsort(M) with 
pytest.raises(NotImplementedError): nx.searchsorted(v, v) with pytest.raises(NotImplementedError): nx.flip(M) with pytest.raises(NotImplementedError): nx.outer(v, v) with pytest.raises(NotImplementedError): nx.clip(M, -1, 1) with pytest.raises(NotImplementedError): nx.repeat(M, 0, 1) with pytest.raises(NotImplementedError): nx.take_along_axis(M, v, 0) with pytest.raises(NotImplementedError): nx.concatenate([v, v]) with pytest.raises(NotImplementedError): nx.zero_pad(M, v) with pytest.raises(NotImplementedError): nx.argmax(M) with pytest.raises(NotImplementedError): nx.argmin(M) with pytest.raises(NotImplementedError): nx.mean(M) with pytest.raises(NotImplementedError): nx.median(M) with pytest.raises(NotImplementedError): nx.std(M) with pytest.raises(NotImplementedError): nx.linspace(0, 1, 50) with pytest.raises(NotImplementedError): nx.meshgrid(v, v) with pytest.raises(NotImplementedError): nx.diag(M) with pytest.raises(NotImplementedError): nx.unique([M, M]) with pytest.raises(NotImplementedError): nx.logsumexp(M) with pytest.raises(NotImplementedError): nx.stack([M, M]) with pytest.raises(NotImplementedError): nx.reshape(M, (5, 3, 2)) with pytest.raises(NotImplementedError): nx.seed(42) with pytest.raises(NotImplementedError): nx.rand() with pytest.raises(NotImplementedError): nx.randn() nx.coo_matrix(M, M, M) with pytest.raises(NotImplementedError): nx.issparse(M) with pytest.raises(NotImplementedError): nx.tocsr(M) with pytest.raises(NotImplementedError): nx.eliminate_zeros(M) with pytest.raises(NotImplementedError): nx.todense(M) with pytest.raises(NotImplementedError): nx.where(M, M, M) with pytest.raises(NotImplementedError): nx.copy(M) with pytest.raises(NotImplementedError): nx.allclose(M, M) with pytest.raises(NotImplementedError): nx.squeeze(M) with pytest.raises(NotImplementedError): nx.bitsize(M) with pytest.raises(NotImplementedError): nx.device_type(M) with pytest.raises(NotImplementedError): nx._bench(lambda x: x, M, n_runs=1) with pytest.raises(NotImplementedError): nx.solve(M, v) with pytest.raises(NotImplementedError): nx.trace(M) with pytest.raises(NotImplementedError): nx.inv(M) with pytest.raises(NotImplementedError): nx.sqrtm(M) with pytest.raises(NotImplementedError): nx.kl_div(M, M) with pytest.raises(NotImplementedError): nx.isfinite(M) with pytest.raises(NotImplementedError): nx.array_equal(M, M) with pytest.raises(NotImplementedError): nx.is_floating_point(M) with pytest.raises(NotImplementedError): nx.tile(M, (10, 1)) with pytest.raises(NotImplementedError): nx.floor(M) with pytest.raises(NotImplementedError): nx.prod(M) with pytest.raises(NotImplementedError): nx.sort2(M) with pytest.raises(NotImplementedError): nx.qr(M) with pytest.raises(NotImplementedError): nx.atan2(v, v) with pytest.raises(NotImplementedError): nx.transpose(M) with pytest.raises(NotImplementedError): nx.detach(M) with pytest.raises(NotImplementedError): nx.matmul(M, M.T) with pytest.raises(NotImplementedError): nx.nan_to_num(M) def test_func_backends(nx): rnd = np.random.RandomState(0) M = rnd.randn(10, 3) SquareM = rnd.randn(10, 10) v = rnd.randn(3) val = np.array([1.0]) M1 = rnd.randn(1, 2, 10, 10) M2 = rnd.randn(3, 1, 10, 10) # Sparse tensors test sp_row = np.array([0, 3, 1, 0, 3]) sp_col = np.array([0, 3, 1, 2, 2]) sp_data = np.array([4, 5, 7, 9, 0], dtype=np.float64) lst_tot = [] for nx in [ot.backend.NumpyBackend(), nx]: print('Backend: ', nx.__name__) lst_b = [] lst_name = [] Mb = nx.from_numpy(M) SquareMb = nx.from_numpy(SquareM) vb = nx.from_numpy(v) M1b = nx.from_numpy(M1) M2b 
= nx.from_numpy(M2) val = nx.from_numpy(val) sp_rowb = nx.from_numpy(sp_row) sp_colb = nx.from_numpy(sp_col) sp_datab = nx.from_numpy(sp_data) A = nx.set_gradients(val, v, v) lst_b.append(nx.to_numpy(A)) lst_name.append('set_gradients') A = nx.zeros((10, 3)) A = nx.zeros((10, 3), type_as=Mb) lst_b.append(nx.to_numpy(A)) lst_name.append('zeros') A = nx.ones((10, 3)) A = nx.ones((10, 3), type_as=Mb) lst_b.append(nx.to_numpy(A)) lst_name.append('ones') A = nx.arange(10, 1, 2) lst_b.append(nx.to_numpy(A)) lst_name.append('arange') A = nx.full((10, 3), 3.14) A = nx.full((10, 3), 3.14, type_as=Mb) lst_b.append(nx.to_numpy(A)) lst_name.append('full') A = nx.eye(10, 3) A = nx.eye(10, 3, type_as=Mb) lst_b.append(nx.to_numpy(A)) lst_name.append('eye') A = nx.sum(Mb) lst_b.append(nx.to_numpy(A)) lst_name.append('sum') A = nx.sum(Mb, axis=1, keepdims=True) lst_b.append(nx.to_numpy(A)) lst_name.append('sum(axis)') A = nx.cumsum(Mb, 0) lst_b.append(nx.to_numpy(A)) lst_name.append('cumsum(axis)') A = nx.max(Mb) lst_b.append(nx.to_numpy(A)) lst_name.append('max') A = nx.max(Mb, axis=1, keepdims=True) lst_b.append(nx.to_numpy(A)) lst_name.append('max(axis)') A = nx.min(Mb) lst_b.append(nx.to_numpy(A)) lst_name.append('min') A = nx.min(Mb, axis=1, keepdims=True) lst_b.append(nx.to_numpy(A)) lst_name.append('min(axis)') A = nx.maximum(vb, 0) lst_b.append(nx.to_numpy(A)) lst_name.append('maximum') A = nx.minimum(vb, 0) lst_b.append(nx.to_numpy(A)) lst_name.append('minimum') A = nx.sign(vb) lst_b.append(nx.to_numpy(A)) lst_name.append('sign') A = nx.abs(Mb) lst_b.append(nx.to_numpy(A)) lst_name.append('abs') A = nx.log(A) lst_b.append(nx.to_numpy(A)) lst_name.append('log') A = nx.exp(Mb) lst_b.append(nx.to_numpy(A)) lst_name.append('exp') A = nx.sqrt(nx.abs(Mb)) lst_b.append(nx.to_numpy(A)) lst_name.append('sqrt') A = nx.power(Mb, 2) lst_b.append(nx.to_numpy(A)) lst_name.append('power') A = nx.dot(vb, vb) lst_b.append(nx.to_numpy(A)) lst_name.append('dot(v,v)') A = nx.dot(Mb, vb) lst_b.append(nx.to_numpy(A)) lst_name.append('dot(M,v)') A = nx.dot(Mb, Mb.T) lst_b.append(nx.to_numpy(A)) lst_name.append('dot(M,M)') A = nx.norm(vb) lst_b.append(nx.to_numpy(A)) lst_name.append('norm') A = nx.norm(Mb, axis=1) lst_b.append(nx.to_numpy(A)) lst_name.append('norm(M,axis=1)') A = nx.norm(Mb, axis=1, keepdims=True) lst_b.append(nx.to_numpy(A)) lst_name.append('norm(M,axis=1,keepdims=True)') A = nx.any(vb > 0) lst_b.append(nx.to_numpy(A)) lst_name.append('any') A = nx.isnan(vb) lst_b.append(nx.to_numpy(A)) lst_name.append('isnan') A = nx.isinf(vb) lst_b.append(nx.to_numpy(A)) lst_name.append('isinf') A = nx.einsum('ij->i', Mb) lst_b.append(nx.to_numpy(A)) lst_name.append('einsum(ij->i)') A = nx.einsum('ij,j->i', Mb, vb) lst_b.append(nx.to_numpy(A)) lst_name.append('nx.einsum(ij,j->i)') A = nx.einsum('ij->i', Mb) lst_b.append(nx.to_numpy(A)) lst_name.append('nx.einsum(ij->i)') A = nx.sort(Mb) lst_b.append(nx.to_numpy(A)) lst_name.append('sort') A = nx.argsort(Mb) lst_b.append(nx.to_numpy(A)) lst_name.append('argsort') tmp = nx.sort(Mb) A = nx.searchsorted(tmp, tmp, 'right') lst_b.append(nx.to_numpy(A)) lst_name.append('searchsorted') A = nx.flip(Mb) lst_b.append(nx.to_numpy(A)) lst_name.append('flip') A = nx.outer(vb, vb) lst_b.append(nx.to_numpy(A)) lst_name.append('outer') A = nx.clip(vb, 0, 1) lst_b.append(nx.to_numpy(A)) lst_name.append('clip') A = nx.repeat(Mb, 0) A = nx.repeat(Mb, 2, -1) lst_b.append(nx.to_numpy(A)) lst_name.append('repeat') A = nx.take_along_axis(vb, nx.arange(3), -1) lst_b.append(nx.to_numpy(A)) 
lst_name.append('take_along_axis') A = nx.concatenate((Mb, Mb), -1) lst_b.append(nx.to_numpy(A)) lst_name.append('concatenate') A = nx.zero_pad(Mb, len(Mb.shape) * [(3, 3)]) lst_b.append(nx.to_numpy(A)) lst_name.append('zero_pad') A = nx.argmax(Mb) lst_b.append(nx.to_numpy(A)) lst_name.append('argmax') A = nx.argmin(Mb) lst_b.append(nx.to_numpy(A)) lst_name.append('argmin') A = nx.mean(Mb) lst_b.append(nx.to_numpy(A)) lst_name.append('mean') A = nx.median(Mb) lst_b.append(nx.to_numpy(A)) lst_name.append('median') A = nx.std(Mb) lst_b.append(nx.to_numpy(A)) lst_name.append('std') A = nx.linspace(0, 1, 50) A = nx.linspace(0, 1, 50, type_as=Mb) lst_b.append(nx.to_numpy(A)) lst_name.append('linspace') X, Y = nx.meshgrid(vb, vb) lst_b.append(np.stack([nx.to_numpy(X), nx.to_numpy(Y)])) lst_name.append('meshgrid') A = nx.diag(Mb) lst_b.append(nx.to_numpy(A)) lst_name.append('diag2D') A = nx.diag(vb, 1) lst_b.append(nx.to_numpy(A)) lst_name.append('diag1D') A = nx.unique(nx.from_numpy(np.stack([M, M]))) lst_b.append(nx.to_numpy(A)) lst_name.append('unique') A, A2 = nx.unique(nx.from_numpy(np.stack([M, M]).reshape(-1)), return_inverse=True) lst_b.append(nx.to_numpy(A)) lst_name.append('unique(M,return_inverse=True)[0]') lst_b.append(nx.to_numpy(A2)) lst_name.append('unique(M,return_inverse=True)[1]') A = nx.logsumexp(Mb) lst_b.append(nx.to_numpy(A)) lst_name.append('logsumexp') A = nx.stack([Mb, Mb]) lst_b.append(nx.to_numpy(A)) lst_name.append('stack') A = nx.reshape(Mb, (5, 3, 2)) lst_b.append(nx.to_numpy(A)) lst_name.append('reshape') sp_Mb = nx.coo_matrix(sp_datab, sp_rowb, sp_colb, shape=(4, 4)) nx.todense(Mb) lst_b.append(nx.to_numpy(nx.todense(sp_Mb))) lst_name.append('coo_matrix') assert not nx.issparse(Mb), 'Assert fail on: issparse (expected False)' assert nx.issparse(sp_Mb) or nx.__name__ in ("jax", "tf"), 'Assert fail on: issparse (expected True)' A = nx.tocsr(sp_Mb) lst_b.append(nx.to_numpy(nx.todense(A))) lst_name.append('tocsr') A = nx.eliminate_zeros(nx.copy(sp_datab), threshold=5.) 
lst_b.append(nx.to_numpy(A)) lst_name.append('eliminate_zeros (dense)') A = nx.eliminate_zeros(sp_Mb) lst_b.append(nx.to_numpy(nx.todense(A))) lst_name.append('eliminate_zeros (sparse)') A = nx.where(Mb >= nx.stack([nx.linspace(0, 1, 10)] * 3, axis=1), Mb, 0.0) lst_b.append(nx.to_numpy(A)) lst_name.append('where (cond, x, y)') A = nx.where(nx.from_numpy(np.array([True, False]))) lst_b.append(nx.to_numpy(nx.stack(A))) lst_name.append('where (cond)') A = nx.copy(Mb) lst_b.append(nx.to_numpy(A)) lst_name.append('copy') assert nx.allclose(Mb, Mb), 'Assert fail on: allclose (expected True)' assert not nx.allclose(2 * Mb, Mb), 'Assert fail on: allclose (expected False)' A = nx.squeeze(nx.zeros((3, 1, 4, 1))) assert tuple(A.shape) == (3, 4), 'Assert fail on: squeeze' A = nx.bitsize(Mb) lst_b.append(float(A)) lst_name.append("bitsize") A = nx.device_type(Mb) assert A in ("CPU", "GPU") nx._bench(lambda x: x, M, n_runs=1) A = nx.solve(SquareMb, Mb) lst_b.append(nx.to_numpy(A)) lst_name.append('solve') A = nx.trace(SquareMb) lst_b.append(nx.to_numpy(A)) lst_name.append('trace') A = nx.inv(SquareMb) lst_b.append(nx.to_numpy(A)) lst_name.append('matrix inverse') A = nx.sqrtm(SquareMb.T @ SquareMb) lst_b.append(nx.to_numpy(A)) lst_name.append("matrix square root") D, U = nx.eigh(SquareMb.T @ SquareMb) lst_b.append(nx.to_numpy(nx.dot(U, nx.dot(nx.diag(D), U.T)))) lst_name.append("eigh ") A = nx.kl_div(nx.abs(Mb), nx.abs(Mb) + 1) lst_b.append(nx.to_numpy(A)) lst_name.append("Kullback-Leibler divergence") A = nx.concatenate([vb, nx.from_numpy(np.array([np.inf, np.nan]))], axis=0) A = nx.isfinite(A) lst_b.append(nx.to_numpy(A)) lst_name.append("isfinite") A = nx.tile(vb, (10, 1)) lst_b.append(nx.to_numpy(A)) lst_name.append("tile") A = nx.floor(Mb) lst_b.append(nx.to_numpy(A)) lst_name.append("floor") A = nx.prod(Mb) lst_b.append(nx.to_numpy(A)) lst_name.append("prod") A, B = nx.sort2(Mb) lst_b.append(nx.to_numpy(A)) lst_name.append("sort2 sort") lst_b.append(nx.to_numpy(B)) lst_name.append("sort2 argsort") A, B = nx.qr(Mb) lst_b.append(nx.to_numpy(A)) lst_name.append("QR Q") lst_b.append(nx.to_numpy(B)) lst_name.append("QR R") A = nx.atan2(vb, vb) lst_b.append(nx.to_numpy(A)) lst_name.append("atan2") A = nx.transpose(Mb) lst_b.append(nx.to_numpy(A)) lst_name.append("transpose") A = nx.detach(Mb) lst_b.append(nx.to_numpy(A)) lst_name.append("detach") A, B = nx.detach(Mb, Mb) lst_b.append(nx.to_numpy(A)) lst_name.append("detach A") lst_b.append(nx.to_numpy(B)) lst_name.append("detach B") A = nx.matmul(Mb, Mb.T) lst_b.append(nx.to_numpy(A)) lst_name.append("matmul") A = nx.matmul(M1b, M2b) lst_b.append(nx.to_numpy(A)) lst_name.append("matmul broadcast") vec = nx.from_numpy(np.array([1, np.nan, -1])) vec = nx.nan_to_num(vec, nan=0) lst_b.append(nx.to_numpy(vec)) lst_name.append("nan_to_num") assert not nx.array_equal(Mb, vb), "array_equal (shape)" assert nx.array_equal(Mb, Mb), "array_equal (elements) - expected true" assert not nx.array_equal( Mb, Mb + nx.eye(*list(Mb.shape)) ), "array_equal (elements) - expected false" assert nx.is_floating_point(Mb), "is_floating_point - expected true" assert not nx.is_floating_point( nx.from_numpy(np.array([0, 1, 2], dtype=int)) ), "is_floating_point - expected false" lst_tot.append(lst_b) lst_np = lst_tot[0] lst_b = lst_tot[1] for a1, a2, name in zip(lst_np, lst_b, lst_name): np.testing.assert_allclose( a2, a1, atol=1e-7, err_msg=f'ASSERT FAILED ON: {name}' ) def test_random_backends(nx): tmp_u = nx.rand() assert tmp_u < 1 tmp_n = nx.randn() nx.seed(0) M1 = 
nx.to_numpy(nx.rand(5, 2)) nx.seed(0) M2 = nx.to_numpy(nx.rand(5, 2, type_as=tmp_n)) assert np.all(M1 >= 0) assert np.all(M1 < 1) assert M1.shape == (5, 2) assert np.allclose(M1, M2) nx.seed(0) M1 = nx.to_numpy(nx.randn(5, 2)) nx.seed(0) M2 = nx.to_numpy(nx.randn(5, 2, type_as=tmp_u)) nx.seed(42) v1 = nx.randn() v2 = nx.randn() assert v1 != v2 def test_gradients_backends(): rnd = np.random.RandomState(0) v = rnd.randn(10) c = rnd.randn() e = rnd.randn() if torch: nx = ot.backend.TorchBackend() v2 = torch.tensor(v, requires_grad=True) c2 = torch.tensor(c, requires_grad=True) val = c2 * torch.sum(v2 * v2) val2 = nx.set_gradients(val, (v2, c2), (v2, c2)) val2.backward() assert torch.equal(v2.grad, v2) assert torch.equal(c2.grad, c2) if jax: nx = ot.backend.JaxBackend() with jax.checking_leaks(): def fun(a, b, d): val = b * nx.sum(a ** 4) + d return nx.set_gradients(val, (a, b, d), (a, b, 2 * d)) grad_val = jax.grad(fun, argnums=(0, 1, 2))(v, c, e) np.testing.assert_almost_equal(fun(v, c, e), c * np.sum(v ** 4) + e, decimal=4) np.testing.assert_allclose(grad_val[0], v, atol=1e-4) np.testing.assert_allclose(grad_val[2], 2 * e, atol=1e-4) if tf: nx = ot.backend.TensorflowBackend() w = tf.Variable(tf.random.normal((3, 2)), name='w') b = tf.Variable(tf.random.normal((2,), dtype=tf.float32), name='b') x = tf.random.normal((1, 3), dtype=tf.float32) with tf.GradientTape() as tape: y = x @ w + b loss = tf.reduce_mean(y ** 2) manipulated_loss = nx.set_gradients(loss, (w, b), (w, b)) [dl_dw, dl_db] = tape.gradient(manipulated_loss, [w, b]) assert nx.allclose(dl_dw, w) assert nx.allclose(dl_db, b) def test_get_backend_none(): a, b = np.zeros((2, 3)), None nx = get_backend(a, b) assert str(nx) == 'numpy' with pytest.raises(ValueError): get_backend(None, None) python-pot-0.9.3+dfsg/test/test_bregman.py000066400000000000000000001402551455713015700206050ustar00rootroot00000000000000"""Tests for module bregman on OT with bregman projections """ # Author: Remi Flamary # Kilian Fatras # Quang Huy Tran # Eduardo Fernandes Montesuma # # License: MIT License import warnings from itertools import product import numpy as np import pytest import ot from ot.backend import tf, torch from ot.bregman import geomloss @pytest.mark.parametrize("verbose, warn", product([True, False], [True, False])) def test_sinkhorn(verbose, warn): # test sinkhorn n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 2) u = ot.utils.unif(n) M = ot.dist(x, x) G = ot.sinkhorn(u, u, M, 1, stopThr=1e-10, verbose=verbose, warn=warn) # check constraints np.testing.assert_allclose( u, G.sum(1), atol=1e-05) # cf convergence sinkhorn np.testing.assert_allclose( u, G.sum(0), atol=1e-05) # cf convergence sinkhorn with pytest.warns(UserWarning): ot.sinkhorn(u, u, M, 1, stopThr=0, numItermax=1) @pytest.mark.parametrize("method", ["sinkhorn", "sinkhorn_stabilized", "sinkhorn_epsilon_scaling", "greenkhorn", "sinkhorn_log"]) def test_convergence_warning(method): # test sinkhorn n = 100 a1 = ot.datasets.make_1D_gauss(n, m=30, s=10) a2 = ot.datasets.make_1D_gauss(n, m=40, s=10) A = np.asarray([a1, a2]).T M = ot.utils.dist0(n) with pytest.warns(UserWarning): ot.sinkhorn(a1, a2, M, 1., method=method, stopThr=0, numItermax=1) if method in ["sinkhorn", "sinkhorn_stabilized", "sinkhorn_log"]: with pytest.warns(UserWarning): ot.barycenter(A, M, 1, method=method, stopThr=0, numItermax=1) with pytest.warns(UserWarning): ot.sinkhorn2(a1, a2, M, 1, method=method, stopThr=0, numItermax=1, warn=True) with warnings.catch_warnings(): warnings.simplefilter("error") 
ot.sinkhorn2(a1, a2, M, 1, method=method, stopThr=0, numItermax=1, warn=False) def test_not_implemented_method(): # test sinkhorn w = 10 n = w ** 2 rng = np.random.RandomState(42) A_img = rng.rand(2, w, w) A_flat = A_img.reshape(n, 2) a1, a2 = A_flat.T M_flat = ot.utils.dist0(n) not_implemented = "new_method" reg = 0.01 with pytest.raises(ValueError): ot.sinkhorn(a1, a2, M_flat, reg, method=not_implemented) with pytest.raises(ValueError): ot.sinkhorn2(a1, a2, M_flat, reg, method=not_implemented) with pytest.raises(ValueError): ot.barycenter(A_flat, M_flat, reg, method=not_implemented) with pytest.raises(ValueError): ot.bregman.barycenter_debiased(A_flat, M_flat, reg, method=not_implemented) with pytest.raises(ValueError): ot.bregman.convolutional_barycenter2d(A_img, reg, method=not_implemented) with pytest.raises(ValueError): ot.bregman.convolutional_barycenter2d_debiased(A_img, reg, method=not_implemented) @pytest.mark.parametrize("method", ["sinkhorn", "sinkhorn_stabilized"]) def test_nan_warning(method): # test sinkhorn n = 100 a1 = ot.datasets.make_1D_gauss(n, m=30, s=10) a2 = ot.datasets.make_1D_gauss(n, m=40, s=10) M = ot.utils.dist0(n) reg = 0 with pytest.warns(UserWarning): # warn set to False to avoid catching a convergence warning instead ot.sinkhorn(a1, a2, M, reg, method=method, warn=False) def test_sinkhorn_stabilization(): # test sinkhorn n = 100 a1 = ot.datasets.make_1D_gauss(n, m=30, s=10) a2 = ot.datasets.make_1D_gauss(n, m=40, s=10) M = ot.utils.dist0(n) reg = 1e-5 loss1 = ot.sinkhorn2(a1, a2, M, reg, method="sinkhorn_log") loss2 = ot.sinkhorn2(a1, a2, M, reg, tau=1, method="sinkhorn_stabilized") np.testing.assert_allclose( loss1, loss2, atol=1e-06) # cf convergence sinkhorn @pytest.mark.parametrize("method, verbose, warn", product(["sinkhorn", "sinkhorn_stabilized", "sinkhorn_log"], [True, False], [True, False])) def test_sinkhorn_multi_b(method, verbose, warn): # test sinkhorn n = 10 rng = np.random.RandomState(0) x = rng.randn(n, 2) u = ot.utils.unif(n) b = rng.rand(n, 3) b = b / np.sum(b, 0, keepdims=True) M = ot.dist(x, x) loss0, log = ot.sinkhorn(u, b, M, .1, method=method, stopThr=1e-10, log=True) loss = [ot.sinkhorn2(u, b[:, k], M, .1, method=method, stopThr=1e-10, verbose=verbose, warn=warn) for k in range(3)] # check constraints np.testing.assert_allclose( loss0, loss, atol=1e-4) # cf convergence sinkhorn def test_sinkhorn_backends(nx): n_samples = 100 n_features = 2 rng = np.random.RandomState(0) x = rng.randn(n_samples, n_features) y = rng.randn(n_samples, n_features) a = ot.utils.unif(n_samples) M = ot.dist(x, y) G = ot.sinkhorn(a, a, M, 1) ab, M_nx = nx.from_numpy(a, M) Gb = ot.sinkhorn(ab, ab, M_nx, 1) np.allclose(G, nx.to_numpy(Gb)) def test_sinkhorn2_backends(nx): n_samples = 100 n_features = 2 rng = np.random.RandomState(0) x = rng.randn(n_samples, n_features) y = rng.randn(n_samples, n_features) a = ot.utils.unif(n_samples) M = ot.dist(x, y) G = ot.sinkhorn(a, a, M, 1) ab, M_nx = nx.from_numpy(a, M) Gb = ot.sinkhorn2(ab, ab, M_nx, 1) np.allclose(G, nx.to_numpy(Gb)) def test_sinkhorn2_gradients(): n_samples = 100 n_features = 2 rng = np.random.RandomState(0) x = rng.randn(n_samples, n_features) y = rng.randn(n_samples, n_features) a = ot.utils.unif(n_samples) M = ot.dist(x, y) if torch: a1 = torch.tensor(a, requires_grad=True) b1 = torch.tensor(a, requires_grad=True) M1 = torch.tensor(M, requires_grad=True) val = ot.sinkhorn2(a1, b1, M1, 1) val.backward() assert a1.shape == a1.grad.shape assert b1.shape == b1.grad.shape assert M1.shape == M1.grad.shape 
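# Illustrative sketch (an addition, not original POT test code): the autograd
# pattern exercised by test_sinkhorn2_gradients above, condensed into one
# standalone helper. The helper name is hypothetical; it only assumes that
# torch is installed and that ot.sinkhorn2 returns a differentiable scalar,
# which the test above already relies on.
def _sketch_sinkhorn2_autograd():
    if not torch:
        return
    rng = np.random.RandomState(0)
    x = rng.randn(20, 2)
    y = rng.randn(20, 2)
    # uniform weights and squared-Euclidean cost, as in the tests above
    a = torch.tensor(ot.utils.unif(20), requires_grad=True)
    M = torch.tensor(ot.dist(x, y), requires_grad=True)
    # sinkhorn2 returns the scalar regularized OT value, so a single
    # backward() call populates gradients for both weights and cost matrix
    loss = ot.sinkhorn2(a, a.detach(), M, 1)
    loss.backward()
    assert a.grad.shape == a.shape
    assert M.grad.shape == M.shape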
def test_sinkhorn_empty(): # test sinkhorn n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 2) u = ot.utils.unif(n) M = ot.dist(x, x) G, log = ot.sinkhorn([], [], M, 1, stopThr=1e-10, method="sinkhorn_log", verbose=True, log=True) # check constraints np.testing.assert_allclose(u, G.sum(1), atol=1e-05) np.testing.assert_allclose(u, G.sum(0), atol=1e-05) G, log = ot.sinkhorn([], [], M, 1, stopThr=1e-10, verbose=True, log=True) # check constraints np.testing.assert_allclose(u, G.sum(1), atol=1e-05) np.testing.assert_allclose(u, G.sum(0), atol=1e-05) G, log = ot.sinkhorn([], [], M, 1, stopThr=1e-10, method='sinkhorn_stabilized', verbose=True, log=True) # check constraints np.testing.assert_allclose(u, G.sum(1), atol=1e-05) np.testing.assert_allclose(u, G.sum(0), atol=1e-05) G, log = ot.sinkhorn( [], [], M, 1, stopThr=1e-10, method='sinkhorn_epsilon_scaling', verbose=True, log=True) # check constraints np.testing.assert_allclose(u, G.sum(1), atol=1e-05) np.testing.assert_allclose(u, G.sum(0), atol=1e-05) # test empty weights greenkhorn ot.sinkhorn([], [], M, 1, method='greenkhorn', stopThr=1e-10, log=True) @pytest.skip_backend('tf') @pytest.skip_backend("jax") def test_sinkhorn_variants(nx): # test sinkhorn n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 2) u = ot.utils.unif(n) M = ot.dist(x, x) ub, M_nx = nx.from_numpy(u, M) G = ot.sinkhorn(u, u, M, 1, method='sinkhorn', stopThr=1e-10) Gl = nx.to_numpy(ot.sinkhorn( ub, ub, M_nx, 1, method='sinkhorn_log', stopThr=1e-10)) G0 = nx.to_numpy(ot.sinkhorn( ub, ub, M_nx, 1, method='sinkhorn', stopThr=1e-10)) Gs = nx.to_numpy(ot.sinkhorn( ub, ub, M_nx, 1, method='sinkhorn_stabilized', stopThr=1e-10)) Ges = nx.to_numpy(ot.sinkhorn( ub, ub, M_nx, 1, method='sinkhorn_epsilon_scaling', stopThr=1e-10)) G_green = nx.to_numpy(ot.sinkhorn( ub, ub, M_nx, 1, method='greenkhorn', stopThr=1e-10)) # check values np.testing.assert_allclose(G, G0, atol=1e-05) np.testing.assert_allclose(G, Gl, atol=1e-05) np.testing.assert_allclose(G0, Gs, atol=1e-05) np.testing.assert_allclose(G0, Ges, atol=1e-05) np.testing.assert_allclose(G0, G_green, atol=1e-5) @pytest.mark.parametrize("method", ["sinkhorn", "sinkhorn_stabilized", "sinkhorn_epsilon_scaling", "greenkhorn", "sinkhorn_log"]) @pytest.skip_arg(("nx", "method"), ("tf", "sinkhorn_epsilon_scaling"), reason="tf does not support sinkhorn_epsilon_scaling", getter=str) @pytest.skip_arg(("nx", "method"), ("tf", "greenkhorn"), reason="tf does not support greenkhorn", getter=str) @pytest.skip_arg(("nx", "method"), ("jax", "sinkhorn_epsilon_scaling"), reason="jax does not support sinkhorn_epsilon_scaling", getter=str) @pytest.skip_arg(("nx", "method"), ("jax", "greenkhorn"), reason="jax does not support greenkhorn", getter=str) def test_sinkhorn_variants_dtype_device(nx, method): n = 100 rng = np.random.RandomState(42) x = rng.randn(n, 2) u = ot.utils.unif(n) M = ot.dist(x, x) for tp in nx.__type_list__: print(nx.dtype_device(tp)) ub, Mb = nx.from_numpy(u, M, type_as=tp) Gb = ot.sinkhorn(ub, ub, Mb, 1, method=method, stopThr=1e-10) nx.assert_same_dtype_device(Mb, Gb) @pytest.mark.parametrize("method", ["sinkhorn", "sinkhorn_stabilized", "sinkhorn_log"]) def test_sinkhorn2_variants_dtype_device(nx, method): n = 100 rng = np.random.RandomState(42) x = rng.randn(n, 2) u = ot.utils.unif(n) M = ot.dist(x, x) for tp in nx.__type_list__: print(nx.dtype_device(tp)) ub, Mb = nx.from_numpy(u, M, type_as=tp) lossb = ot.sinkhorn2(ub, ub, Mb, 1, method=method, stopThr=1e-10) nx.assert_same_dtype_device(Mb, lossb) 
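# Minimal sketch (an addition, not original POT test code): the dtype/device
# round-trip convention that the two tests above check on every backend,
# spelled out with the always-available NumPy backend. assert_same_dtype_device
# is the backend helper already used above; the function name here is
# hypothetical.
def _sketch_dtype_device_roundtrip():
    nx = ot.backend.NumpyBackend()
    rng = np.random.RandomState(0)
    x = rng.randn(10, 2)
    u = ot.utils.unif(10)
    M = ot.dist(x, x)
    # from_numpy converts all inputs to the backend's native array type
    ub, Mb = nx.from_numpy(u, M)
    Gb = ot.sinkhorn(ub, ub, Mb, 1, stopThr=1e-10)
    lossb = ot.sinkhorn2(ub, ub, Mb, 1, stopThr=1e-10)
    # both the transport plan and the scalar loss keep the dtype (and, on
    # GPU backends, the device) of their inputs
    nx.assert_same_dtype_device(Mb, Gb)
    nx.assert_same_dtype_device(Mb, lossb)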
@pytest.mark.skipif(not tf, reason="tf not installed") @pytest.mark.parametrize("method", ["sinkhorn", "sinkhorn_stabilized", "sinkhorn_log"]) def test_sinkhorn2_variants_device_tf(method): nx = ot.backend.TensorflowBackend() n = 100 rng = np.random.RandomState(42) x = rng.randn(n, 2) u = ot.utils.unif(n) M = ot.dist(x, x) # Check that everything stays on the CPU with tf.device("/CPU:0"): ub, Mb = nx.from_numpy(u, M) Gb = ot.sinkhorn(ub, ub, Mb, 1, method=method, stopThr=1e-10) lossb = ot.sinkhorn2(ub, ub, Mb, 1, method=method, stopThr=1e-10) nx.assert_same_dtype_device(Mb, Gb) nx.assert_same_dtype_device(Mb, lossb) # Check that everything happens on the GPU ub, Mb = nx.from_numpy(u, M) Gb = ot.sinkhorn(ub, ub, Mb, 1, method=method, stopThr=1e-10) lossb = ot.sinkhorn2(ub, ub, Mb, 1, method=method, stopThr=1e-10) nx.assert_same_dtype_device(Mb, Gb) nx.assert_same_dtype_device(Mb, lossb) # Check this only if GPU is available if len(tf.config.list_physical_devices('GPU')) > 0: assert nx.dtype_device(Gb)[1].startswith("GPU") @pytest.skip_backend('tf') @pytest.skip_backend("jax") def test_sinkhorn_variants_multi_b(nx): # test sinkhorn n = 50 rng = np.random.RandomState(0) x = rng.randn(n, 2) u = ot.utils.unif(n) b = rng.rand(n, 3) b = b / np.sum(b, 0, keepdims=True) M = ot.dist(x, x) ub, bb, M_nx = nx.from_numpy(u, b, M) G = ot.sinkhorn(u, b, M, 1, method='sinkhorn', stopThr=1e-10) Gl = nx.to_numpy(ot.sinkhorn( ub, bb, M_nx, 1, method='sinkhorn_log', stopThr=1e-10)) G0 = nx.to_numpy(ot.sinkhorn( ub, bb, M_nx, 1, method='sinkhorn', stopThr=1e-10)) Gs = nx.to_numpy(ot.sinkhorn( ub, bb, M_nx, 1, method='sinkhorn_stabilized', stopThr=1e-10)) # check values np.testing.assert_allclose(G, G0, atol=1e-05) np.testing.assert_allclose(G, Gl, atol=1e-05) np.testing.assert_allclose(G0, Gs, atol=1e-05) @pytest.skip_backend('tf') @pytest.skip_backend("jax") def test_sinkhorn2_variants_multi_b(nx): # test sinkhorn n = 50 rng = np.random.RandomState(0) x = rng.randn(n, 2) u = ot.utils.unif(n) b = rng.rand(n, 3) b = b / np.sum(b, 0, keepdims=True) M = ot.dist(x, x) ub, bb, M_nx = nx.from_numpy(u, b, M) G = ot.sinkhorn2(u, b, M, 1, method='sinkhorn', stopThr=1e-10) Gl = nx.to_numpy(ot.sinkhorn2( ub, bb, M_nx, 1, method='sinkhorn_log', stopThr=1e-10)) G0 = nx.to_numpy(ot.sinkhorn2( ub, bb, M_nx, 1, method='sinkhorn', stopThr=1e-10)) Gs = nx.to_numpy(ot.sinkhorn2( ub, bb, M_nx, 1, method='sinkhorn_stabilized', stopThr=1e-10)) # check values np.testing.assert_allclose(G, G0, atol=1e-05) np.testing.assert_allclose(G, Gl, atol=1e-05) np.testing.assert_allclose(G0, Gs, atol=1e-05) def test_sinkhorn_variants_log(): # test sinkhorn n = 50 rng = np.random.RandomState(0) x = rng.randn(n, 2) u = ot.utils.unif(n) M = ot.dist(x, x) G0, log0 = ot.sinkhorn(u, u, M, 1, method='sinkhorn', stopThr=1e-10, log=True) Gl, logl = ot.sinkhorn( u, u, M, 1, method='sinkhorn_log', stopThr=1e-10, log=True) Gs, logs = ot.sinkhorn( u, u, M, 1, method='sinkhorn_stabilized', stopThr=1e-10, log=True) Ges, loges = ot.sinkhorn( u, u, M, 1, method='sinkhorn_epsilon_scaling', stopThr=1e-10, log=True,) G_green, loggreen = ot.sinkhorn( u, u, M, 1, method='greenkhorn', stopThr=1e-10, log=True) # check values np.testing.assert_allclose(G0, Gs, atol=1e-05) np.testing.assert_allclose(G0, Gl, atol=1e-05) np.testing.assert_allclose(G0, Ges, atol=1e-05) np.testing.assert_allclose(G0, G_green, atol=1e-5) @pytest.mark.parametrize("verbose, warn", product([True, False], [True, False])) def test_sinkhorn_variants_log_multib(verbose, warn): # test sinkhorn n = 50 
rng = np.random.RandomState(0) x = rng.randn(n, 2) u = ot.utils.unif(n) b = rng.rand(n, 3) b = b / np.sum(b, 0, keepdims=True) M = ot.dist(x, x) G0, log0 = ot.sinkhorn(u, b, M, 1, method='sinkhorn', stopThr=1e-10, log=True) Gl, logl = ot.sinkhorn(u, b, M, 1, method='sinkhorn_log', stopThr=1e-10, log=True, verbose=verbose, warn=warn) Gs, logs = ot.sinkhorn(u, b, M, 1, method='sinkhorn_stabilized', stopThr=1e-10, log=True, verbose=verbose, warn=warn) # check values np.testing.assert_allclose(G0, Gs, atol=1e-05) np.testing.assert_allclose(G0, Gl, atol=1e-05) @pytest.mark.parametrize("method, verbose, warn", product(["sinkhorn", "sinkhorn_stabilized", "sinkhorn_log"], [True, False], [True, False])) def test_barycenter(nx, method, verbose, warn): n_bins = 100 # nb bins # Gaussian distributions a1 = ot.datasets.make_1D_gauss(n_bins, m=30, s=10) # m= mean, s= std a2 = ot.datasets.make_1D_gauss(n_bins, m=40, s=10) # creating matrix A containing all distributions A = np.vstack((a1, a2)).T # loss matrix + normalization M = ot.utils.dist0(n_bins) M /= M.max() alpha = 0.5 # 0<=alpha<=1 weights = np.array([1 - alpha, alpha]) A_nx, M_nx, weights_nx = nx.from_numpy(A, M, weights) reg = 1e-2 if nx.__name__ in ("jax", "tf") and method == "sinkhorn_log": with pytest.raises(NotImplementedError): ot.bregman.barycenter(A_nx, M_nx, reg, weights, method=method) else: # wasserstein bary_wass_np = ot.bregman.barycenter( A, M, reg, weights, method=method, verbose=verbose, warn=warn) bary_wass, _ = ot.bregman.barycenter( A_nx, M_nx, reg, weights_nx, method=method, log=True) bary_wass = nx.to_numpy(bary_wass) np.testing.assert_allclose(1, np.sum(bary_wass)) np.testing.assert_allclose(bary_wass, bary_wass_np) ot.bregman.barycenter(A_nx, M_nx, reg, log=True) def test_free_support_sinkhorn_barycenter(): measures_locations = [ np.array([-1.]).reshape((1, 1)), # First dirac support np.array([1.]).reshape((1, 1)) # Second dirac support ] measures_weights = [ np.array([1.]), # First dirac sample weights np.array([1.]) # Second dirac sample weights ] # Barycenter initialization X_init = np.array([-12.]).reshape((1, 1)) # Obvious barycenter locations. Take a look on test_ot.py, test_free_support_barycenter bar_locations = np.array([0.]).reshape((1, 1)) # Calculate free support barycenter w/ Sinkhorn algorithm. We set the entropic regularization # term to 1, but this should be, in general, fine-tuned to the problem. 
X = ot.bregman.free_support_sinkhorn_barycenter( measures_locations, measures_weights, X_init, reg=1) # Verifies if calculated barycenter matches ground-truth np.testing.assert_allclose(X, bar_locations, rtol=1e-5, atol=1e-7) @pytest.mark.parametrize("method, verbose, warn", product(["sinkhorn", "sinkhorn_stabilized", "sinkhorn_log"], [True, False], [True, False])) def test_barycenter_assymetric_cost(nx, method, verbose, warn): n_bins = 20 # nb bins # Gaussian distributions A = ot.datasets.make_1D_gauss(n_bins, m=30, s=10) # m= mean, s= std # creating matrix A containing all distributions A = A[:, None] # assymetric loss matrix + normalization rng = np.random.RandomState(42) M = rng.randn(n_bins, n_bins) ** 2 M /= M.max() A_nx, M_nx = nx.from_numpy(A, M) reg = 1e-2 if nx.__name__ in ("jax", "tf") and method == "sinkhorn_log": with pytest.raises(NotImplementedError): ot.bregman.barycenter(A_nx, M_nx, reg, method=method) else: # wasserstein bary_wass_np = ot.bregman.barycenter( A, M, reg, method=method, verbose=verbose, warn=warn) bary_wass, _ = ot.bregman.barycenter( A_nx, M_nx, reg, method=method, log=True) bary_wass = nx.to_numpy(bary_wass) np.testing.assert_allclose(1, np.sum(bary_wass)) np.testing.assert_allclose(bary_wass, bary_wass_np) ot.bregman.barycenter(A_nx, M_nx, reg, log=True) @pytest.mark.parametrize("method, verbose, warn", product(["sinkhorn", "sinkhorn_log"], [True, False], [True, False])) def test_barycenter_debiased(nx, method, verbose, warn): n_bins = 100 # nb bins # Gaussian distributions a1 = ot.datasets.make_1D_gauss(n_bins, m=30, s=10) # m= mean, s= std a2 = ot.datasets.make_1D_gauss(n_bins, m=40, s=10) # creating matrix A containing all distributions A = np.vstack((a1, a2)).T # loss matrix + normalization M = ot.utils.dist0(n_bins) M /= M.max() alpha = 0.5 # 0<=alpha<=1 weights = np.array([1 - alpha, alpha]) A_nx, M_nx, weights_nx = nx.from_numpy(A, M, weights) # wasserstein reg = 1e-2 if nx.__name__ in ("jax", "tf") and method == "sinkhorn_log": with pytest.raises(NotImplementedError): ot.bregman.barycenter_debiased( A_nx, M_nx, reg, weights, method=method) else: bary_wass_np = ot.bregman.barycenter_debiased(A, M, reg, weights, method=method, verbose=verbose, warn=warn) bary_wass, _ = ot.bregman.barycenter_debiased( A_nx, M_nx, reg, weights_nx, method=method, log=True) bary_wass = nx.to_numpy(bary_wass) np.testing.assert_allclose(1, np.sum(bary_wass), atol=1e-3) np.testing.assert_allclose(bary_wass, bary_wass_np, atol=1e-5) ot.bregman.barycenter_debiased( A_nx, M_nx, reg, log=True, verbose=False) @pytest.mark.parametrize("method", ["sinkhorn", "sinkhorn_log"]) def test_convergence_warning_barycenters(method): w = 10 n_bins = w ** 2 # nb bins # Gaussian distributions a1 = ot.datasets.make_1D_gauss(n_bins, m=30, s=10) # m= mean, s= std a2 = ot.datasets.make_1D_gauss(n_bins, m=40, s=10) # creating matrix A containing all distributions A = np.vstack((a1, a2)).T A_img = A.reshape(2, w, w) A_img /= A_img.sum((1, 2))[:, None, None] # loss matrix + normalization M = ot.utils.dist0(n_bins) M /= M.max() alpha = 0.5 # 0<=alpha<=1 weights = np.array([1 - alpha, alpha]) reg = 0.1 with pytest.warns(UserWarning): ot.bregman.barycenter_debiased( A, M, reg, weights, method=method, numItermax=1) with pytest.warns(UserWarning): ot.bregman.barycenter(A, M, reg, weights, method=method, numItermax=1) with pytest.warns(UserWarning): ot.bregman.convolutional_barycenter2d(A_img, reg, weights, method=method, numItermax=1) with pytest.warns(UserWarning): 
ot.bregman.convolutional_barycenter2d_debiased(A_img, reg, weights, method=method, numItermax=1) def test_barycenter_stabilization(nx): n_bins = 100 # nb bins # Gaussian distributions a1 = ot.datasets.make_1D_gauss(n_bins, m=30, s=10) # m= mean, s= std a2 = ot.datasets.make_1D_gauss(n_bins, m=40, s=10) # creating matrix A containing all distributions A = np.vstack((a1, a2)).T # loss matrix + normalization M = ot.utils.dist0(n_bins) M /= M.max() alpha = 0.5 # 0<=alpha<=1 weights = np.array([1 - alpha, alpha]) A_nx, M_nx, weights_b = nx.from_numpy(A, M, weights) # wasserstein reg = 1e-2 bar_np = ot.bregman.barycenter( A, M, reg, weights, method="sinkhorn", stopThr=1e-8, verbose=True) bar_stable = nx.to_numpy(ot.bregman.barycenter( A_nx, M_nx, reg, weights_b, method="sinkhorn_stabilized", stopThr=1e-8, verbose=True )) bar = nx.to_numpy(ot.bregman.barycenter( A_nx, M_nx, reg, weights_b, method="sinkhorn", stopThr=1e-8, verbose=True )) np.testing.assert_allclose(bar, bar_stable) np.testing.assert_allclose(bar, bar_np) def create_random_images_dist(seed, size=20): """Creates an array of two random images of size (size, size). Returns an array of shape (2, size, size).""" rng = np.random.RandomState(seed) # First image a1 = rng.rand(size, size) a1 += a1.min() a1 = a1 / np.sum(a1) # Ensure that it is a probability distribution # Second image a2 = rng.rand(size, size) a2 += a2.min() a2 = a2 / np.sum(a2) # Creating matrix A containing all distributions A = np.zeros((2, size, size)) A[0, :, :] = a1 A[1, :, :] = a2 return A @pytest.mark.parametrize("method", ["sinkhorn", "sinkhorn_log"]) def test_wasserstein_bary_2d(nx, method): # Create the array of images to test A = create_random_images_dist(42, size=20) A_nx = nx.from_numpy(A) # wasserstein reg = 1e-2 if nx.__name__ in ("jax", "tf") and method == "sinkhorn_log": with pytest.raises(NotImplementedError): ot.bregman.convolutional_barycenter2d(A_nx, reg, method=method) else: bary_wass_np, log_np = ot.bregman.convolutional_barycenter2d( A, reg, method=method, verbose=True, log=True ) bary_wass = nx.to_numpy( ot.bregman.convolutional_barycenter2d(A_nx, reg, method=method) ) np.testing.assert_allclose(1, np.sum(bary_wass), rtol=1e-3) np.testing.assert_allclose(bary_wass, bary_wass_np, atol=1e-3) # help in checking if log and verbose do not bug the function ot.bregman.convolutional_barycenter2d(A, reg, log=True, verbose=True) @pytest.mark.parametrize("method", ["sinkhorn", "sinkhorn_log"]) def test_wasserstein_bary_2d_dtype_device(nx, method): # Create the array of images to test A = create_random_images_dist(42, size=20) for tp in nx.__type_list__: print(nx.dtype_device(tp)) Ab = nx.from_numpy(A, type_as=tp) # wasserstein reg = 1e-2 if nx.__name__ in ("jax", "tf") and method == "sinkhorn_log": with pytest.raises(NotImplementedError): ot.bregman.convolutional_barycenter2d(Ab, reg, method=method) else: # Compute the barycenter with numpy bary_wass_np, log_np = ot.bregman.convolutional_barycenter2d( A, reg, method=method, verbose=True, log=True ) # Compute the barycenter with the backend bary_wass_b = ot.bregman.convolutional_barycenter2d(Ab, reg, method=method) # Convert the backend result to numpy, to compare with the numpy result bary_wass = nx.to_numpy(bary_wass_b) np.testing.assert_allclose(1, np.sum(bary_wass), rtol=1e-3) np.testing.assert_allclose(bary_wass, bary_wass_np, atol=1e-3) # help in checking if log and verbose do not bug the function ot.bregman.convolutional_barycenter2d(A, reg, log=True, verbose=True) # Test that the dtype and device are 
the same after the computation nx.assert_same_dtype_device(Ab, bary_wass_b) @pytest.mark.skipif(not tf, reason="tf not installed") @pytest.mark.parametrize("method", ["sinkhorn", "sinkhorn_log"]) def test_wasserstein_bary_2d_device_tf(method): # Using the Tensorflow backend nx = ot.backend.TensorflowBackend() # Create the array of images to test A = create_random_images_dist(42, size=20) # Check that everything stays on the CPU with tf.device("/CPU:0"): Ab = nx.from_numpy(A) # wasserstein reg = 1e-2 if method == "sinkhorn_log": with pytest.raises(NotImplementedError): ot.bregman.convolutional_barycenter2d(Ab, reg, method=method) else: # Compute the barycenter with numpy bary_wass_np, log_np = ot.bregman.convolutional_barycenter2d( A, reg, method=method, verbose=True, log=True ) # Compute the barycenter with the backend bary_wass_b = ot.bregman.convolutional_barycenter2d(Ab, reg, method=method) # Convert the backend result to numpy, to compare with the numpy result bary_wass = nx.to_numpy(bary_wass_b) np.testing.assert_allclose(1, np.sum(bary_wass), rtol=1e-3) np.testing.assert_allclose(bary_wass, bary_wass_np, atol=1e-3) # help in checking if log and verbose do not bug the function ot.bregman.convolutional_barycenter2d(A, reg, log=True, verbose=True) # Test that the dtype and device are the same after the computation nx.assert_same_dtype_device(Ab, bary_wass_b) # Check that everything happens on the GPU Ab = nx.from_numpy(A) # wasserstein reg = 1e-2 if method == "sinkhorn_log": with pytest.raises(NotImplementedError): ot.bregman.convolutional_barycenter2d(Ab, reg, method=method) else: # Compute the barycenter with numpy bary_wass_np, log_np = ot.bregman.convolutional_barycenter2d( A, reg, method=method, verbose=True, log=True ) # Compute the barycenter with the backend bary_wass_b = ot.bregman.convolutional_barycenter2d(Ab, reg, method=method) # Convert the backend result to numpy, to compare with the numpy result bary_wass = nx.to_numpy(bary_wass_b) np.testing.assert_allclose(1, np.sum(bary_wass), rtol=1e-3) np.testing.assert_allclose(bary_wass, bary_wass_np, atol=1e-3) # help in checking if log and verbose do not bug the function ot.bregman.convolutional_barycenter2d(A, reg, log=True, verbose=True) # Test that the dtype and device are the same after the computation nx.assert_same_dtype_device(Ab, bary_wass_b) # Check this only if GPU is available if len(tf.config.list_physical_devices("GPU")) > 0: assert nx.dtype_device(bary_wass_b)[1].startswith("GPU") @pytest.mark.parametrize("method", ["sinkhorn", "sinkhorn_log"]) def test_wasserstein_bary_2d_debiased(nx, method): # Create the array of images to test A = create_random_images_dist(42, size=20) A_nx = nx.from_numpy(A) # wasserstein reg = 1e-2 if nx.__name__ in ("jax", "tf") and method == "sinkhorn_log": with pytest.raises(NotImplementedError): ot.bregman.convolutional_barycenter2d_debiased(A_nx, reg, method=method) else: bary_wass_np, log_np = ot.bregman.convolutional_barycenter2d_debiased( A, reg, method=method, verbose=True, log=True ) bary_wass = nx.to_numpy( ot.bregman.convolutional_barycenter2d_debiased(A_nx, reg, method=method) ) np.testing.assert_allclose(1, np.sum(bary_wass), rtol=1e-3) np.testing.assert_allclose(bary_wass, bary_wass_np, atol=1e-3) # help in checking if log and verbose do not bug the function ot.bregman.convolutional_barycenter2d_debiased(A, reg, log=True, verbose=True) @pytest.mark.parametrize("method", ["sinkhorn", "sinkhorn_log"]) def test_wasserstein_bary_2d_debiased_dtype_device(nx, method): # Create the 
array of images to test A = create_random_images_dist(42, size=20) for tp in nx.__type_list__: print(nx.dtype_device(tp)) Ab = nx.from_numpy(A, type_as=tp) # wasserstein reg = 1e-2 if nx.__name__ in ("jax", "tf") and method == "sinkhorn_log": with pytest.raises(NotImplementedError): ot.bregman.convolutional_barycenter2d_debiased(Ab, reg, method=method) else: # Compute the barycenter with numpy bary_wass_np, log_np = ot.bregman.convolutional_barycenter2d_debiased( A, reg, method=method, verbose=True, log=True ) # Compute the barycenter with the backend bary_wass_b = ot.bregman.convolutional_barycenter2d_debiased( Ab, reg, method=method ) # Convert the backend result to numpy, to compare with the numpy result bary_wass = nx.to_numpy(bary_wass_b) np.testing.assert_allclose(1, np.sum(bary_wass), rtol=1e-3) np.testing.assert_allclose(bary_wass, bary_wass_np, atol=1e-3) # help in checking if log and verbose do not bug the function ot.bregman.convolutional_barycenter2d_debiased( A, reg, log=True, verbose=True ) # Test that the dtype and device are the same after the computation nx.assert_same_dtype_device(Ab, bary_wass_b) @pytest.mark.skipif(not tf, reason="tf not installed") @pytest.mark.parametrize("method", ["sinkhorn", "sinkhorn_log"]) def test_wasserstein_bary_2d_debiased_device_tf(method): # Using the Tensorflow backend nx = ot.backend.TensorflowBackend() # Create the array of images to test A = create_random_images_dist(42, size=20) # Check that everything stays on the CPU with tf.device("/CPU:0"): Ab = nx.from_numpy(A) # wasserstein reg = 1e-2 if method == "sinkhorn_log": with pytest.raises(NotImplementedError): ot.bregman.convolutional_barycenter2d_debiased(Ab, reg, method=method) else: # Compute the barycenter with numpy bary_wass_np, log_np = ot.bregman.convolutional_barycenter2d_debiased( A, reg, method=method, verbose=True, log=True ) # Compute the barycenter with the backend bary_wass_b = ot.bregman.convolutional_barycenter2d_debiased( Ab, reg, method=method ) # Convert the backend result to numpy, to compare with the numpy result bary_wass = nx.to_numpy(bary_wass_b) np.testing.assert_allclose(1, np.sum(bary_wass), rtol=1e-3) np.testing.assert_allclose(bary_wass, bary_wass_np, atol=1e-3) # help in checking if log and verbose do not bug the function ot.bregman.convolutional_barycenter2d_debiased( A, reg, log=True, verbose=True ) # Test that the dtype and device are the same after the computation nx.assert_same_dtype_device(Ab, bary_wass_b) # Check that everything happens on the GPU Ab = nx.from_numpy(A) # wasserstein reg = 1e-2 if method == "sinkhorn_log": with pytest.raises(NotImplementedError): ot.bregman.convolutional_barycenter2d_debiased(Ab, reg, method=method) else: # Compute the barycenter with numpy bary_wass_np, log_np = ot.bregman.convolutional_barycenter2d_debiased( A, reg, method=method, verbose=True, log=True ) # Compute the barycenter with the backend bary_wass_b = ot.bregman.convolutional_barycenter2d_debiased( Ab, reg, method=method ) # Convert the backend result to numpy, to compare with the numpy result bary_wass = nx.to_numpy(bary_wass_b) np.testing.assert_allclose(1, np.sum(bary_wass), rtol=1e-3) def test_unmix(nx): n_bins = 50 # nb bins # Gaussian distributions a1 = ot.datasets.make_1D_gauss(n_bins, m=20, s=10) # m= mean, s= std a2 = ot.datasets.make_1D_gauss(n_bins, m=40, s=10) a = ot.datasets.make_1D_gauss(n_bins, m=30, s=10) # creating matrix A containing all distributions D = np.vstack((a1, a2)).T # loss matrix + normalization M = ot.utils.dist0(n_bins) M /= 
M.max() M0 = ot.utils.dist0(2) M0 /= M0.max() h0 = ot.unif(2) ab, Db, M_nx, M0b, h0b = nx.from_numpy(a, D, M, M0, h0) # wasserstein reg = 1e-3 um_np = ot.bregman.unmix(a, D, M, M0, h0, reg, 1, alpha=0.01) um = nx.to_numpy(ot.bregman.unmix( ab, Db, M_nx, M0b, h0b, reg, 1, alpha=0.01)) np.testing.assert_allclose(1, np.sum(um), rtol=1e-03, atol=1e-03) np.testing.assert_allclose([0.5, 0.5], um, rtol=1e-03, atol=1e-03) np.testing.assert_allclose(um, um_np) ot.bregman.unmix(ab, Db, M_nx, M0b, h0b, reg, 1, alpha=0.01, log=True, verbose=True) def test_empirical_sinkhorn(nx): # test sinkhorn n = 10 a = ot.unif(n) b = ot.unif(n) X_s = np.reshape(1.0 * np.arange(n), (n, 1)) X_t = np.reshape(1.0 * np.arange(0, n), (n, 1)) M = ot.dist(X_s, X_t) M_m = ot.dist(X_s, X_t, metric='euclidean') ab, bb, X_sb, X_tb, M_nx, M_mb = nx.from_numpy(a, b, X_s, X_t, M, M_m) G_sqe = nx.to_numpy(ot.bregman.empirical_sinkhorn(X_sb, X_tb, 1)) sinkhorn_sqe = nx.to_numpy(ot.sinkhorn(ab, bb, M_nx, 1)) G_log, log_es = ot.bregman.empirical_sinkhorn(X_sb, X_tb, 0.1, log=True) G_log = nx.to_numpy(G_log) sinkhorn_log, log_s = ot.sinkhorn(ab, bb, M_nx, 0.1, log=True) sinkhorn_log = nx.to_numpy(sinkhorn_log) G_m = nx.to_numpy(ot.bregman.empirical_sinkhorn( X_sb, X_tb, 1, metric='euclidean')) sinkhorn_m = nx.to_numpy(ot.sinkhorn(ab, bb, M_mb, 1)) loss_emp_sinkhorn = nx.to_numpy( ot.bregman.empirical_sinkhorn2(X_sb, X_tb, 1)) loss_sinkhorn = nx.to_numpy(ot.sinkhorn2(ab, bb, M_nx, 1)) # check constraints np.testing.assert_allclose( sinkhorn_sqe.sum(1), G_sqe.sum(1), atol=1e-05) # metric sqeuclidian np.testing.assert_allclose( sinkhorn_sqe.sum(0), G_sqe.sum(0), atol=1e-05) # metric sqeuclidian np.testing.assert_allclose( sinkhorn_log.sum(1), G_log.sum(1), atol=1e-05) # log np.testing.assert_allclose( sinkhorn_log.sum(0), G_log.sum(0), atol=1e-05) # log np.testing.assert_allclose( sinkhorn_m.sum(1), G_m.sum(1), atol=1e-05) # metric euclidian np.testing.assert_allclose( sinkhorn_m.sum(0), G_m.sum(0), atol=1e-05) # metric euclidian np.testing.assert_allclose(loss_emp_sinkhorn, loss_sinkhorn, atol=1e-05) @pytest.mark.skipif(not geomloss, reason="pytorch not installed") @pytest.skip_backend('tf') @pytest.skip_backend("cupy") @pytest.skip_backend("jax") @pytest.mark.parametrize("metric", ["sqeuclidean", "euclidean"]) def test_geomloss_solver(nx, metric): # test sinkhorn n = 10 a = ot.unif(n) b = ot.unif(n) X_s = np.reshape(1.0 * np.arange(n), (n, 1)) X_t = np.reshape(1.0 * np.arange(0, n), (n, 1)) ab, bb, X_sb, X_tb = nx.from_numpy(a, b, X_s, X_t) G_sqe = nx.to_numpy(ot.bregman.empirical_sinkhorn(X_sb, X_tb, 1, metric=metric)) value, log = ot.bregman.empirical_sinkhorn2_geomloss(X_sb, X_tb, 1, metric=metric, log=True) G_geomloss = nx.to_numpy(log['lazy_plan'][:]) print(value) # call with log = False ot.bregman.empirical_sinkhorn2_geomloss(X_sb, X_tb, 1, metric=metric) # check equality of plans np.testing.assert_allclose(G_sqe, G_geomloss, atol=1e-03) # metric sqeuclidian # check error on wrong metric with pytest.raises(ValueError): ot.bregman.empirical_sinkhorn2_geomloss(X_sb, X_tb, 1, metric='wrong_metric') def test_lazy_empirical_sinkhorn(nx): # test sinkhorn n = 10 a = ot.unif(n) b = ot.unif(n) numIterMax = 1000 X_s = np.reshape(np.arange(n, dtype=np.float64), (n, 1)) X_t = np.reshape(np.arange(0, n, dtype=np.float64), (n, 1)) M = ot.dist(X_s, X_t) M_m = ot.dist(X_s, X_t, metric='euclidean') ab, bb, X_sb, X_tb, M_nx, M_mb = nx.from_numpy(a, b, X_s, X_t, M, M_m) f, g = ot.bregman.empirical_sinkhorn( X_sb, X_tb, 1, numIterMax=numIterMax, 
isLazy=True, batchSize=(1, 3), verbose=True) f, g = nx.to_numpy(f), nx.to_numpy(g) G_sqe = np.exp(f[:, None] + g[None, :] - M / 1) sinkhorn_sqe = nx.to_numpy(ot.sinkhorn(ab, bb, M_nx, 1)) f, g, log_es = ot.bregman.empirical_sinkhorn( X_sb, X_tb, 1, numIterMax=numIterMax, isLazy=True, batchSize=5, log=True) f, g = nx.to_numpy(f), nx.to_numpy(g) G_log = np.exp(f[:, None] + g[None, :] - M / 1) sinkhorn_log, log_s = ot.sinkhorn(ab, bb, M_nx, 1, log=True) sinkhorn_log = nx.to_numpy(sinkhorn_log) f, g = ot.bregman.empirical_sinkhorn( X_sb, X_tb, 1, metric='euclidean', numIterMax=numIterMax, isLazy=True, batchSize=1) f, g = nx.to_numpy(f), nx.to_numpy(g) G_m = np.exp(f[:, None] + g[None, :] - M_m / 1) sinkhorn_m = nx.to_numpy(ot.sinkhorn(ab, bb, M_mb, 1)) loss_emp_sinkhorn, log = ot.bregman.empirical_sinkhorn2( X_sb, X_tb, 1, numIterMax=numIterMax, isLazy=True, batchSize=5, log=True) G_lazy = nx.to_numpy(log['lazy_plan'][:]) loss_emp_sinkhorn = nx.to_numpy(loss_emp_sinkhorn) loss_sinkhorn = nx.to_numpy(ot.sinkhorn2(ab, bb, M_nx, 1)) loss_emp_sinkhorn = ot.bregman.empirical_sinkhorn2( X_sb, X_tb, 1, numIterMax=numIterMax, isLazy=True, batchSize=1, log=False) # check constraints np.testing.assert_allclose( sinkhorn_sqe.sum(1), G_sqe.sum(1), atol=1e-05) # metric sqeuclidian np.testing.assert_allclose( sinkhorn_sqe.sum(0), G_sqe.sum(0), atol=1e-05) # metric sqeuclidian np.testing.assert_allclose( sinkhorn_log.sum(1), G_log.sum(1), atol=1e-05) # log np.testing.assert_allclose( sinkhorn_log.sum(0), G_log.sum(0), atol=1e-05) # log np.testing.assert_allclose( sinkhorn_m.sum(1), G_m.sum(1), atol=1e-05) # metric euclidian np.testing.assert_allclose( sinkhorn_m.sum(0), G_m.sum(0), atol=1e-05) # metric euclidian np.testing.assert_allclose(loss_emp_sinkhorn, loss_sinkhorn, atol=1e-05) np.testing.assert_allclose(G_log, G_lazy, atol=1e-05) def test_empirical_sinkhorn_divergence(nx): # Test sinkhorn divergence n = 10 a = np.linspace(1, n, n) a /= a.sum() b = ot.unif(n) X_s = np.reshape(np.arange(n, dtype=np.float64), (n, 1)) X_t = np.reshape(np.arange(0, n * 2, 2, dtype=np.float64), (n, 1)) M = ot.dist(X_s, X_t) M_s = ot.dist(X_s, X_s) M_t = ot.dist(X_t, X_t) ab, bb, X_sb, X_tb, M_nx, M_sb, M_tb = nx.from_numpy( a, b, X_s, X_t, M, M_s, M_t) emp_sinkhorn_div = nx.to_numpy( ot.bregman.empirical_sinkhorn_divergence(X_sb, X_tb, 1, a=ab, b=bb)) sinkhorn_div = nx.to_numpy( ot.sinkhorn2(ab, bb, M_nx, 1) - 1 / 2 * ot.sinkhorn2(ab, ab, M_sb, 1) - 1 / 2 * ot.sinkhorn2(bb, bb, M_tb, 1) ) emp_sinkhorn_div_np = ot.bregman.empirical_sinkhorn_divergence( X_s, X_t, 1, a=a, b=b) # check constraints np.testing.assert_allclose( emp_sinkhorn_div, emp_sinkhorn_div_np, atol=1e-05) np.testing.assert_allclose( emp_sinkhorn_div, sinkhorn_div, atol=1e-05) # cf conv emp sinkhorn ot.bregman.empirical_sinkhorn_divergence( X_sb, X_tb, 1, a=ab, b=bb, log=True) @pytest.mark.skipif(not torch, reason="No torch available") def test_empirical_sinkhorn_divergence_gradient(): # Test sinkhorn divergence n = 10 a = np.linspace(1, n, n) a /= a.sum() b = ot.unif(n) X_s = np.reshape(np.arange(n, dtype=np.float64), (n, 1)) X_t = np.reshape(np.arange(0, n * 2, 2, dtype=np.float64), (n, 1)) nx = ot.backend.TorchBackend() ab, bb, X_sb, X_tb = nx.from_numpy(a, b, X_s, X_t) ab.requires_grad = True bb.requires_grad = True X_sb.requires_grad = True X_tb.requires_grad = True emp_sinkhorn_div = ot.bregman.empirical_sinkhorn_divergence( X_sb, X_tb, 1, a=ab, b=bb) emp_sinkhorn_div.backward() assert ab.grad is not None assert bb.grad is not None assert X_sb.grad is 
not None assert X_tb.grad is not None def test_stabilized_vs_sinkhorn_multidim(nx): # test if stable version matches sinkhorn # for multidimensional inputs n = 100 # Gaussian distributions a = ot.datasets.make_1D_gauss(n, m=20, s=5) # m= mean, s= std b1 = ot.datasets.make_1D_gauss(n, m=60, s=8) b2 = ot.datasets.make_1D_gauss(n, m=30, s=4) # creating matrix A containing all distributions b = np.vstack((b1, b2)).T M = ot.utils.dist0(n) M /= np.median(M) epsilon = 0.1 ab, bb, M_nx = nx.from_numpy(a, b, M) G_np, _ = ot.bregman.sinkhorn( a, b, M, reg=epsilon, method="sinkhorn", log=True) G, log = ot.bregman.sinkhorn(ab, bb, M_nx, reg=epsilon, method="sinkhorn_stabilized", log=True) G = nx.to_numpy(G) G2, log2 = ot.bregman.sinkhorn(ab, bb, M_nx, epsilon, method="sinkhorn", log=True) G2 = nx.to_numpy(G2) np.testing.assert_allclose(G_np, G2) np.testing.assert_allclose(G, G2) def test_implemented_methods(): IMPLEMENTED_METHODS = ['sinkhorn', 'sinkhorn_stabilized'] ONLY_1D_methods = ['greenkhorn', 'sinkhorn_epsilon_scaling'] NOT_VALID_TOKENS = ['foo'] # test generalized sinkhorn for unbalanced OT barycenter n = 3 rng = np.random.RandomState(42) x = rng.randn(n, 2) a = ot.utils.unif(n) # make dists unbalanced b = ot.utils.unif(n) A = rng.rand(n, 2) A /= A.sum(0, keepdims=True) M = ot.dist(x, x) epsilon = 1.0 for method in IMPLEMENTED_METHODS: ot.bregman.sinkhorn(a, b, M, epsilon, method=method) ot.bregman.sinkhorn2(a, b, M, epsilon, method=method) ot.bregman.barycenter(A, M, reg=epsilon, method=method) with pytest.raises(ValueError): for method in set(NOT_VALID_TOKENS): ot.bregman.sinkhorn(a, b, M, epsilon, method=method) ot.bregman.sinkhorn2(a, b, M, epsilon, method=method) ot.bregman.barycenter(A, M, reg=epsilon, method=method) for method in ONLY_1D_methods: ot.bregman.sinkhorn(a, b, M, epsilon, method=method) with pytest.raises(ValueError): ot.bregman.sinkhorn2(a, b, M, epsilon, method=method) @pytest.skip_backend('tf') @pytest.skip_backend("cupy") @pytest.skip_backend("jax") @pytest.mark.filterwarnings("ignore:Bottleneck") def test_screenkhorn(nx): # test screenkhorn rng = np.random.RandomState(0) n = 100 a = ot.unif(n) b = ot.unif(n) x = rng.randn(n, 2) M = ot.dist(x, x) ab, bb, M_nx = nx.from_numpy(a, b, M) # sinkhorn G_sink = nx.to_numpy(ot.sinkhorn(ab, bb, M_nx, 1e-1)) # screenkhorn G_screen = nx.to_numpy(ot.bregman.screenkhorn( ab, bb, M_nx, 1e-1, uniform=True, verbose=True)) # check marginals np.testing.assert_allclose(G_sink.sum(0), G_screen.sum(0), atol=1e-02) np.testing.assert_allclose(G_sink.sum(1), G_screen.sum(1), atol=1e-02) def test_convolutional_barycenter_non_square(nx): # test for image with height not equal width A = np.ones((2, 2, 3)) / (2 * 3) A_nx = nx.from_numpy(A) b_np = ot.bregman.convolutional_barycenter2d(A, 1e-03) b = nx.to_numpy(ot.bregman.convolutional_barycenter2d(A_nx, 1e-03)) np.testing.assert_allclose(np.ones((2, 3)) / (2 * 3), b, atol=1e-02) np.testing.assert_allclose(np.ones((2, 3)) / (2 * 3), b, atol=1e-02) np.testing.assert_allclose(b, b_np) def test_sinkhorn_warmstart(): m, n = 10, 20 a = ot.unif(m) b = ot.unif(n) Xs = np.arange(m) * 1.0 Xt = np.arange(n) * 1.0 M = ot.dist(Xs.reshape(-1, 1), Xt.reshape(-1, 1)) # Generate warmstart from dual vectors of unregularized OT _, log = ot.lp.emd(a, b, M, log=True) warmstart = (log["u"], log["v"]) reg = 1 # Optimal plan with uniform warmstart pi_unif, _ = ot.bregman.sinkhorn( a, b, M, reg, method="sinkhorn", log=True, warmstart=None) # Optimal plan with warmstart generated from unregularized OT pi_sh, _ = 
ot.bregman.sinkhorn( a, b, M, reg, method="sinkhorn", log=True, warmstart=warmstart) pi_sh_log, _ = ot.bregman.sinkhorn( a, b, M, reg, method="sinkhorn_log", log=True, warmstart=warmstart) pi_sh_stab, _ = ot.bregman.sinkhorn( a, b, M, reg, method="sinkhorn_stabilized", log=True, warmstart=warmstart) pi_sh_sc, _ = ot.bregman.sinkhorn( a, b, M, reg, method="sinkhorn_epsilon_scaling", log=True, warmstart=warmstart) np.testing.assert_allclose(pi_unif, pi_sh, atol=1e-05) np.testing.assert_allclose(pi_unif, pi_sh_log, atol=1e-05) np.testing.assert_allclose(pi_unif, pi_sh_stab, atol=1e-05) np.testing.assert_allclose(pi_unif, pi_sh_sc, atol=1e-05) def test_empirical_sinkhorn_warmstart(): m, n = 10, 20 Xs = np.arange(m).reshape(-1, 1) * 1.0 Xt = np.arange(n).reshape(-1, 1) * 1.0 M = ot.dist(Xs, Xt) # Generate warmstart from dual vectors of unregularized OT a = ot.unif(m) b = ot.unif(n) _, log = ot.lp.emd(a, b, M, log=True) warmstart = (log["u"], log["v"]) reg = 1 # Optimal plan with uniform warmstart f, g, _ = ot.bregman.empirical_sinkhorn( X_s=Xs, X_t=Xt, reg=reg, isLazy=True, log=True, warmstart=None) pi_unif = np.exp(f[:, None] + g[None, :] - M / reg) # Optimal plan with warmstart generated from unregularized OT f, g, _ = ot.bregman.empirical_sinkhorn( X_s=Xs, X_t=Xt, reg=reg, isLazy=True, log=True, warmstart=warmstart) pi_ws_lazy = np.exp(f[:, None] + g[None, :] - M / reg) pi_ws_not_lazy, _ = ot.bregman.empirical_sinkhorn( X_s=Xs, X_t=Xt, reg=reg, isLazy=False, log=True, warmstart=warmstart) np.testing.assert_allclose(pi_unif, pi_ws_lazy, atol=1e-05) np.testing.assert_allclose(pi_unif, pi_ws_not_lazy, atol=1e-05) def test_empirical_sinkhorn_divergence_warmstart(): m, n = 10, 20 Xs = np.arange(m).reshape(-1, 1) * 1.0 Xt = np.arange(n).reshape(-1, 1) * 1.0 M = ot.dist(Xs, Xt) # Generate warmstart from dual vectors of unregularized OT a = ot.unif(m) b = ot.unif(n) _, log = ot.lp.emd(a, b, M, log=True) warmstart = (log["u"], log["v"]) reg = 1 # Optimal plan with uniform warmstart sd_unif, _ = ot.bregman.empirical_sinkhorn_divergence( X_s=Xs, X_t=Xt, reg=reg, isLazy=True, log=True, warmstart=None) # Optimal plan with warmstart generated from unregularized OT sd_ws_lazy, _ = ot.bregman.empirical_sinkhorn_divergence( X_s=Xs, X_t=Xt, reg=reg, isLazy=True, log=True, warmstart=warmstart) sd_ws_not_lazy, _ = ot.bregman.empirical_sinkhorn_divergence( X_s=Xs, X_t=Xt, reg=reg, isLazy=False, log=True, warmstart=warmstart) np.testing.assert_allclose(sd_unif, sd_ws_lazy, atol=1e-05) np.testing.assert_allclose(sd_unif, sd_ws_not_lazy, atol=1e-05) python-pot-0.9.3+dfsg/test/test_coot.py000066400000000000000000000311641455713015700201340ustar00rootroot00000000000000"""Tests for module COOT on OT """ # Author: Quang Huy Tran # # License: MIT License import numpy as np import ot from ot.coot import co_optimal_transport as coot from ot.coot import co_optimal_transport2 as coot2 import pytest @pytest.mark.parametrize("verbose", [False, True, 1, 0]) def test_coot(nx, verbose): n_samples = 60 # nb samples mu_s = np.array([0, 0]) cov_s = np.array([[1, 0], [0, 1]]) xs = ot.datasets.make_2D_samples_gauss( n_samples, mu_s, cov_s, random_state=4) xt = xs[::-1].copy() xs_nx = nx.from_numpy(xs) xt_nx = nx.from_numpy(xt) # test couplings pi_sample, pi_feature = coot(X=xs, Y=xt, verbose=verbose) pi_sample_nx, pi_feature_nx = coot(X=xs_nx, Y=xt_nx, verbose=verbose) pi_sample_nx = nx.to_numpy(pi_sample_nx) pi_feature_nx = nx.to_numpy(pi_feature_nx) anti_id_sample = np.flipud(np.eye(n_samples, n_samples)) / n_samples id_feature = 
np.eye(2, 2) / 2 np.testing.assert_allclose(pi_sample, anti_id_sample, atol=1e-04) np.testing.assert_allclose(pi_sample_nx, anti_id_sample, atol=1e-04) np.testing.assert_allclose(pi_feature, id_feature, atol=1e-04) np.testing.assert_allclose(pi_feature_nx, id_feature, atol=1e-04) # test marginal distributions px_s, px_f = ot.unif(n_samples), ot.unif(2) py_s, py_f = ot.unif(n_samples), ot.unif(2) np.testing.assert_allclose(px_s, pi_sample_nx.sum(0), atol=1e-04) np.testing.assert_allclose(py_s, pi_sample_nx.sum(1), atol=1e-04) np.testing.assert_allclose(px_f, pi_feature_nx.sum(0), atol=1e-04) np.testing.assert_allclose(py_f, pi_feature_nx.sum(1), atol=1e-04) np.testing.assert_allclose(px_s, pi_sample.sum(0), atol=1e-04) np.testing.assert_allclose(py_s, pi_sample.sum(1), atol=1e-04) np.testing.assert_allclose(px_f, pi_feature.sum(0), atol=1e-04) np.testing.assert_allclose(py_f, pi_feature.sum(1), atol=1e-04) # test COOT distance coot_np = coot2(X=xs, Y=xt, verbose=verbose) coot_nx = nx.to_numpy(coot2(X=xs_nx, Y=xt_nx, verbose=verbose)) np.testing.assert_allclose(coot_np, 0, atol=1e-08) np.testing.assert_allclose(coot_nx, 0, atol=1e-08) def test_entropic_coot(nx): n_samples = 60 # nb samples mu_s = np.array([0, 0]) cov_s = np.array([[1, 0], [0, 1]]) xs = ot.datasets.make_2D_samples_gauss( n_samples, mu_s, cov_s, random_state=4) xt = xs[::-1].copy() xs_nx = nx.from_numpy(xs) xt_nx = nx.from_numpy(xt) epsilon = (1, 1e-1) nits_ot = 2000 # test couplings pi_sample, pi_feature = coot(X=xs, Y=xt, epsilon=epsilon, nits_ot=nits_ot) pi_sample_nx, pi_feature_nx = coot( X=xs_nx, Y=xt_nx, epsilon=epsilon, nits_ot=nits_ot) pi_sample_nx = nx.to_numpy(pi_sample_nx) pi_feature_nx = nx.to_numpy(pi_feature_nx) np.testing.assert_allclose(pi_sample, pi_sample_nx, atol=1e-04) np.testing.assert_allclose(pi_feature, pi_feature_nx, atol=1e-04) # test marginal distributions px_s, px_f = ot.unif(n_samples), ot.unif(2) py_s, py_f = ot.unif(n_samples), ot.unif(2) np.testing.assert_allclose(px_s, pi_sample_nx.sum(0), atol=1e-04) np.testing.assert_allclose(py_s, pi_sample_nx.sum(1), atol=1e-04) np.testing.assert_allclose(px_f, pi_feature_nx.sum(0), atol=1e-04) np.testing.assert_allclose(py_f, pi_feature_nx.sum(1), atol=1e-04) np.testing.assert_allclose(px_s, pi_sample.sum(0), atol=1e-04) np.testing.assert_allclose(py_s, pi_sample.sum(1), atol=1e-04) np.testing.assert_allclose(px_f, pi_feature.sum(0), atol=1e-04) np.testing.assert_allclose(py_f, pi_feature.sum(1), atol=1e-04) # test entropic COOT distance coot_np = coot2(X=xs, Y=xt, epsilon=epsilon, nits_ot=nits_ot) coot_nx = nx.to_numpy( coot2(X=xs_nx, Y=xt_nx, epsilon=epsilon, nits_ot=nits_ot)) np.testing.assert_allclose(coot_np, coot_nx, atol=1e-08) def test_coot_with_linear_terms(nx): n_samples = 60 # nb samples mu_s = np.array([0, 0]) cov_s = np.array([[1, 0], [0, 1]]) xs = ot.datasets.make_2D_samples_gauss( n_samples, mu_s, cov_s, random_state=4) xt = xs[::-1].copy() xs_nx = nx.from_numpy(xs) xt_nx = nx.from_numpy(xt) M_samp = np.ones((n_samples, n_samples)) np.fill_diagonal(np.fliplr(M_samp), 0) M_feat = np.ones((2, 2)) np.fill_diagonal(M_feat, 0) M_samp_nx, M_feat_nx = nx.from_numpy(M_samp), nx.from_numpy(M_feat) alpha = (1, 2) # test couplings anti_id_sample = np.flipud(np.eye(n_samples, n_samples)) / n_samples id_feature = np.eye(2, 2) / 2 pi_sample, pi_feature = coot( X=xs, Y=xt, alpha=alpha, M_samp=M_samp, M_feat=M_feat) pi_sample_nx, pi_feature_nx = coot( X=xs_nx, Y=xt_nx, alpha=alpha, M_samp=M_samp_nx, M_feat=M_feat_nx) pi_sample_nx = nx.to_numpy(pi_sample_nx) 
    pi_feature_nx = nx.to_numpy(pi_feature_nx)

    np.testing.assert_allclose(pi_sample, anti_id_sample, atol=1e-04)
    np.testing.assert_allclose(pi_sample_nx, anti_id_sample, atol=1e-04)
    np.testing.assert_allclose(pi_feature, id_feature, atol=1e-04)
    np.testing.assert_allclose(pi_feature_nx, id_feature, atol=1e-04)

    # test marginal distributions
    px_s, px_f = ot.unif(n_samples), ot.unif(2)
    py_s, py_f = ot.unif(n_samples), ot.unif(2)

    np.testing.assert_allclose(px_s, pi_sample_nx.sum(0), atol=1e-04)
    np.testing.assert_allclose(py_s, pi_sample_nx.sum(1), atol=1e-04)
    np.testing.assert_allclose(px_f, pi_feature_nx.sum(0), atol=1e-04)
    np.testing.assert_allclose(py_f, pi_feature_nx.sum(1), atol=1e-04)

    np.testing.assert_allclose(px_s, pi_sample.sum(0), atol=1e-04)
    np.testing.assert_allclose(py_s, pi_sample.sum(1), atol=1e-04)
    np.testing.assert_allclose(px_f, pi_feature.sum(0), atol=1e-04)
    np.testing.assert_allclose(py_f, pi_feature.sum(1), atol=1e-04)

    # test COOT distance
    coot_np = coot2(X=xs, Y=xt, alpha=alpha, M_samp=M_samp, M_feat=M_feat)
    coot_nx = nx.to_numpy(
        coot2(X=xs_nx, Y=xt_nx, alpha=alpha, M_samp=M_samp_nx, M_feat=M_feat_nx))
    np.testing.assert_allclose(coot_np, 0, atol=1e-08)
    np.testing.assert_allclose(coot_nx, 0, atol=1e-08)


def test_coot_raise_value_error(nx):
    n_samples = 80  # nb samples

    mu_s = np.array([2, 4])
    cov_s = np.array([[1, 0], [0, 1]])

    xs = ot.datasets.make_2D_samples_gauss(
        n_samples, mu_s, cov_s, random_state=43)
    xt = xs[::-1].copy()

    xs_nx = nx.from_numpy(xs)
    xt_nx = nx.from_numpy(xt)

    # raise value error of method sinkhorn
    def coot_sh(method_sinkhorn):
        return coot(X=xs, Y=xt, method_sinkhorn=method_sinkhorn)

    def coot_sh_nx(method_sinkhorn):
        return coot(X=xs_nx, Y=xt_nx, method_sinkhorn=method_sinkhorn)

    np.testing.assert_raises(ValueError, coot_sh, "not_sinkhorn")
    np.testing.assert_raises(ValueError, coot_sh_nx, "not_sinkhorn")

    # raise value error for epsilon
    def coot_eps(epsilon):
        return coot(X=xs, Y=xt, epsilon=epsilon)

    def coot_eps_nx(epsilon):
        return coot(X=xs_nx, Y=xt_nx, epsilon=epsilon)

    np.testing.assert_raises(ValueError, coot_eps, (1, 2, 3))
    np.testing.assert_raises(ValueError, coot_eps_nx, [1, 2, 3, 4])

    # raise value error for alpha
    def coot_alpha(alpha):
        return coot(X=xs, Y=xt, alpha=alpha)

    def coot_alpha_nx(alpha):
        return coot(X=xs_nx, Y=xt_nx, alpha=alpha)

    np.testing.assert_raises(ValueError, coot_alpha, [1])
    np.testing.assert_raises(ValueError, coot_alpha_nx, np.arange(4))


def test_coot_warmstart(nx):
    n_samples = 80  # nb samples

    mu_s = np.array([2, 3])
    cov_s = np.array([[1, 0], [0, 1]])

    xs = ot.datasets.make_2D_samples_gauss(
        n_samples, mu_s, cov_s, random_state=125)
    xt = xs[::-1].copy()

    xs_nx = nx.from_numpy(xs)
    xt_nx = nx.from_numpy(xt)

    # initialize warmstart
    rng = np.random.RandomState(42)
    init_pi_sample = rng.rand(n_samples, n_samples)
    init_pi_sample = init_pi_sample / np.sum(init_pi_sample)
    init_pi_sample_nx = nx.from_numpy(init_pi_sample)

    init_pi_feature = rng.rand(2, 2)
    # normalize so that the initial feature coupling is a valid distribution
    init_pi_feature = init_pi_feature / np.sum(init_pi_feature)
    init_pi_feature_nx = nx.from_numpy(init_pi_feature)

    init_duals_sample = (rng.random(n_samples) * 2 - 1,
                         rng.random(n_samples) * 2 - 1)
    init_duals_sample_nx = (nx.from_numpy(init_duals_sample[0]),
                            nx.from_numpy(init_duals_sample[1]))

    init_duals_feature = (rng.random(2) * 2 - 1, rng.random(2) * 2 - 1)
    init_duals_feature_nx = (nx.from_numpy(init_duals_feature[0]),
                             nx.from_numpy(init_duals_feature[1]))

    warmstart = {
        "pi_sample": init_pi_sample,
        "pi_feature": init_pi_feature,
        "duals_sample": init_duals_sample,
        "duals_feature": init_duals_feature
} warmstart_nx = { "pi_sample": init_pi_sample_nx, "pi_feature": init_pi_feature_nx, "duals_sample": init_duals_sample_nx, "duals_feature": init_duals_feature_nx } # test couplings pi_sample, pi_feature = coot(X=xs, Y=xt, warmstart=warmstart) pi_sample_nx, pi_feature_nx = coot( X=xs_nx, Y=xt_nx, warmstart=warmstart_nx) pi_sample_nx = nx.to_numpy(pi_sample_nx) pi_feature_nx = nx.to_numpy(pi_feature_nx) anti_id_sample = np.flipud(np.eye(n_samples, n_samples)) / n_samples id_feature = np.eye(2, 2) / 2 np.testing.assert_allclose(pi_sample, anti_id_sample, atol=1e-04) np.testing.assert_allclose(pi_sample_nx, anti_id_sample, atol=1e-04) np.testing.assert_allclose(pi_feature, id_feature, atol=1e-04) np.testing.assert_allclose(pi_feature_nx, id_feature, atol=1e-04) # test marginal distributions px_s, px_f = ot.unif(n_samples), ot.unif(2) py_s, py_f = ot.unif(n_samples), ot.unif(2) np.testing.assert_allclose(px_s, pi_sample_nx.sum(0), atol=1e-04) np.testing.assert_allclose(py_s, pi_sample_nx.sum(1), atol=1e-04) np.testing.assert_allclose(px_f, pi_feature_nx.sum(0), atol=1e-04) np.testing.assert_allclose(py_f, pi_feature_nx.sum(1), atol=1e-04) np.testing.assert_allclose(px_s, pi_sample.sum(0), atol=1e-04) np.testing.assert_allclose(py_s, pi_sample.sum(1), atol=1e-04) np.testing.assert_allclose(px_f, pi_feature.sum(0), atol=1e-04) np.testing.assert_allclose(py_f, pi_feature.sum(1), atol=1e-04) # test COOT distance coot_np = coot2(X=xs, Y=xt, warmstart=warmstart) coot_nx = nx.to_numpy(coot2(X=xs_nx, Y=xt_nx, warmstart=warmstart_nx)) np.testing.assert_allclose(coot_np, 0, atol=1e-08) np.testing.assert_allclose(coot_nx, 0, atol=1e-08) def test_coot_log(nx): n_samples = 90 # nb samples mu_s = np.array([-2, 0]) cov_s = np.array([[1, 0], [0, 1]]) xs = ot.datasets.make_2D_samples_gauss( n_samples, mu_s, cov_s, random_state=43) xt = xs[::-1].copy() xs_nx = nx.from_numpy(xs) xt_nx = nx.from_numpy(xt) pi_sample, pi_feature, log = coot(X=xs, Y=xt, log=True) pi_sample_nx, pi_feature_nx, log_nx = coot(X=xs_nx, Y=xt_nx, log=True) duals_sample, duals_feature = log["duals_sample"], log["duals_feature"] assert len(duals_sample) == 2 assert len(duals_feature) == 2 assert len(duals_sample[0]) == n_samples assert len(duals_sample[1]) == n_samples assert len(duals_feature[0]) == 2 assert len(duals_feature[1]) == 2 duals_sample_nx = log_nx["duals_sample"] assert len(duals_sample_nx) == 2 assert len(duals_sample_nx[0]) == n_samples assert len(duals_sample_nx[1]) == n_samples duals_feature_nx = log_nx["duals_feature"] assert len(duals_feature_nx) == 2 assert len(duals_feature_nx[0]) == 2 assert len(duals_feature_nx[1]) == 2 list_coot = log["distances"] assert len(list_coot) >= 1 list_coot_nx = log_nx["distances"] assert len(list_coot_nx) >= 1 # test with coot distance coot_np, log = coot2(X=xs, Y=xt, log=True) coot_nx, log_nx = coot2(X=xs_nx, Y=xt_nx, log=True) duals_sample, duals_feature = log["duals_sample"], log["duals_feature"] assert len(duals_sample) == 2 assert len(duals_feature) == 2 assert len(duals_sample[0]) == n_samples assert len(duals_sample[1]) == n_samples assert len(duals_feature[0]) == 2 assert len(duals_feature[1]) == 2 duals_sample_nx = log_nx["duals_sample"] assert len(duals_sample_nx) == 2 assert len(duals_sample_nx[0]) == n_samples assert len(duals_sample_nx[1]) == n_samples duals_feature_nx = log_nx["duals_feature"] assert len(duals_feature_nx) == 2 assert len(duals_feature_nx[0]) == 2 assert len(duals_feature_nx[1]) == 2 list_coot = log["distances"] assert len(list_coot) >= 1 list_coot_nx = 
log_nx["distances"] assert len(list_coot_nx) >= 1 python-pot-0.9.3+dfsg/test/test_da.py000066400000000000000000000735051455713015700175610ustar00rootroot00000000000000"""Tests for module da on Domain Adaptation """ # Author: Remi Flamary # # License: MIT License import numpy as np from numpy.testing import assert_allclose, assert_equal import pytest import ot from ot.datasets import make_data_classif from ot.utils import unif try: # test if cudamat installed import sklearn # noqa: F401 nosklearn = False except ImportError: nosklearn = True try: # test if cvxpy is installed import cvxpy # noqa: F401 nocvxpy = False except ImportError: nocvxpy = True def test_class_jax_tf(): backends = [] from ot.backend import jax, tf if jax: backends.append(ot.backend.JaxBackend()) if tf: backends.append(ot.backend.TensorflowBackend()) for nx in backends: ns = 150 nt = 200 Xs, ys = make_data_classif('3gauss', ns) Xt, yt = make_data_classif('3gauss2', nt) Xs, ys, Xt, yt = nx.from_numpy(Xs, ys, Xt, yt) otda = ot.da.SinkhornLpl1Transport() with pytest.raises(TypeError): otda.fit(Xs=Xs, ys=ys, Xt=Xt) @pytest.skip_backend("jax") @pytest.skip_backend("tf") @pytest.mark.parametrize("class_to_test", [ot.da.EMDTransport, ot.da.SinkhornTransport, ot.da.SinkhornLpl1Transport, ot.da.SinkhornL1l2Transport, ot.da.SinkhornL1l2Transport]) def test_log_da(nx, class_to_test): ns = 50 nt = 50 Xs, ys = make_data_classif('3gauss', ns) Xt, yt = make_data_classif('3gauss2', nt) Xs, ys, Xt, yt = nx.from_numpy(Xs, ys, Xt, yt) otda = class_to_test(log=True) # test its computed otda.fit(Xs=Xs, ys=ys, Xt=Xt) assert hasattr(otda, "log_") @pytest.skip_backend("jax") @pytest.skip_backend("tf") def test_sinkhorn_lpl1_transport_class(nx): """test_sinkhorn_transport """ ns = 50 nt = 50 Xs, ys = make_data_classif('3gauss', ns) Xt, yt = make_data_classif('3gauss2', nt) Xs, ys, Xt, yt = nx.from_numpy(Xs, ys, Xt, yt) otda = ot.da.SinkhornLpl1Transport() # test its computed otda.fit(Xs=Xs, ys=ys, Xt=Xt) assert hasattr(otda, "cost_") assert not np.any(np.isnan(nx.to_numpy(otda.cost_))), "cost is finite" assert hasattr(otda, "coupling_") assert np.all(np.isfinite(nx.to_numpy(otda.coupling_))), "coupling is finite" # test dimensions of coupling assert_equal(otda.cost_.shape, ((Xs.shape[0], Xt.shape[0]))) assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0]))) # test margin constraints mu_s = unif(ns) mu_t = unif(nt) assert_allclose( nx.to_numpy(nx.sum(otda.coupling_, axis=0)), mu_t, rtol=1e-3, atol=1e-3) assert_allclose( nx.to_numpy(nx.sum(otda.coupling_, axis=1)), mu_s, rtol=1e-3, atol=1e-3) # test transform transp_Xs = otda.transform(Xs=Xs) assert_equal(transp_Xs.shape, Xs.shape) Xs_new = nx.from_numpy(make_data_classif('3gauss', ns + 1)[0]) transp_Xs_new = otda.transform(Xs_new) # check that the oos method is working assert_equal(transp_Xs_new.shape, Xs_new.shape) # test inverse transform transp_Xt = otda.inverse_transform(Xt=Xt) assert_equal(transp_Xt.shape, Xt.shape) Xt_new = nx.from_numpy(make_data_classif('3gauss2', nt + 1)[0]) transp_Xt_new = otda.inverse_transform(Xt=Xt_new) # check that the oos method is working assert_equal(transp_Xt_new.shape, Xt_new.shape) # test fit_transform transp_Xs = otda.fit_transform(Xs=Xs, ys=ys, Xt=Xt) assert_equal(transp_Xs.shape, Xs.shape) # check label propagation transp_yt = otda.transform_labels(ys) assert_equal(transp_yt.shape[0], yt.shape[0]) assert_equal(transp_yt.shape[1], len(np.unique(ys))) # check inverse label propagation transp_ys = otda.inverse_transform_labels(yt) 
assert_equal(transp_ys.shape[0], ys.shape[0]) assert_equal(transp_ys.shape[1], len(np.unique(yt))) # test unsupervised vs semi-supervised mode otda_unsup = ot.da.SinkhornLpl1Transport() otda_unsup.fit(Xs=Xs, ys=ys, Xt=Xt) n_unsup = nx.sum(otda_unsup.cost_) otda_semi = ot.da.SinkhornLpl1Transport() otda_semi.fit(Xs=Xs, ys=ys, Xt=Xt, yt=yt) assert_equal(otda_semi.cost_.shape, ((Xs.shape[0], Xt.shape[0]))) n_semisup = nx.sum(otda_semi.cost_) # check that the cost matrix norms are indeed different assert np.allclose(n_unsup, n_semisup, atol=1e-7), "semisupervised mode is not working" # check that the coupling forbids mass transport between labeled source # and labeled target samples mass_semi = nx.sum( otda_semi.coupling_[otda_semi.cost_ == otda_semi.limit_max]) assert mass_semi == 0, "semisupervised mode not working" @pytest.skip_backend("tf") def test_sinkhorn_l1l2_transport_class(nx): """test_sinkhorn_transport """ ns = 50 nt = 50 Xs, ys = make_data_classif('3gauss', ns, random_state=42) Xt, yt = make_data_classif('3gauss2', nt, random_state=43) # prepare semi-supervised labels yt_semi = np.copy(yt) yt_semi[np.arange(0, nt, 2)] = -1 Xs, ys, Xt, yt, yt_semi = nx.from_numpy(Xs, ys, Xt, yt, yt_semi) otda = ot.da.SinkhornL1l2Transport(max_inner_iter=500) otda.fit(Xs=Xs, ys=ys, Xt=Xt) # test its computed assert hasattr(otda, "cost_") assert hasattr(otda, "coupling_") assert hasattr(otda, "log_") # test dimensions of coupling assert_equal(otda.cost_.shape, ((Xs.shape[0], Xt.shape[0]))) assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0]))) # test margin constraints mu_s = unif(ns) mu_t = unif(nt) assert_allclose( nx.to_numpy(nx.sum(otda.coupling_, axis=0)), mu_t, rtol=1e-3, atol=1e-3) assert_allclose( nx.to_numpy(nx.sum(otda.coupling_, axis=1)), mu_s, rtol=1e-3, atol=1e-3) # test transform transp_Xs = otda.transform(Xs=Xs) assert_equal(transp_Xs.shape, Xs.shape) Xs_new = nx.from_numpy(make_data_classif('3gauss', ns + 1)[0]) transp_Xs_new = otda.transform(Xs_new) # check that the oos method is working assert_equal(transp_Xs_new.shape, Xs_new.shape) # test inverse transform transp_Xt = otda.inverse_transform(Xt=Xt) assert_equal(transp_Xt.shape, Xt.shape) # check label propagation transp_yt = otda.transform_labels(ys) assert_equal(transp_yt.shape[0], yt.shape[0]) assert_equal(transp_yt.shape[1], len(np.unique(ys))) # check inverse label propagation transp_ys = otda.inverse_transform_labels(yt) assert_equal(transp_ys.shape[0], ys.shape[0]) assert_equal(transp_ys.shape[1], len(np.unique(yt))) Xt_new = nx.from_numpy(make_data_classif('3gauss2', nt + 1)[0]) transp_Xt_new = otda.inverse_transform(Xt=Xt_new) # check that the oos method is working assert_equal(transp_Xt_new.shape, Xt_new.shape) # test fit_transform transp_Xs = otda.fit_transform(Xs=Xs, ys=ys, Xt=Xt) assert_equal(transp_Xs.shape, Xs.shape) # test unsupervised vs semi-supervised mode otda_unsup = ot.da.SinkhornL1l2Transport() otda_unsup.fit(Xs=Xs, ys=ys, Xt=Xt) n_unsup = nx.sum(otda_unsup.cost_) otda_semi = ot.da.SinkhornL1l2Transport() otda_semi.fit(Xs=Xs, ys=ys, Xt=Xt, yt=yt_semi) assert_equal(otda_semi.cost_.shape, ((Xs.shape[0], Xt.shape[0]))) n_semisup = nx.sum(otda_semi.cost_) # check that the cost matrix norms are indeed different assert np.allclose(n_unsup, n_semisup, atol=1e-7), "semisupervised mode is not working" # check that the coupling forbids mass transport between labeled source # and labeled target samples mass_semi = nx.sum(otda_semi.coupling_[otda_semi.cost_ == otda_semi.limit_max]) mass_semi = 
otda_semi.coupling_[otda_semi.cost_ == otda_semi.limit_max] assert_allclose(nx.to_numpy(mass_semi), np.zeros_like(mass_semi), rtol=1e-9, atol=1e-9) # check everything runs well with log=True otda = ot.da.SinkhornL1l2Transport(log=True) otda.fit(Xs=Xs, ys=ys, Xt=Xt) assert len(otda.log_.keys()) != 0 @pytest.skip_backend("jax") @pytest.skip_backend("tf") def test_sinkhorn_transport_class(nx): """test_sinkhorn_transport """ ns = 50 nt = 50 Xs, ys = make_data_classif('3gauss', ns) Xt, yt = make_data_classif('3gauss2', nt) Xs, ys, Xt, yt = nx.from_numpy(Xs, ys, Xt, yt) otda = ot.da.SinkhornTransport() # test its computed otda.fit(Xs=Xs, Xt=Xt) assert hasattr(otda, "cost_") assert hasattr(otda, "coupling_") assert hasattr(otda, "log_") # test dimensions of coupling assert_equal(otda.cost_.shape, ((Xs.shape[0], Xt.shape[0]))) assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0]))) # test margin constraints mu_s = unif(ns) mu_t = unif(nt) assert_allclose( nx.to_numpy(nx.sum(otda.coupling_, axis=0)), mu_t, rtol=1e-3, atol=1e-3) assert_allclose( nx.to_numpy(nx.sum(otda.coupling_, axis=1)), mu_s, rtol=1e-3, atol=1e-3) # test transform transp_Xs = otda.transform(Xs=Xs) assert_equal(transp_Xs.shape, Xs.shape) Xs_new = nx.from_numpy(make_data_classif('3gauss', ns + 1)[0]) transp_Xs_new = otda.transform(Xs_new) # check that the oos method is working assert_equal(transp_Xs_new.shape, Xs_new.shape) # test inverse transform transp_Xt = otda.inverse_transform(Xt=Xt) assert_equal(transp_Xt.shape, Xt.shape) # check label propagation transp_yt = otda.transform_labels(ys) assert_equal(transp_yt.shape[0], yt.shape[0]) assert_equal(transp_yt.shape[1], len(np.unique(ys))) # check inverse label propagation transp_ys = otda.inverse_transform_labels(yt) assert_equal(transp_ys.shape[0], ys.shape[0]) assert_equal(transp_ys.shape[1], len(np.unique(yt))) Xt_new = nx.from_numpy(make_data_classif('3gauss2', nt + 1)[0]) transp_Xt_new = otda.inverse_transform(Xt=Xt_new) # check that the oos method is working assert_equal(transp_Xt_new.shape, Xt_new.shape) # test fit_transform transp_Xs = otda.fit_transform(Xs=Xs, Xt=Xt) assert_equal(transp_Xs.shape, Xs.shape) # test unsupervised vs semi-supervised mode otda_unsup = ot.da.SinkhornTransport() otda_unsup.fit(Xs=Xs, Xt=Xt) n_unsup = nx.sum(otda_unsup.cost_) otda_semi = ot.da.SinkhornTransport() otda_semi.fit(Xs=Xs, ys=ys, Xt=Xt, yt=yt) assert_equal(otda_semi.cost_.shape, ((Xs.shape[0], Xt.shape[0]))) n_semisup = nx.sum(otda_semi.cost_) # check that the cost matrix norms are indeed different assert np.allclose(n_unsup, n_semisup, atol=1e-7), "semisupervised mode is not working" # check that the coupling forbids mass transport between labeled source # and labeled target samples mass_semi = nx.sum( otda_semi.coupling_[otda_semi.cost_ == otda_semi.limit_max]) assert mass_semi == 0, "semisupervised mode not working" # check everything runs well with log=True otda = ot.da.SinkhornTransport(log=True) otda.fit(Xs=Xs, ys=ys, Xt=Xt) assert len(otda.log_.keys()) != 0 @pytest.skip_backend("jax") @pytest.skip_backend("tf") def test_unbalanced_sinkhorn_transport_class(nx): """test_sinkhorn_transport """ ns = 50 nt = 50 Xs, ys = make_data_classif('3gauss', ns) Xt, yt = make_data_classif('3gauss2', nt) Xs, ys, Xt, yt = nx.from_numpy(Xs, ys, Xt, yt) for log in [True, False]: otda = ot.da.UnbalancedSinkhornTransport(log=log) # test its computed otda.fit(Xs=Xs, Xt=Xt) assert hasattr(otda, "cost_") assert hasattr(otda, "coupling_") assert hasattr(otda, "log_") # test dimensions of coupling 
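    # (illustrative aside, not part of the original test file: the fitted
    # coupling_ checked below is the (ns, nt) entropic plan between uniform
    # sample weights; the class is roughly a wrapper around the functional
    # API, e.g.
    #     G = ot.sinkhorn(ot.unif(ns), ot.unif(nt), ot.dist(Xs, Xt), reg=1.)
    # where reg plays the role of SinkhornTransport's reg_e parameter)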
assert_equal(otda.cost_.shape, ((Xs.shape[0], Xt.shape[0]))) assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0]))) assert not np.any(np.isnan(nx.to_numpy(otda.cost_))), "cost is finite" # test coupling assert np.all(np.isfinite(nx.to_numpy(otda.coupling_))), "coupling is finite" # test transform transp_Xs = otda.transform(Xs=Xs) assert_equal(transp_Xs.shape, Xs.shape) # check label propagation transp_yt = otda.transform_labels(ys) assert_equal(transp_yt.shape[0], yt.shape[0]) assert_equal(transp_yt.shape[1], len(np.unique(ys))) # check inverse label propagation transp_ys = otda.inverse_transform_labels(yt) assert_equal(transp_ys.shape[0], ys.shape[0]) assert_equal(transp_ys.shape[1], len(np.unique(yt))) Xs_new = nx.from_numpy(make_data_classif('3gauss', ns + 1)[0]) transp_Xs_new = otda.transform(Xs_new) # check that the oos method is working assert_equal(transp_Xs_new.shape, Xs_new.shape) # test inverse transform transp_Xt = otda.inverse_transform(Xt=Xt) assert_equal(transp_Xt.shape, Xt.shape) Xt_new = nx.from_numpy(make_data_classif('3gauss2', nt + 1)[0]) transp_Xt_new = otda.inverse_transform(Xt=Xt_new) # check that the oos method is working assert_equal(transp_Xt_new.shape, Xt_new.shape) # test fit_transform transp_Xs = otda.fit_transform(Xs=Xs, Xt=Xt) assert_equal(transp_Xs.shape, Xs.shape) # test unsupervised vs semi-supervised mode otda_unsup = ot.da.SinkhornTransport() otda_unsup.fit(Xs=Xs, Xt=Xt) assert not np.any(np.isnan(nx.to_numpy(otda_unsup.cost_))), "cost is finite" n_unsup = nx.sum(otda_unsup.cost_) otda_semi = ot.da.SinkhornTransport() otda_semi.fit(Xs=Xs, ys=ys, Xt=Xt, yt=yt) assert not np.any(np.isnan(nx.to_numpy(otda_semi.cost_))), "cost is finite" assert_equal(otda_semi.cost_.shape, ((Xs.shape[0], Xt.shape[0]))) n_semisup = nx.sum(otda_semi.cost_) # check that the cost matrix norms are indeed different assert np.allclose(n_unsup, n_semisup, atol=1e-7), "semisupervised mode is not working" # check everything runs well with log=True otda = ot.da.SinkhornTransport(log=True) otda.fit(Xs=Xs, ys=ys, Xt=Xt) assert not np.any(np.isnan(nx.to_numpy(otda.cost_))), "cost is finite" assert len(otda.log_.keys()) != 0 @pytest.skip_backend("jax") @pytest.skip_backend("tf") def test_emd_transport_class(nx): """test_sinkhorn_transport """ ns = 50 nt = 50 Xs, ys = make_data_classif('3gauss', ns) Xt, yt = make_data_classif('3gauss2', nt) Xs, ys, Xt, yt = nx.from_numpy(Xs, ys, Xt, yt) otda = ot.da.EMDTransport() # test its computed otda.fit(Xs=Xs, Xt=Xt) assert hasattr(otda, "cost_") assert hasattr(otda, "coupling_") # test dimensions of coupling assert_equal(otda.cost_.shape, ((Xs.shape[0], Xt.shape[0]))) assert not np.any(np.isnan(nx.to_numpy(otda.cost_))), "cost is finite" assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0]))) assert np.all(np.isfinite(nx.to_numpy(otda.coupling_))), "coupling is finite" # test margin constraints mu_s = unif(ns) mu_t = unif(nt) assert_allclose( nx.to_numpy(nx.sum(otda.coupling_, axis=0)), mu_t, rtol=1e-3, atol=1e-3) assert_allclose( nx.to_numpy(nx.sum(otda.coupling_, axis=1)), mu_s, rtol=1e-3, atol=1e-3) # test transform transp_Xs = otda.transform(Xs=Xs) assert_equal(transp_Xs.shape, Xs.shape) Xs_new = nx.from_numpy(make_data_classif('3gauss', ns + 1)[0]) transp_Xs_new = otda.transform(Xs_new) # check that the oos method is working assert_equal(transp_Xs_new.shape, Xs_new.shape) # test inverse transform transp_Xt = otda.inverse_transform(Xt=Xt) assert_equal(transp_Xt.shape, Xt.shape) # check label propagation transp_yt = 
otda.transform_labels(ys) assert_equal(transp_yt.shape[0], yt.shape[0]) assert_equal(transp_yt.shape[1], len(np.unique(ys))) # check inverse label propagation transp_ys = otda.inverse_transform_labels(yt) assert_equal(transp_ys.shape[0], ys.shape[0]) assert_equal(transp_ys.shape[1], len(np.unique(yt))) Xt_new = nx.from_numpy(make_data_classif('3gauss2', nt + 1)[0]) transp_Xt_new = otda.inverse_transform(Xt=Xt_new) # check that the oos method is working assert_equal(transp_Xt_new.shape, Xt_new.shape) # test fit_transform transp_Xs = otda.fit_transform(Xs=Xs, Xt=Xt) assert_equal(transp_Xs.shape, Xs.shape) # test unsupervised vs semi-supervised mode otda_unsup = ot.da.EMDTransport() otda_unsup.fit(Xs=Xs, ys=ys, Xt=Xt) assert_equal(otda_unsup.cost_.shape, ((Xs.shape[0], Xt.shape[0]))) assert not np.any(np.isnan(nx.to_numpy(otda_unsup.cost_))), "cost is finite" assert_equal(otda_unsup.coupling_.shape, ((Xs.shape[0], Xt.shape[0]))) assert np.all(np.isfinite(nx.to_numpy(otda_unsup.coupling_))), "coupling is finite" n_unsup = nx.sum(otda_unsup.cost_) otda_semi = ot.da.EMDTransport() otda_semi.fit(Xs=Xs, ys=ys, Xt=Xt, yt=yt) assert_equal(otda_semi.cost_.shape, ((Xs.shape[0], Xt.shape[0]))) assert not np.any(np.isnan(nx.to_numpy(otda_semi.cost_))), "cost is finite" assert_equal(otda_semi.coupling_.shape, ((Xs.shape[0], Xt.shape[0]))) assert np.all(np.isfinite(nx.to_numpy(otda_semi.coupling_))), "coupling is finite" n_semisup = nx.sum(otda_semi.cost_) # check that the cost matrix norms are indeed different assert np.allclose(n_unsup, n_semisup, atol=1e-7), "semisupervised mode is not working" # check that the coupling forbids mass transport between labeled source # and labeled target samples mass_semi = nx.sum( otda_semi.coupling_[otda_semi.cost_ == otda_semi.limit_max]) mass_semi = otda_semi.coupling_[otda_semi.cost_ == otda_semi.limit_max] # we need to use a small tolerance here, otherwise the test breaks assert_allclose(nx.to_numpy(mass_semi), np.zeros(list(mass_semi.shape)), rtol=1e-2, atol=1e-2) @pytest.skip_backend("jax") @pytest.skip_backend("tf") @pytest.mark.parametrize("kernel", ["linear", "gaussian"]) @pytest.mark.parametrize("bias", ["unbiased", "biased"]) def test_mapping_transport_class(nx, kernel, bias): """test_mapping_transport """ ns = 20 nt = 30 Xs, ys = make_data_classif('3gauss', ns) Xt, yt = make_data_classif('3gauss2', nt) Xs_new, _ = make_data_classif('3gauss', ns + 1) Xs, Xt, Xs_new = nx.from_numpy(Xs, Xt, Xs_new) # Mapping tests bias = bias == "biased" otda = ot.da.MappingTransport(kernel=kernel, bias=bias) otda.fit(Xs=Xs, Xt=Xt) assert hasattr(otda, "coupling_") assert hasattr(otda, "mapping_") assert hasattr(otda, "log_") assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0]))) S = Xs.shape[0] if kernel == "gaussian" else Xs.shape[1] # if linear if bias: S += 1 assert_equal(otda.mapping_.shape, ((S, Xt.shape[1]))) # test margin constraints mu_s = unif(ns) mu_t = unif(nt) assert_allclose( nx.to_numpy(nx.sum(otda.coupling_, axis=0)), mu_t, rtol=1e-3, atol=1e-3) assert_allclose( nx.to_numpy(nx.sum(otda.coupling_, axis=1)), mu_s, rtol=1e-3, atol=1e-3) # test transform transp_Xs = otda.transform(Xs=Xs) assert_equal(transp_Xs.shape, Xs.shape) transp_Xs_new = otda.transform(Xs_new) # check that the oos method is working assert_equal(transp_Xs_new.shape, Xs_new.shape) # check everything runs well with log=True otda = ot.da.MappingTransport(kernel=kernel, bias=bias, log=True) otda.fit(Xs=Xs, Xt=Xt) assert len(otda.log_.keys()) != 0 @pytest.skip_backend("jax") 
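# (illustrative aside, not part of the original test file: MappingTransport
# fits an explicit map stored in mapping_, so out-of-sample points are
# transported by applying that map rather than by barycentric interpolation;
# minimal sketch assuming standard POT:
#     otda = ot.da.MappingTransport(kernel="gaussian", bias=True)
#     otda.fit(Xs=Xs, Xt=Xt)
#     Xs_new_mapped = otda.transform(Xs=Xs_new)
# )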
@pytest.skip_backend("tf") def test_mapping_transport_class_specific_seed(nx): # check that it does not crash when derphi is very close to 0 ns = 20 nt = 30 rng = np.random.RandomState(39) Xs, ys = make_data_classif('3gauss', ns, random_state=rng) Xt, yt = make_data_classif('3gauss2', nt, random_state=rng) otda = ot.da.MappingTransport(kernel="gaussian", bias=False) otda.fit(Xs=nx.from_numpy(Xs), Xt=nx.from_numpy(Xt)) @pytest.skip_backend("jax") @pytest.skip_backend("tf") def test_linear_mapping_class(nx): ns = 50 nt = 50 Xs, ys = make_data_classif('3gauss', ns) Xt, yt = make_data_classif('3gauss2', nt) Xsb, Xtb = nx.from_numpy(Xs, Xt) for log in [True, False]: otmap = ot.da.LinearTransport(log=log) otmap.fit(Xs=Xsb, Xt=Xtb) assert hasattr(otmap, "A_") assert hasattr(otmap, "B_") assert hasattr(otmap, "A1_") assert hasattr(otmap, "B1_") Xst = nx.to_numpy(otmap.transform(Xs=Xsb)) Ct = np.cov(Xt.T) Cst = np.cov(Xst.T) np.testing.assert_allclose(Ct, Cst, rtol=1e-2, atol=1e-2) Xts = nx.to_numpy(otmap.inverse_transform(Xt=Xtb)) Cs = np.cov(Xs.T) Cts = np.cov(Xts.T) np.testing.assert_allclose(Cs, Cts, rtol=1e-2, atol=1e-2) @pytest.skip_backend("jax") @pytest.skip_backend("tf") def test_linear_gw_mapping_class(nx): ns = 50 nt = 50 Xs, ys = make_data_classif('3gauss', ns) Xt, yt = make_data_classif('3gauss2', nt) Xsb, Xtb = nx.from_numpy(Xs, Xt) for log in [True, False]: otmap = ot.da.LinearGWTransport(log=log) otmap.fit(Xs=Xsb, Xt=Xtb) assert hasattr(otmap, "A_") assert hasattr(otmap, "B_") assert hasattr(otmap, "A1_") assert hasattr(otmap, "B1_") Xst = nx.to_numpy(otmap.transform(Xs=Xsb)) Ct = np.cov(Xt.T) Cst = np.cov(Xst.T) np.testing.assert_allclose(Ct, Cst, rtol=1e-2, atol=1e-2) @pytest.skip_backend("jax") @pytest.skip_backend("tf") def test_jcpot_transport_class(nx): """test_jcpot_transport """ ns1 = 50 ns2 = 50 nt = 50 Xs1, ys1 = make_data_classif('3gauss', ns1) Xs2, ys2 = make_data_classif('3gauss', ns2) Xt, yt = make_data_classif('3gauss2', nt) Xs1, ys1, Xs2, ys2, Xt, yt = nx.from_numpy(Xs1, ys1, Xs2, ys2, Xt, yt) Xs = [Xs1, Xs2] ys = [ys1, ys2] for log in [True, False]: otda = ot.da.JCPOTTransport(reg_e=1, max_iter=10000, tol=1e-9, verbose=True, log=log) # test its computed otda.fit(Xs=Xs, ys=ys, Xt=Xt) assert hasattr(otda, "coupling_") assert hasattr(otda, "proportions_") assert hasattr(otda, "log_") # test dimensions of coupling for i, xs in enumerate(Xs): assert_equal(otda.coupling_[i].shape, ((xs.shape[0], Xt.shape[0]))) # test all margin constraints mu_t = unif(nt) for i in range(len(Xs)): # test margin constraints w.r.t. uniform target weights for each coupling matrix assert_allclose( nx.to_numpy(nx.sum(otda.coupling_[i], axis=0)), mu_t, rtol=1e-3, atol=1e-3) if log: # test margin constraints w.r.t. 
modified source weights for each source domain assert_allclose( nx.to_numpy( nx.dot(otda.log_['D1'][i], nx.sum(otda.coupling_[i], axis=1)) ), nx.to_numpy(otda.proportions_), rtol=1e-3, atol=1e-3 ) # test transform transp_Xs = otda.transform(Xs=Xs) [assert_equal(x.shape, y.shape) for x, y in zip(transp_Xs, Xs)] Xs_new = nx.from_numpy(make_data_classif('3gauss', ns1 + 1)[0]) transp_Xs_new = otda.transform(Xs_new) # check that the oos method is working assert_equal(transp_Xs_new.shape, Xs_new.shape) # check label propagation transp_yt = otda.transform_labels(ys) assert_equal(transp_yt.shape[0], yt.shape[0]) assert_equal(transp_yt.shape[1], len(np.unique(nx.to_numpy(*ys)))) # check inverse label propagation transp_ys = otda.inverse_transform_labels(yt) for x, y in zip(transp_ys, ys): assert_equal(x.shape[0], y.shape[0]) assert_equal(x.shape[1], len(np.unique(nx.to_numpy(y)))) def test_jcpot_barycenter(nx): """test_jcpot_barycenter """ ns1 = 50 ns2 = 50 nt = 50 sigma = 0.1 ps1 = .2 ps2 = .9 pt = .4 Xs1, ys1 = make_data_classif('2gauss_prop', ns1, nz=sigma, p=ps1) Xs2, ys2 = make_data_classif('2gauss_prop', ns2, nz=sigma, p=ps2) Xt, _ = make_data_classif('2gauss_prop', nt, nz=sigma, p=pt) Xs1b, ys1b, Xs2b, ys2b, Xtb = nx.from_numpy(Xs1, ys1, Xs2, ys2, Xt) Xsb = [Xs1b, Xs2b] ysb = [ys1b, ys2b] prop = ot.bregman.jcpot_barycenter(Xsb, ysb, Xtb, reg=.5, metric='sqeuclidean', numItermax=10000, stopThr=1e-9, verbose=False, log=False) np.testing.assert_allclose(nx.to_numpy(prop), [1 - pt, pt], rtol=1e-3, atol=1e-3) @pytest.mark.skipif(nosklearn, reason="No sklearn available") @pytest.skip_backend("jax") @pytest.skip_backend("tf") def test_emd_laplace_class(nx): """test_emd_laplace_transport """ ns = 50 nt = 50 Xs, ys = make_data_classif('3gauss', ns) Xt, yt = make_data_classif('3gauss2', nt) Xs, ys, Xt, yt = nx.from_numpy(Xs, ys, Xt, yt) for log in [True, False]: otda = ot.da.EMDLaplaceTransport(reg_lap=0.01, max_iter=1000, tol=1e-9, verbose=False, log=log) # test its computed otda.fit(Xs=Xs, ys=ys, Xt=Xt) assert hasattr(otda, "coupling_") assert hasattr(otda, "log_") # test dimensions of coupling assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0]))) # test all margin constraints mu_s = unif(ns) mu_t = unif(nt) assert_allclose( nx.to_numpy(nx.sum(otda.coupling_, axis=0)), mu_t, rtol=1e-3, atol=1e-3) assert_allclose( nx.to_numpy(nx.sum(otda.coupling_, axis=1)), mu_s, rtol=1e-3, atol=1e-3) # test transform transp_Xs = otda.transform(Xs=Xs) [assert_equal(x.shape, y.shape) for x, y in zip(transp_Xs, Xs)] Xs_new = nx.from_numpy(make_data_classif('3gauss', ns + 1)[0]) transp_Xs_new = otda.transform(Xs_new) # check that the oos method is working assert_equal(transp_Xs_new.shape, Xs_new.shape) # test inverse transform transp_Xt = otda.inverse_transform(Xt=Xt) assert_equal(transp_Xt.shape, Xt.shape) Xt_new = nx.from_numpy(make_data_classif('3gauss2', nt + 1)[0]) transp_Xt_new = otda.inverse_transform(Xt=Xt_new) # check that the oos method is working assert_equal(transp_Xt_new.shape, Xt_new.shape) # test fit_transform transp_Xs = otda.fit_transform(Xs=Xs, Xt=Xt) assert_equal(transp_Xs.shape, Xs.shape) # check label propagation transp_yt = otda.transform_labels(ys) assert_equal(transp_yt.shape[0], yt.shape[0]) assert_equal(transp_yt.shape[1], len(np.unique(nx.to_numpy(ys)))) # check inverse label propagation transp_ys = otda.inverse_transform_labels(yt) assert_equal(transp_ys.shape[0], ys.shape[0]) assert_equal(transp_ys.shape[1], len(np.unique(nx.to_numpy(yt)))) @pytest.mark.skipif(nocvxpy, reason="No 
CVXPY available") def test_nearest_brenier_potential(nx): X = nx.ones((2, 2)) for ssnb in [ot.da.NearestBrenierPotential(log=True), ot.da.NearestBrenierPotential(log=False)]: ssnb.fit(Xs=X, Xt=X) G_lu = ssnb.transform(Xs=X) # 'new' input isn't new, so should be equal to target np.testing.assert_almost_equal(nx.to_numpy(G_lu[0]), nx.to_numpy(X)) np.testing.assert_almost_equal(nx.to_numpy(G_lu[1]), nx.to_numpy(X)) @pytest.mark.skipif(nosklearn, reason="No sklearn available") @pytest.skip_backend("jax") @pytest.skip_backend("tf") def test_emd_laplace(nx): """Complements :code:`test_emd_laplace_class` for uncovered options in :code:`emd_laplace`""" ns = 50 nt = 50 Xs, ys = make_data_classif('3gauss', ns) Xt, yt = make_data_classif('3gauss2', nt) Xs, ys, Xt, yt = nx.from_numpy(Xs, ys, Xt, yt) M = ot.dist(Xs, Xt) with pytest.raises(ValueError): ot.da.emd_laplace(ot.unif(ns), ot.unif(nt), Xs, Xt, M, sim_param=['INVALID', 'INPUT', 2]) with pytest.raises(ValueError): ot.da.emd_laplace(ot.unif(ns), ot.unif(nt), Xs, Xt, M, sim=['INVALID', 'INPUT', 2]) # test all margin constraints with gaussian similarity and disp regularisation coupling = ot.da.emd_laplace(ot.unif(ns, type_as=Xs), ot.unif(nt, type_as=Xs), Xs, Xt, M, sim='gauss', reg='disp') assert_allclose( nx.to_numpy(nx.sum(coupling, axis=0)), unif(nt), rtol=1e-3, atol=1e-3) assert_allclose( nx.to_numpy(nx.sum(coupling, axis=1)), unif(ns), rtol=1e-3, atol=1e-3) @pytest.skip_backend("jax") @pytest.skip_backend("tf") def test_sinkhorn_l1l2_gl_cost_vectorized(nx): n_samples, n_labels = 150, 3 rng = np.random.RandomState(42) G = rng.rand(n_samples, n_samples) labels_a = rng.randint(n_labels, size=(n_samples,)) G, labels_a = nx.from_numpy(G), nx.from_numpy(labels_a) # previously used implementation for the cost estimator lstlab = nx.unique(labels_a) def f(G): res = 0 for i in range(G.shape[1]): for lab in lstlab: temp = G[labels_a == lab, i] res += nx.norm(temp) return res def df(G): W = nx.zeros(G.shape, type_as=G) for i in range(G.shape[1]): for lab in lstlab: temp = G[labels_a == lab, i] n = nx.norm(temp) if n: W[labels_a == lab, i] = temp / n return W # new vectorized implementation for the cost estimator labels_u, labels_idx = nx.unique(labels_a, return_inverse=True) n_labels = labels_u.shape[0] unroll_labels_idx = nx.eye(n_labels, type_as=labels_u)[None, labels_idx] def f2(G): G_split = nx.repeat(G.T[:, :, None], n_labels, axis=2) return nx.sum(nx.norm(G_split * unroll_labels_idx, axis=1)) def df2(G): G_split = nx.repeat(G.T[:, :, None], n_labels, axis=2) * unroll_labels_idx W = nx.norm(G_split * unroll_labels_idx, axis=1, keepdims=True) G_norm = G_split / nx.clip(W, 1e-12, None) return nx.sum(G_norm, axis=2).T assert np.allclose(f(G), f2(G)) assert np.allclose(df(G), df2(G)) python-pot-0.9.3+dfsg/test/test_dmmot.py000066400000000000000000000052121455713015700203030ustar00rootroot00000000000000"""Tests for ot.lp.dmmot module """ # Author: Ronak Mehta # Xizheng Yu # # License: MIT License import numpy as np import ot def create_test_data(nx): n = 4 a1 = ot.datasets.make_1D_gauss(n, m=20, s=5) a2 = ot.datasets.make_1D_gauss(n, m=60, s=8) A = np.vstack([a1, a2]).T x = np.arange(n, dtype=np.float64).reshape((n, 1)) A, x = nx.from_numpy(A, x) return A, x def test_dmmot_monge_1dgrid_loss(nx): A, x = create_test_data(nx) # Compute loss using dmmot_monge_1dgrid_loss primal_obj = ot.lp.dmmot_monge_1dgrid_loss(A) primal_obj = nx.to_numpy(primal_obj) expected_primal_obj = 0.13667759626298503 np.testing.assert_allclose(primal_obj, expected_primal_obj, 
rtol=1e-7, err_msg="Test failed: \ Expected different primal objective value") # Compute loss using exact OT solver with absolute ground metric A, x = nx.to_numpy(A, x) M = ot.utils.dist(x, metric='cityblock') # absolute ground metric bary, _ = ot.barycenter(A, M, 1e-2, weights=None, verbose=False, log=True) ot_obj = 0.0 for x in A.T: # deal with C-contiguous error from tensorflow backend (not sure why) x = np.ascontiguousarray(x) # compute loss _, log = ot.lp.emd(x, np.array(bary / np.sum(bary)), M, log=True) ot_obj += log['cost'] np.testing.assert_allclose(primal_obj, ot_obj, rtol=1e-7, err_msg="Test failed: \ Expected different primal objective value") def test_dmmot_monge_1dgrid_optimize(nx): # test discrete_mmot_converge result A, _ = create_test_data(nx) d = 2 niters = 10 result = ot.lp.dmmot_monge_1dgrid_optimize(A, niters, lr_init=1e-3, lr_decay=1) expected_obj = np.array([[0.05553516, 0.13082618, 0.27327479, 0.54036388], [0.04185365, 0.09570724, 0.24384705, 0.61859206]]) assert len(result) == d, "Test failed: Expected a list of length n" for i in range(d): np.testing.assert_allclose(result[i], expected_obj[i], atol=1e-7, rtol=1e-7, err_msg="Test failed: \ Expected vectors of all zeros") python-pot-0.9.3+dfsg/test/test_dr.py000066400000000000000000000135541455713015700176000ustar00rootroot00000000000000"""Tests for module dr on Dimensionality Reduction """ # Author: Remi Flamary # Minhui Huang # Antoine Collas # # License: MIT License import numpy as np import ot import pytest try: # test if autograd and pymanopt are installed import ot.dr nogo = False except ImportError: nogo = True @pytest.mark.skipif(nogo, reason="Missing modules (autograd or pymanopt)") def test_fda(): n_samples = 90 # nb samples in source and target datasets rng = np.random.RandomState(0) # generate gaussian dataset xs, ys = ot.datasets.make_data_classif('gaussrot', n_samples, random_state=rng) n_features_noise = 8 xs = np.hstack((xs, rng.randn(n_samples, n_features_noise))) p = 1 Pfda, projfda = ot.dr.fda(xs, ys, p) projfda(xs) np.testing.assert_allclose(np.sum(Pfda**2, 0), np.ones(p)) @pytest.mark.skipif(nogo, reason="Missing modules (autograd or pymanopt)") def test_wda(): n_samples = 100 # nb samples in source and target datasets rng = np.random.RandomState(0) # generate gaussian dataset xs, ys = ot.datasets.make_data_classif('gaussrot', n_samples, random_state=rng) n_features_noise = 8 xs = np.hstack((xs, rng.randn(n_samples, n_features_noise))) p = 2 Pwda, projwda = ot.dr.wda(xs, ys, p, maxiter=10) projwda(xs) np.testing.assert_allclose(np.sum(Pwda**2, 0), np.ones(p)) @pytest.mark.skipif(nogo, reason="Missing modules (autograd or pymanopt)") def test_wda_low_reg(): n_samples = 100 # nb samples in source and target datasets rng = np.random.RandomState(0) # generate gaussian dataset xs, ys = ot.datasets.make_data_classif('gaussrot', n_samples, random_state=rng) n_features_noise = 8 xs = np.hstack((xs, rng.randn(n_samples, n_features_noise))) p = 2 Pwda, projwda = ot.dr.wda(xs, ys, p, reg=0.01, maxiter=10, sinkhorn_method='sinkhorn_log') projwda(xs) np.testing.assert_allclose(np.sum(Pwda**2, 0), np.ones(p)) @pytest.mark.skipif(nogo, reason="Missing modules (autograd or pymanopt)") def test_wda_normalized(): n_samples = 100 # nb samples in source and target datasets rng = np.random.RandomState(0) # generate gaussian dataset xs, ys = ot.datasets.make_data_classif('gaussrot', n_samples, random_state=rng) n_features_noise = 8 xs = np.hstack((xs, rng.randn(n_samples, n_features_noise))) p = 2 P0 = rng.randn(10, 
p) P0 /= P0.sum(0, keepdims=True) Pwda, projwda = ot.dr.wda(xs, ys, p, maxiter=10, P0=P0, normalize=True) projwda(xs) np.testing.assert_allclose(np.sum(Pwda**2, 0), np.ones(p)) @pytest.mark.skipif(nogo, reason="Missing modules (autograd or pymanopt)") def test_prw(): d = 100 # Dimension n = 100 # Number samples k = 3 # Subspace dimension dim = 3 def fragmented_hypercube(n, d, dim, rng): assert dim <= d assert dim >= 1 assert dim == int(dim) a = (1. / n) * np.ones(n) b = (1. / n) * np.ones(n) # First measure : uniform on the hypercube X = rng.uniform(-1, 1, size=(n, d)) # Second measure : fragmentation tmp_y = rng.uniform(-1, 1, size=(n, d)) Y = tmp_y + 2 * np.sign(tmp_y) * np.array(dim * [1] + (d - dim) * [0]) return a, b, X, Y rng = np.random.RandomState(42) a, b, X, Y = fragmented_hypercube(n, d, dim, rng) tau = 0.002 reg = 0.2 pi, U = ot.dr.projection_robust_wasserstein(X, Y, a, b, tau, reg=reg, k=k, maxiter=1000, verbose=1) U0 = rng.randn(d, k) U0, _ = np.linalg.qr(U0) pi, U = ot.dr.projection_robust_wasserstein(X, Y, a, b, tau, U0=U0, reg=reg, k=k, maxiter=1000, verbose=1) @pytest.mark.skipif(nogo, reason="Missing modules (autograd or pymanopt)") def test_ewca(): d = 5 n_samples = 50 k = 3 rng = np.random.RandomState(0) # generate gaussian dataset A = rng.normal(size=(d, d)) Q, _ = np.linalg.qr(A) D = rng.normal(size=d) D = (D / np.linalg.norm(D)) ** 4 cov = Q @ np.diag(D) @ Q.T X = rng.multivariate_normal(np.zeros(d), cov, size=n_samples) X = X - X.mean(0, keepdims=True) assert X.shape == (n_samples, d) # compute first 3 components with BCD pi, U = ot.dr.ewca(X, reg=0.01, method='BCD', k=k, verbose=1, sinkhorn_method='sinkhorn_log') assert pi.shape == (n_samples, n_samples) assert (pi >= 0).all() assert np.allclose(pi.sum(0), 1 / n_samples, atol=1e-3) assert np.allclose(pi.sum(1), 1 / n_samples, atol=1e-3) assert U.shape == (d, k) assert np.allclose(U.T @ U, np.eye(k), atol=1e-3) # test that U contains the principal components U_first_eigvec = np.linalg.svd(X.T, full_matrices=False)[0][:, :k] _, cos, _ = np.linalg.svd(U.T @ U_first_eigvec, full_matrices=False) assert np.allclose(cos, np.ones(k), atol=1e-3) # compute first 3 components with MM pi, U = ot.dr.ewca(X, reg=0.01, method='MM', k=k, verbose=1, sinkhorn_method='sinkhorn_log') assert pi.shape == (n_samples, n_samples) assert (pi >= 0).all() assert np.allclose(pi.sum(0), 1 / n_samples, atol=1e-3) assert np.allclose(pi.sum(1), 1 / n_samples, atol=1e-3) assert U.shape == (d, k) assert np.allclose(U.T @ U, np.eye(k), atol=1e-3) # test that U contains the principal components U_first_eigvec = np.linalg.svd(X.T, full_matrices=False)[0][:, :k] _, cos, _ = np.linalg.svd(U.T @ U_first_eigvec, full_matrices=False) assert np.allclose(cos, np.ones(k), atol=1e-3) # compute last 3 components pi, U = ot.dr.ewca(X, reg=100000, method='MM', k=k, verbose=1, sinkhorn_method='sinkhorn_log') # test that U contains the last principal components U_last_eigvec = np.linalg.svd(X.T, full_matrices=False)[0][:, -k:] _, cos, _ = np.linalg.svd(U.T @ U_last_eigvec, full_matrices=False) assert np.allclose(cos, np.ones(k), atol=1e-3) python-pot-0.9.3+dfsg/test/test_factored.py000066400000000000000000000030011455713015700207440ustar00rootroot00000000000000"""Tests for main module ot.weak """ # Author: Remi Flamary # # License: MIT License import ot import numpy as np def test_factored_ot(): # test weak ot solver and identity stationary point n = 50 rng = np.random.RandomState(0) xs = rng.randn(n, 2) xt = rng.randn(n, 2) u = ot.utils.unif(n) Ga, Gb, X, log = 
ot.factored_optimal_transport(xs, xt, u, u, r=10, log=True) # check constraints np.testing.assert_allclose(u, Ga.sum(1)) np.testing.assert_allclose(u, Gb.sum(0)) Ga, Gb, X, log = ot.factored_optimal_transport(xs, xt, u, u, reg=1, r=10, log=True) # check constraints np.testing.assert_allclose(u, Ga.sum(1)) np.testing.assert_allclose(u, Gb.sum(0)) np.testing.assert_allclose(1, log['lazy_plan'][:].sum()) def test_factored_ot_backends(nx): # test weak ot solver for different backends n = 50 rng = np.random.RandomState(0) xs = rng.randn(n, 2) xt = rng.randn(n, 2) u = ot.utils.unif(n) xs2 = nx.from_numpy(xs) xt2 = nx.from_numpy(xt) u2 = nx.from_numpy(u) Ga2, Gb2, X2 = ot.factored_optimal_transport(xs2, xt2, u2, u2, r=10) # check constraints np.testing.assert_allclose(u, nx.to_numpy(Ga2).sum(1)) np.testing.assert_allclose(u, nx.to_numpy(Gb2).sum(0)) Ga2, Gb2, X2 = ot.factored_optimal_transport(xs2, xt2, reg=1, r=10, X0=X2) # check constraints np.testing.assert_allclose(u, nx.to_numpy(Ga2).sum(1)) np.testing.assert_allclose(u, nx.to_numpy(Gb2).sum(0)) python-pot-0.9.3+dfsg/test/test_gaussian.py000066400000000000000000000177621455713015700210120ustar00rootroot00000000000000"""Tests for module gaussian""" # Author: Theo Gnassounou # Remi Flamary # # License: MIT License import numpy as np import pytest import ot from ot.datasets import make_data_classif from ot.utils import is_all_finite def test_bures_wasserstein_mapping(nx): ns = 50 nt = 50 Xs, ys = make_data_classif('3gauss', ns) Xt, yt = make_data_classif('3gauss2', nt) ms = np.mean(Xs, axis=0)[None, :] mt = np.mean(Xt, axis=0)[None, :] Cs = np.cov(Xs.T) Ct = np.cov(Xt.T) Xsb, msb, mtb, Csb, Ctb = nx.from_numpy(Xs, ms, mt, Cs, Ct) A_log, b_log, log = ot.gaussian.bures_wasserstein_mapping(msb, mtb, Csb, Ctb, log=True) A, b = ot.gaussian.bures_wasserstein_mapping(msb, mtb, Csb, Ctb, log=False) Xst = nx.to_numpy(nx.dot(Xsb, A) + b) Xst_log = nx.to_numpy(nx.dot(Xsb, A_log) + b_log) Cst = np.cov(Xst.T) Cst_log = np.cov(Xst_log.T) np.testing.assert_allclose(Cst_log, Cst, rtol=1e-2, atol=1e-2) np.testing.assert_allclose(Ct, Cst, rtol=1e-2, atol=1e-2) @pytest.mark.parametrize("bias", [True, False]) def test_empirical_bures_wasserstein_mapping(nx, bias): ns = 50 nt = 50 Xs, ys = make_data_classif('3gauss', ns) Xt, yt = make_data_classif('3gauss2', nt) if not bias: ms = np.mean(Xs, axis=0)[None, :] mt = np.mean(Xt, axis=0)[None, :] Xs = Xs - ms Xt = Xt - mt Xsb, Xtb = nx.from_numpy(Xs, Xt) A, b, log = ot.gaussian.empirical_bures_wasserstein_mapping(Xsb, Xtb, log=True, bias=bias) A_log, b_log = ot.gaussian.empirical_bures_wasserstein_mapping(Xsb, Xtb, log=False, bias=bias) Xst = nx.to_numpy(nx.dot(Xsb, A) + b) Xst_log = nx.to_numpy(nx.dot(Xsb, A_log) + b_log) Ct = np.cov(Xt.T) Cst = np.cov(Xst.T) Cst_log = np.cov(Xst_log.T) np.testing.assert_allclose(Cst_log, Cst, rtol=1e-2, atol=1e-2) np.testing.assert_allclose(Ct, Cst, rtol=1e-2, atol=1e-2) def test_empirical_bures_wasserstein_mapping_numerical_error_warning(): rng = np.random.RandomState(42) Xs = rng.rand(766, 800) * 5 Xt = rng.rand(295, 800) * 2 with pytest.warns(): A, b = ot.gaussian.empirical_bures_wasserstein_mapping(Xs, Xt, reg=1e-8) assert not is_all_finite(A, b) def test_bures_wasserstein_distance(nx): ms, mt = np.array([0]).astype(np.float32), np.array([10]).astype(np.float32) Cs, Ct = np.array([[1]]).astype(np.float32), np.array([[1]]).astype(np.float32) msb, mtb, Csb, Ctb = nx.from_numpy(ms, mt, Cs, Ct) Wb_log, log = ot.gaussian.bures_wasserstein_distance(msb, mtb, Csb, Ctb, log=True) Wb = 
ot.gaussian.bures_wasserstein_distance(msb, mtb, Csb, Ctb, log=False) np.testing.assert_allclose(nx.to_numpy(Wb_log), nx.to_numpy(Wb), rtol=1e-2, atol=1e-2) np.testing.assert_allclose(10, nx.to_numpy(Wb), rtol=1e-2, atol=1e-2) @pytest.mark.parametrize("bias", [True, False]) def test_empirical_bures_wasserstein_distance(nx, bias): ns = 400 nt = 400 rng = np.random.RandomState(10) Xs = rng.normal(0, 1, ns)[:, np.newaxis] Xt = rng.normal(10 * bias, 1, nt)[:, np.newaxis] Xsb, Xtb = nx.from_numpy(Xs, Xt) Wb_log, log = ot.gaussian.empirical_bures_wasserstein_distance(Xsb, Xtb, log=True, bias=bias) Wb = ot.gaussian.empirical_bures_wasserstein_distance(Xsb, Xtb, log=False, bias=bias) np.testing.assert_allclose(nx.to_numpy(Wb_log), nx.to_numpy(Wb), rtol=1e-2, atol=1e-2) np.testing.assert_allclose(10 * bias, nx.to_numpy(Wb), rtol=1e-2, atol=1e-2) def test_bures_wasserstein_barycenter(nx): n = 50 k = 10 X = [] y = [] m = [] C = [] for _ in range(k): X_, y_ = make_data_classif('3gauss', n) m_ = np.mean(X_, axis=0)[None, :] C_ = np.cov(X_.T) X.append(X_) y.append(y_) m.append(m_) C.append(C_) m = np.array(m) C = np.array(C) X = nx.from_numpy(*X) m = nx.from_numpy(m) C = nx.from_numpy(C) mblog, Cblog, log = ot.gaussian.bures_wasserstein_barycenter(m, C, log=True) mb, Cb = ot.gaussian.bures_wasserstein_barycenter(m, C, log=False) np.testing.assert_allclose(Cb, Cblog, rtol=1e-2, atol=1e-2) np.testing.assert_allclose(mb, mblog, rtol=1e-2, atol=1e-2) # Test weights argument weights = nx.ones(k) / k mbw, Cbw = ot.gaussian.bures_wasserstein_barycenter(m, C, weights=weights, log=False) np.testing.assert_allclose(Cbw, Cb, rtol=1e-2, atol=1e-2) # test with closed form for diagonal covariance matrices Cdiag = [nx.diag(nx.diag(C[i])) for i in range(k)] Cdiag = nx.stack(Cdiag, axis=0) mbdiag, Cbdiag = ot.gaussian.bures_wasserstein_barycenter(m, Cdiag, log=False) Cdiag_sqrt = [nx.sqrtm(C) for C in Cdiag] Cdiag_sqrt = nx.stack(Cdiag_sqrt, axis=0) Cdiag_mean = nx.mean(Cdiag_sqrt, axis=0) Cdiag_cf = Cdiag_mean @ Cdiag_mean np.testing.assert_allclose(Cbdiag, Cdiag_cf, rtol=1e-2, atol=1e-2) @pytest.mark.parametrize("bias", [True, False]) def test_empirical_bures_wasserstein_barycenter(nx, bias): n = 50 k = 10 X = [] y = [] for _ in range(k): X_, y_ = make_data_classif('3gauss', n) X.append(X_) y.append(y_) X = nx.from_numpy(*X) mblog, Cblog, log = ot.gaussian.empirical_bures_wasserstein_barycenter(X, log=True, bias=bias) mb, Cb = ot.gaussian.empirical_bures_wasserstein_barycenter(X, log=False, bias=bias) np.testing.assert_allclose(Cb, Cblog, rtol=1e-2, atol=1e-2) np.testing.assert_allclose(mb, mblog, rtol=1e-2, atol=1e-2) @pytest.mark.parametrize("d_target", [1, 2, 3, 10]) def test_gaussian_gromov_wasserstein_distance(nx, d_target): ns = 400 nt = 400 rng = np.random.RandomState(10) Xs, ys = make_data_classif('3gauss', ns, random_state=rng) Xt, yt = make_data_classif('3gauss2', nt, random_state=rng) Xt = np.concatenate((Xt, rng.normal(0, 1, (nt, 8))), axis=1) Xt = Xt[:, 0:d_target].reshape((nt, d_target)) ms = np.mean(Xs, axis=0)[None, :] mt = np.mean(Xt, axis=0)[None, :] Cs = np.cov(Xs.T) Ct = np.cov(Xt.T).reshape((d_target, d_target)) Xsb, Xtb, msb, mtb, Csb, Ctb = nx.from_numpy(Xs, Xt, ms, mt, Cs, Ct) Gb, log = ot.gaussian.gaussian_gromov_wasserstein_distance(Csb, Ctb, log=True) Ge, log = ot.gaussian.empirical_gaussian_gromov_wasserstein_distance(Xsb, Xtb, log=True) # no log Ge0 = ot.gaussian.empirical_gaussian_gromov_wasserstein_distance(Xsb, Xtb, log=False) np.testing.assert_allclose(nx.to_numpy(Gb), nx.to_numpy(Ge), 
rtol=1e-2, atol=1e-2) np.testing.assert_allclose(nx.to_numpy(Ge), nx.to_numpy(Ge0), rtol=1e-2, atol=1e-2) @pytest.mark.parametrize("d_target", [1, 2, 3, 10]) def test_gaussian_gromov_wasserstein_mapping(nx, d_target): ns = 400 nt = 400 rng = np.random.RandomState(10) Xs, ys = make_data_classif('3gauss', ns, random_state=rng) Xt, yt = make_data_classif('3gauss2', nt, random_state=rng) Xt = np.concatenate((Xt, rng.normal(0, 1, (nt, 8))), axis=1) Xt = Xt[:, 0:d_target].reshape((nt, d_target)) ms = np.mean(Xs, axis=0)[None, :] mt = np.mean(Xt, axis=0)[None, :] Cs = np.cov(Xs.T) Ct = np.cov(Xt.T).reshape((d_target, d_target)) Xsb, Xtb, msb, mtb, Csb, Ctb = nx.from_numpy(Xs, Xt, ms, mt, Cs, Ct) A, b, log = ot.gaussian.gaussian_gromov_wasserstein_mapping(msb, mtb, Csb, Ctb, log=True) Ae, be, loge = ot.gaussian.empirical_gaussian_gromov_wasserstein_mapping(Xsb, Xtb, log=True) # no log + skewness Ae0, be0 = ot.gaussian.empirical_gaussian_gromov_wasserstein_mapping(Xsb, Xtb, log=False, sign_eigs='skewness') Xst = nx.to_numpy(nx.dot(Xsb, A) + b) Cst = np.cov(Xst.T) np.testing.assert_allclose(nx.to_numpy(A), nx.to_numpy(Ae)) if d_target <= 2: np.testing.assert_allclose(Ct, Cst) # test the other way around (target to source) Ai, bi, logi = ot.gaussian.gaussian_gromov_wasserstein_mapping(mtb, msb, Ctb, Csb, log=True) Xtt = nx.to_numpy(nx.dot(Xtb, Ai) + bi) Ctt = np.cov(Xtt.T) if d_target >= 2: np.testing.assert_allclose(Cs, Ctt) python-pot-0.9.3+dfsg/test/test_gnn.py000066400000000000000000000202551455713015700177510ustar00rootroot00000000000000"""Tests for module gnn""" # Author: Sonia Mazelet # RĂ©mi Flamary # # License: MIT License import pytest try: # test if pytorch_geometric is installed import torch_geometric import torch from torch_geometric.nn import Linear from torch_geometric.data import Data as GraphData from torch_geometric.loader import DataLoader import torch.nn as nn from ot.gnn import TFGWPooling, TWPooling except ImportError: torch_geometric = False @pytest.mark.skipif(not torch_geometric, reason="pytorch_geometric not installed") def test_TFGW_optim(): # Test the TFGW layer by passing two graphs through the layer and doing backpropagation. class pooling_TFGW(nn.Module): """ Pooling architecture using the TFGW layer. """ def __init__(self, n_features, n_templates, n_template_nodes): """ Pooling architecture using the TFGW layer. 
""" super().__init__() self.n_features = n_features self.n_templates = n_templates self.n_template_nodes = n_template_nodes self.TFGW = TFGWPooling(self.n_templates, self.n_template_nodes, self.n_features) self.linear = Linear(self.n_templates, 1) def forward(self, x, edge_index): x = self.TFGW(x, edge_index) x = self.linear(x) return x torch.manual_seed(0) n_templates = 3 n_template_nodes = 3 n_nodes = 10 n_features = 3 n_epochs = 3 C1 = torch.randint(0, 2, size=(n_nodes, n_nodes)) C2 = torch.randint(0, 2, size=(n_nodes, n_nodes)) edge_index1 = torch.stack(torch.where(C1 == 1)) edge_index2 = torch.stack(torch.where(C2 == 1)) x1 = torch.rand(n_nodes, n_features) x2 = torch.rand(n_nodes, n_features) graph1 = GraphData(x=x1, edge_index=edge_index1, y=torch.tensor([0.])) graph2 = GraphData(x=x2, edge_index=edge_index2, y=torch.tensor([1.])) dataset = DataLoader([graph1, graph2], batch_size=1) model_FGW = pooling_TFGW(n_features, n_templates, n_template_nodes) optimizer = torch.optim.Adam(model_FGW.parameters(), lr=0.01) criterion = torch.nn.CrossEntropyLoss() model_FGW.train() for i in range(n_epochs): for data in dataset: out = model_FGW(data.x, data.edge_index) loss = criterion(out, data.y) loss.backward() optimizer.step() optimizer.zero_grad() @pytest.mark.skipif(not torch_geometric, reason="pytorch_geometric not installed") def test_TFGW_variants(): # Test the TFGW layer by passing two graphs through the layer and doing backpropagation. class GNN_pooling(nn.Module): """ Pooling architecture using the TW layer. """ def __init__(self, n_features, n_templates, n_template_nodes, pooling_layer): """ Pooling architecture using the TW layer. """ super().__init__() self.n_features = n_features self.n_templates = n_templates self.n_template_nodes = n_template_nodes self.TFGW = pooling_layer self.linear = Linear(self.n_templates, 1) def forward(self, x, edge_index, batch=None): x = self.TFGW(x, edge_index, batch=batch) x = self.linear(x) return x n_templates = 3 n_template_nodes = 3 n_nodes = 10 n_features = 3 torch.manual_seed(0) C1 = torch.randint(0, 2, size=(n_nodes, n_nodes)) edge_index1 = torch.stack(torch.where(C1 == 1)) x1 = torch.rand(n_nodes, n_features) graph1 = GraphData(x=x1, edge_index=edge_index1, y=torch.tensor([0.])) batch1 = torch.tensor([1] * n_nodes) batch1[:n_nodes // 2] = 0 criterion = torch.nn.CrossEntropyLoss() for train_node_weights in [True, False]: for alpha in [None, 0, 0.5]: for multi_alpha in [True, False]: model = GNN_pooling(n_features, n_templates, n_template_nodes, pooling_layer=TFGWPooling(n_templates, n_template_nodes, n_features, alpha=alpha, multi_alpha=multi_alpha, train_node_weights=train_node_weights)) # predict out1 = model(graph1.x, graph1.edge_index) loss = criterion(out1, graph1.y) loss.backward() # predict on batch out1 = model(graph1.x, graph1.edge_index, batch1) @pytest.mark.skipif(not torch_geometric, reason="pytorch_geometric not installed") def test_TW_variants(): # Test the TFGW layer by passing two graphs through the layer and doing backpropagation. class GNN_pooling(nn.Module): """ Pooling architecture using the TW layer. """ def __init__(self, n_features, n_templates, n_template_nodes, pooling_layer): """ Pooling architecture using the TW layer. 
""" super().__init__() self.n_features = n_features self.n_templates = n_templates self.n_template_nodes = n_template_nodes self.TFGW = pooling_layer self.linear = Linear(self.n_templates, 1) def forward(self, x, edge_index, batch=None): x = self.TFGW(x, edge_index, batch=batch) x = self.linear(x) return x n_templates = 3 n_template_nodes = 3 n_nodes = 10 n_features = 3 torch.manual_seed(0) C1 = torch.randint(0, 2, size=(n_nodes, n_nodes)) edge_index1 = torch.stack(torch.where(C1 == 1)) x1 = torch.rand(n_nodes, n_features) graph1 = GraphData(x=x1, edge_index=edge_index1, y=torch.tensor([0.])) batch1 = torch.tensor([1] * n_nodes) batch1[:n_nodes // 2] = 0 criterion = torch.nn.CrossEntropyLoss() for train_node_weights in [True, False]: model = GNN_pooling(n_features, n_templates, n_template_nodes, pooling_layer=TWPooling(n_templates, n_template_nodes, n_features, train_node_weights=train_node_weights)) out1 = model(graph1.x, graph1.edge_index) loss = criterion(out1, graph1.y) loss.backward() # predict on batch out1 = model(graph1.x, graph1.edge_index, batch1) @pytest.mark.skipif(not torch_geometric, reason="pytorch_geometric not installed") def test_TW(): # Test the TW layer by passing two graphs through the layer and doing backpropagation. class pooling_TW(nn.Module): """ Pooling architecture using the TW layer. """ def __init__(self, n_features, n_templates, n_template_nodes): """ Pooling architecture using the TW layer. """ super().__init__() self.n_features = n_features self.n_templates = n_templates self.n_template_nodes = n_template_nodes self.TFGW = TWPooling(self.n_templates, self.n_template_nodes, self.n_features) self.linear = Linear(self.n_templates, 1) def forward(self, x, edge_index): x = self.TFGW(x, edge_index) x = self.linear(x) return x torch.manual_seed(0) n_templates = 3 n_template_nodes = 3 n_nodes = 10 n_features = 3 n_epochs = 3 C1 = torch.randint(0, 2, size=(n_nodes, n_nodes)) C2 = torch.randint(0, 2, size=(n_nodes, n_nodes)) edge_index1 = torch.stack(torch.where(C1 == 1)) edge_index2 = torch.stack(torch.where(C2 == 1)) x1 = torch.rand(n_nodes, n_features) x2 = torch.rand(n_nodes, n_features) graph1 = GraphData(x=x1, edge_index=edge_index1, y=torch.tensor([0.])) graph2 = GraphData(x=x2, edge_index=edge_index2, y=torch.tensor([1.])) dataset = DataLoader([graph1, graph2], batch_size=1) model_W = pooling_TW(n_features, n_templates, n_template_nodes) optimizer = torch.optim.Adam(model_W.parameters(), lr=0.01) criterion = torch.nn.CrossEntropyLoss() model_W.train() for i in range(n_epochs): for data in dataset: out = model_W(data.x, data.edge_index) loss = criterion(out, data.y) loss.backward() optimizer.step() optimizer.zero_grad() python-pot-0.9.3+dfsg/test/test_gromov.py000066400000000000000000003571521455713015700205110ustar00rootroot00000000000000"""Tests for module gromov """ # Author: Erwan Vautier # Nicolas Courty # Titouan Vayer # CĂ©dric Vincent-Cuaz # # License: MIT License import numpy as np import pytest import warnings import ot from ot.backend import NumpyBackend from ot.backend import torch, tf def test_gromov(nx): n_samples = 20 # nb samples mu_s = np.array([0, 0]) cov_s = np.array([[1, 0], [0, 1]]) xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=1) xt = xs[::-1].copy() p = ot.unif(n_samples) q = ot.unif(n_samples) G0 = p[:, None] * q[None, :] C1 = ot.dist(xs, xs) C2 = ot.dist(xt, xt) C1 /= C1.max() C2 /= C2.max() C1b, C2b, pb, qb, G0b = nx.from_numpy(C1, C2, p, q, G0) G = ot.gromov.gromov_wasserstein( C1, C2, None, q, 
'square_loss', G0=G0, verbose=True, alpha_min=0., alpha_max=1.) Gb = nx.to_numpy(ot.gromov.gromov_wasserstein( C1b, C2b, pb, None, 'square_loss', symmetric=True, G0=G0b, verbose=True)) # check constraints np.testing.assert_allclose(G, Gb, atol=1e-06) np.testing.assert_allclose( p, Gb.sum(1), atol=1e-04) # cf convergence gromov np.testing.assert_allclose( q, Gb.sum(0), atol=1e-04) # cf convergence gromov Id = (1 / (1.0 * n_samples)) * np.eye(n_samples, n_samples) np.testing.assert_allclose(Gb, np.flipud(Id), atol=1e-04) for armijo in [False, True]: gw, log = ot.gromov.gromov_wasserstein2(C1, C2, None, q, 'kl_loss', armijo=armijo, log=True) gwb, logb = ot.gromov.gromov_wasserstein2(C1b, C2b, pb, None, 'kl_loss', armijo=armijo, log=True) gwb = nx.to_numpy(gwb) gw_val = ot.gromov.gromov_wasserstein2(C1, C2, p, q, 'kl_loss', armijo=armijo, G0=G0, log=False) gw_valb = nx.to_numpy( ot.gromov.gromov_wasserstein2(C1b, C2b, pb, qb, 'kl_loss', armijo=armijo, G0=G0b, log=False) ) G = log['T'] Gb = nx.to_numpy(logb['T']) np.testing.assert_allclose(gw, gwb, atol=1e-06) np.testing.assert_allclose(gwb, 0, atol=1e-1, rtol=1e-1) np.testing.assert_allclose(gw_val, gw_valb, atol=1e-06) np.testing.assert_allclose(gwb, gw_valb, atol=1e-1, rtol=1e-1) # cf log=False # check constraints np.testing.assert_allclose(G, Gb, atol=1e-06) np.testing.assert_allclose( p, Gb.sum(1), atol=1e-04) # cf convergence gromov np.testing.assert_allclose( q, Gb.sum(0), atol=1e-04) # cf convergence gromov def test_asymmetric_gromov(nx): n_samples = 20 # nb samples rng = np.random.RandomState(0) C1 = rng.uniform(low=0., high=10, size=(n_samples, n_samples)) idx = np.arange(n_samples) rng.shuffle(idx) C2 = C1[idx, :][:, idx] p = ot.unif(n_samples) q = ot.unif(n_samples) G0 = p[:, None] * q[None, :] C1b, C2b, pb, qb, G0b = nx.from_numpy(C1, C2, p, q, G0) G, log = ot.gromov.gromov_wasserstein(C1, C2, p, q, 'square_loss', G0=G0, log=True, symmetric=False, verbose=True) Gb, logb = ot.gromov.gromov_wasserstein(C1b, C2b, pb, qb, 'square_loss', log=True, symmetric=False, G0=G0b, verbose=True) Gb = nx.to_numpy(Gb) # check constraints np.testing.assert_allclose(G, Gb, atol=1e-06) np.testing.assert_allclose( p, Gb.sum(1), atol=1e-04) # cf convergence gromov np.testing.assert_allclose( q, Gb.sum(0), atol=1e-04) # cf convergence gromov np.testing.assert_allclose(log['gw_dist'], 0., atol=1e-04) np.testing.assert_allclose(logb['gw_dist'], 0., atol=1e-04) gw, log = ot.gromov.gromov_wasserstein2(C1, C2, p, q, 'square_loss', G0=G0, log=True, symmetric=False, verbose=True) gwb, logb = ot.gromov.gromov_wasserstein2(C1b, C2b, pb, qb, 'square_loss', log=True, symmetric=False, G0=G0b, verbose=True) G = log['T'] Gb = nx.to_numpy(logb['T']) # check constraints np.testing.assert_allclose(G, Gb, atol=1e-06) np.testing.assert_allclose( p, Gb.sum(1), atol=1e-04) # cf convergence gromov np.testing.assert_allclose( q, Gb.sum(0), atol=1e-04) # cf convergence gromov np.testing.assert_allclose(log['gw_dist'], 0., atol=1e-04) np.testing.assert_allclose(logb['gw_dist'], 0., atol=1e-04) def test_gromov_integer_warnings(nx): n_samples = 10 # nb samples mu_s = np.array([0, 0]) cov_s = np.array([[1, 0], [0, 1]]) xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=1) xt = xs[::-1].copy() p = ot.unif(n_samples) q = ot.unif(n_samples) G0 = p[:, None] * q[None, :] C1 = ot.dist(xs, xs) C2 = ot.dist(xt, xt) C1 /= C1.max() C2 /= C2.max() C1 = C1.astype(np.int32) C1b, C2b, pb, qb, G0b = nx.from_numpy(C1, C2, p, q, G0) G = ot.gromov.gromov_wasserstein( C1, C2, 
None, q, 'square_loss', G0=G0, verbose=True, alpha_min=0., alpha_max=1.) Gb = nx.to_numpy(ot.gromov.gromov_wasserstein( C1b, C2b, pb, None, 'square_loss', symmetric=True, G0=G0b, verbose=True)) # check constraints np.testing.assert_allclose(G, Gb, atol=1e-06) np.testing.assert_allclose(G, 0., atol=1e-09) def test_gromov_dtype_device(nx): # setup n_samples = 20 # nb samples mu_s = np.array([0, 0]) cov_s = np.array([[1, 0], [0, 1]]) xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=4) xt = xs[::-1].copy() p = ot.unif(n_samples) q = ot.unif(n_samples) G0 = p[:, None] * q[None, :] C1 = ot.dist(xs, xs) C2 = ot.dist(xt, xt) C1 /= C1.max() C2 /= C2.max() for tp in nx.__type_list__: print(nx.dtype_device(tp)) C1b, C2b, pb, qb, G0b = nx.from_numpy(C1, C2, p, q, G0, type_as=tp) with warnings.catch_warnings(): warnings.filterwarnings('error') Gb = ot.gromov.gromov_wasserstein(C1b, C2b, pb, qb, 'square_loss', G0=G0b, verbose=True) gw_valb = ot.gromov.gromov_wasserstein2(C1b, C2b, pb, qb, 'kl_loss', armijo=True, G0=G0b, log=False) nx.assert_same_dtype_device(C1b, Gb) nx.assert_same_dtype_device(C1b, gw_valb) @pytest.mark.skipif(not tf, reason="tf not installed") def test_gromov_device_tf(): nx = ot.backend.TensorflowBackend() n_samples = 20 # nb samples mu_s = np.array([0, 0]) cov_s = np.array([[1, 0], [0, 1]]) xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=4) xt = xs[::-1].copy() p = ot.unif(n_samples) q = ot.unif(n_samples) G0 = p[:, None] * q[None, :] C1 = ot.dist(xs, xs) C2 = ot.dist(xt, xt) C1 /= C1.max() C2 /= C2.max() # Check that everything stays on the CPU with tf.device("/CPU:0"): C1b, C2b, pb, qb, G0b = nx.from_numpy(C1, C2, p, q, G0) Gb = ot.gromov.gromov_wasserstein(C1b, C2b, pb, qb, 'square_loss', G0=G0b, verbose=True) gw_valb = ot.gromov.gromov_wasserstein2(C1b, C2b, pb, qb, 'kl_loss', armijo=True, G0=G0b, log=False) nx.assert_same_dtype_device(C1b, Gb) nx.assert_same_dtype_device(C1b, gw_valb) if len(tf.config.list_physical_devices('GPU')) > 0: # Check that everything happens on the GPU C1b, C2b, pb, qb, G0b = nx.from_numpy(C1, C2, p, q, G0) Gb = ot.gromov.gromov_wasserstein(C1b, C2b, pb, qb, 'square_loss', verbose=True) gw_valb = ot.gromov.gromov_wasserstein2(C1b, C2b, pb, qb, 'kl_loss', armijo=True, log=False) nx.assert_same_dtype_device(C1b, Gb) nx.assert_same_dtype_device(C1b, gw_valb) assert nx.dtype_device(Gb)[1].startswith("GPU") def test_gromov2_gradients(): n_samples = 20 # nb samples mu_s = np.array([0, 0]) cov_s = np.array([[1, 0], [0, 1]]) xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=4) xt = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=5) p = ot.unif(n_samples) q = ot.unif(n_samples) C1 = ot.dist(xs, xs) C2 = ot.dist(xt, xt) C1 /= C1.max() C2 /= C2.max() if torch: devices = [torch.device("cpu")] if torch.cuda.is_available(): devices.append(torch.device("cuda")) for device in devices: # classical gradients p1 = torch.tensor(p, requires_grad=True, device=device) q1 = torch.tensor(q, requires_grad=True, device=device) C11 = torch.tensor(C1, requires_grad=True, device=device) C12 = torch.tensor(C2, requires_grad=True, device=device) # Test with exact line-search val = ot.gromov_wasserstein2(C11, C12, p1, q1) val.backward() assert val.device == p1.device assert q1.shape == q1.grad.shape assert p1.shape == p1.grad.shape assert C11.shape == C11.grad.shape assert C12.shape == C12.grad.shape # Test with armijo line-search # classical gradients p1 = torch.tensor(p, 
                              requires_grad=True, device=device)
            q1 = torch.tensor(q, requires_grad=True, device=device)
            C11 = torch.tensor(C1, requires_grad=True, device=device)
            C12 = torch.tensor(C2, requires_grad=True, device=device)

            q1.grad = None
            p1.grad = None
            C11.grad = None
            C12.grad = None

            val = ot.gromov_wasserstein2(C11, C12, p1, q1, armijo=True)
            val.backward()

            assert val.device == p1.device
            assert q1.shape == q1.grad.shape
            assert p1.shape == p1.grad.shape
            assert C11.shape == C11.grad.shape
            assert C12.shape == C12.grad.shape


def test_gw_helper_backend(nx):
    n_samples = 10  # nb samples

    mu = np.array([0, 0])
    cov = np.array([[1, 0], [0, 1]])

    xs = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov, random_state=0)
    xt = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov, random_state=1)

    p = ot.unif(n_samples)
    q = ot.unif(n_samples)
    G0 = p[:, None] * q[None, :]

    C1 = ot.dist(xs, xs)
    C2 = ot.dist(xt, xt)
    C1 /= C1.max()
    C2 /= C2.max()

    C1b, C2b, pb, qb, G0b = nx.from_numpy(C1, C2, p, q, G0)
    Gb, logb = ot.gromov.gromov_wasserstein(
        C1b, C2b, pb, qb, 'square_loss', armijo=False, symmetric=True, G0=G0b, log=True)

    # calls with nx=None
    constCb, hC1b, hC2b = ot.gromov.init_matrix(C1b, C2b, pb, qb, loss_fun='square_loss')

    def f(G):
        return ot.gromov.gwloss(constCb, hC1b, hC2b, G, None)

    def df(G):
        return ot.gromov.gwggrad(constCb, hC1b, hC2b, G, None)

    def line_search(cost, G, deltaG, Mi, cost_G):
        return ot.gromov.solve_gromov_linesearch(
            G, deltaG, cost_G, C1b, C2b, M=0., reg=1., nx=None)

    # feed the precomputed local optimum Gb to cg
    res, log = ot.optim.cg(pb, qb, 0., 1., f, df, Gb, line_search,
                           log=True, numItermax=1e4, stopThr=1e-9, stopThr2=1e-9)

    # check constraints
    np.testing.assert_allclose(res, Gb, atol=1e-06)


@pytest.mark.parametrize('loss_fun', [
    'square_loss',
    'kl_loss',
    pytest.param('unknown_loss', marks=pytest.mark.xfail(raises=ValueError)),
])
def test_gw_helper_validation(loss_fun):
    n_samples = 10  # nb samples
    mu = np.array([0, 0])
    cov = np.array([[1, 0], [0, 1]])
    xs = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov, random_state=0)
    xt = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov, random_state=1)
    p = ot.unif(n_samples)
    q = ot.unif(n_samples)
    C1 = ot.dist(xs, xs)
    C2 = ot.dist(xt, xt)
    ot.gromov.init_matrix(C1, C2, p, q, loss_fun=loss_fun)


@pytest.skip_backend("jax", reason="test very slow with jax backend")
@pytest.skip_backend("tf", reason="test very slow with tf backend")
@pytest.mark.parametrize('loss_fun', [
    'square_loss',
    'kl_loss',
    pytest.param('unknown_loss', marks=pytest.mark.xfail(raises=ValueError)),
])
def test_entropic_gromov(nx, loss_fun):
    n_samples = 10  # nb samples
    mu_s = np.array([0, 0])
    cov_s = np.array([[1, 0], [0, 1]])
    xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=42)
    xt = xs[::-1].copy()

    p = ot.unif(n_samples)
    q = ot.unif(n_samples)
    G0 = p[:, None] * q[None, :]

    C1 = ot.dist(xs, xs)
    C2 = ot.dist(xt, xt)
    C1 /= C1.max()
    C2 /= C2.max()

    C1b, C2b, pb, qb, G0b = nx.from_numpy(C1, C2, p, q, G0)

    G, log = ot.gromov.entropic_gromov_wasserstein(
        C1, C2, None, q, loss_fun, symmetric=None, G0=G0,
        epsilon=1e-2, max_iter=10, verbose=True, log=True)
    Gb = nx.to_numpy(ot.gromov.entropic_gromov_wasserstein(
        C1b, C2b, pb, None, loss_fun, symmetric=True, G0=None,
        epsilon=1e-2, max_iter=10, verbose=True, log=False
    ))

    # check constraints
    np.testing.assert_allclose(G, Gb, atol=1e-06)
    np.testing.assert_allclose(
        p, Gb.sum(1), atol=1e-04)  # cf convergence gromov
    np.testing.assert_allclose(
        q, Gb.sum(0), atol=1e-04)  # cf convergence gromov
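
# Note: the entropic solvers below are run with a small epsilon and very few
# iterations; these tests check cross-backend agreement and marginal
# feasibility rather than convergence to the exact GW optimum.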
@pytest.skip_backend("jax", reason="test very slow with jax backend")
@pytest.skip_backend("tf", reason="test very slow with tf backend")
@pytest.mark.parametrize('loss_fun', [
    'square_loss',
    'kl_loss',
    pytest.param('unknown_loss', marks=pytest.mark.xfail(raises=ValueError)),
])
def test_entropic_gromov2(nx, loss_fun):
    n_samples = 10  # nb samples
    mu_s = np.array([0, 0])
    cov_s = np.array([[1, 0], [0, 1]])
    xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=42)
    xt = xs[::-1].copy()

    p = ot.unif(n_samples)
    q = ot.unif(n_samples)
    G0 = p[:, None] * q[None, :]

    C1 = ot.dist(xs, xs)
    C2 = ot.dist(xt, xt)
    C1 /= C1.max()
    C2 /= C2.max()

    C1b, C2b, pb, qb, G0b = nx.from_numpy(C1, C2, p, q, G0)

    gw, log = ot.gromov.entropic_gromov_wasserstein2(
        C1, C2, p, None, loss_fun, symmetric=True, G0=None,
        max_iter=10, epsilon=1e-2, log=True)
    gwb, logb = ot.gromov.entropic_gromov_wasserstein2(
        C1b, C2b, None, qb, loss_fun, symmetric=None, G0=G0b,
        max_iter=10, epsilon=1e-2, log=True)
    gwb = nx.to_numpy(gwb)

    G = log['T']
    Gb = nx.to_numpy(logb['T'])

    np.testing.assert_allclose(gw, gwb, atol=1e-06)
    np.testing.assert_allclose(gw, 0, atol=1e-1, rtol=1e-1)

    # check constraints
    np.testing.assert_allclose(G, Gb, atol=1e-06)
    np.testing.assert_allclose(
        p, Gb.sum(1), atol=1e-04)  # cf convergence gromov
    np.testing.assert_allclose(
        q, Gb.sum(0), atol=1e-04)  # cf convergence gromov


@pytest.skip_backend("tf", reason="test very slow with tf backend")
def test_entropic_proximal_gromov(nx):
    n_samples = 10  # nb samples
    mu_s = np.array([0, 0])
    cov_s = np.array([[1, 0], [0, 1]])
    xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=42)
    xt = xs[::-1].copy()

    p = ot.unif(n_samples)
    q = ot.unif(n_samples)
    G0 = p[:, None] * q[None, :]

    C1 = ot.dist(xs, xs)
    C2 = ot.dist(xt, xt)
    C1 /= C1.max()
    C2 /= C2.max()

    C1b, C2b, pb, qb, G0b = nx.from_numpy(C1, C2, p, q, G0)

    with pytest.raises(ValueError):
        loss_fun = 'weird_loss_fun'
        G, log = ot.gromov.entropic_gromov_wasserstein(
            C1, C2, None, q, loss_fun, symmetric=None, G0=G0,
            epsilon=1e-1, max_iter=10, solver='PPA', verbose=True, log=True, numItermax=1)

    G, log = ot.gromov.entropic_gromov_wasserstein(
        C1, C2, None, q, 'square_loss', symmetric=None, G0=G0,
        epsilon=1e-1, max_iter=10, solver='PPA', verbose=True, log=True, numItermax=1)
    Gb = nx.to_numpy(ot.gromov.entropic_gromov_wasserstein(
        C1b, C2b, pb, None, 'square_loss', symmetric=True, G0=None,
        epsilon=1e-1, max_iter=10, solver='PPA', verbose=True, log=False, numItermax=1
    ))

    # check constraints
    np.testing.assert_allclose(G, Gb, atol=1e-06)
    np.testing.assert_allclose(
        p, Gb.sum(1), atol=1e-04)  # cf convergence gromov
    np.testing.assert_allclose(
        q, Gb.sum(0), atol=1e-02)  # cf convergence gromov

    gw, log = ot.gromov.entropic_gromov_wasserstein2(
        C1, C2, p, q, 'kl_loss', symmetric=True, G0=None,
        max_iter=10, epsilon=1e-1, solver='PPA', warmstart=True, log=True)
    gwb, logb = ot.gromov.entropic_gromov_wasserstein2(
        C1b, C2b, pb, qb, 'kl_loss', symmetric=None, G0=G0b,
        max_iter=10, epsilon=1e-1, solver='PPA', warmstart=True, log=True)
    gwb = nx.to_numpy(gwb)

    G = log['T']
    Gb = nx.to_numpy(logb['T'])

    np.testing.assert_allclose(gw, gwb, atol=1e-06)
    np.testing.assert_allclose(gw, 0, atol=1e-1, rtol=1e-1)

    # check constraints
    np.testing.assert_allclose(G, Gb, atol=1e-06)
    np.testing.assert_allclose(
        p, Gb.sum(1), atol=1e-04)  # cf convergence gromov
    np.testing.assert_allclose(
        q, Gb.sum(0), atol=1e-02)  # cf convergence gromov


@pytest.skip_backend("tf", reason="test very slow with tf backend")
def test_asymmetric_entropic_gromov(nx):
    n_samples = 10  # nb samples
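    # C1 is a random (hence non-symmetric) matrix and C2 a joint row/column
    # permutation of it, so the entropic GW cost should be close to zero.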
    rng = np.random.RandomState(0)
    C1 = rng.uniform(low=0., high=10, size=(n_samples, n_samples))
    idx = np.arange(n_samples)
    rng.shuffle(idx)
    C2 = C1[idx, :][:, idx]

    p = ot.unif(n_samples)
    q = ot.unif(n_samples)
    G0 = p[:, None] * q[None, :]

    C1b, C2b, pb, qb, G0b = nx.from_numpy(C1, C2, p, q, G0)

    G = ot.gromov.entropic_gromov_wasserstein(
        C1, C2, p, q, 'square_loss', symmetric=None, G0=G0,
        epsilon=1e-1, max_iter=5, verbose=True, log=False)
    Gb = nx.to_numpy(ot.gromov.entropic_gromov_wasserstein(
        C1b, C2b, pb, qb, 'square_loss', symmetric=False, G0=None,
        epsilon=1e-1, max_iter=5, verbose=True, log=False
    ))

    # check constraints
    np.testing.assert_allclose(G, Gb, atol=1e-06)
    np.testing.assert_allclose(
        p, Gb.sum(1), atol=1e-04)  # cf convergence gromov
    np.testing.assert_allclose(
        q, Gb.sum(0), atol=1e-04)  # cf convergence gromov

    gw = ot.gromov.entropic_gromov_wasserstein2(
        C1, C2, None, None, 'kl_loss', symmetric=False, G0=None,
        max_iter=5, epsilon=1e-1, log=False)
    gwb = ot.gromov.entropic_gromov_wasserstein2(
        C1b, C2b, pb, qb, 'kl_loss', symmetric=None, G0=G0b,
        max_iter=5, epsilon=1e-1, log=False)
    gwb = nx.to_numpy(gwb)

    np.testing.assert_allclose(gw, gwb, atol=1e-06)
    np.testing.assert_allclose(gw, 0, atol=1e-1, rtol=1e-1)


@pytest.skip_backend("jax", reason="test very slow with jax backend")
@pytest.skip_backend("tf", reason="test very slow with tf backend")
def test_entropic_gromov_dtype_device(nx):
    # setup
    n_samples = 5  # nb samples

    mu_s = np.array([0, 0])
    cov_s = np.array([[1, 0], [0, 1]])

    xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=42)
    xt = xs[::-1].copy()

    p = ot.unif(n_samples)
    q = ot.unif(n_samples)

    C1 = ot.dist(xs, xs)
    C2 = ot.dist(xt, xt)
    C1 /= C1.max()
    C2 /= C2.max()

    for tp in nx.__type_list__:
        print(nx.dtype_device(tp))

        C1b, C2b, pb, qb = nx.from_numpy(C1, C2, p, q, type_as=tp)

        for solver in ['PGD', 'PPA', 'BAPG']:
            if solver == 'BAPG':
                Gb = ot.gromov.BAPG_gromov_wasserstein(
                    C1b, C2b, pb, qb, max_iter=2, verbose=True)
                gw_valb = ot.gromov.BAPG_gromov_wasserstein2(
                    C1b, C2b, pb, qb, max_iter=2, verbose=True)
            else:
                Gb = ot.gromov.entropic_gromov_wasserstein(
                    C1b, C2b, pb, qb, max_iter=2, solver=solver, verbose=True)
                gw_valb = ot.gromov.entropic_gromov_wasserstein2(
                    C1b, C2b, pb, qb, max_iter=2, solver=solver, verbose=True)

            nx.assert_same_dtype_device(C1b, Gb)
            nx.assert_same_dtype_device(C1b, gw_valb)


def test_BAPG_gromov(nx):
    n_samples = 10  # nb samples

    mu_s = np.array([0, 0])
    cov_s = np.array([[1, 0], [0, 1]])

    xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=42)
    xt = xs[::-1].copy()

    p = ot.unif(n_samples)
    q = ot.unif(n_samples)
    G0 = p[:, None] * q[None, :]

    C1 = ot.dist(xs, xs)
    C2 = ot.dist(xt, xt)
    C1 /= C1.max()
    C2 /= C2.max()

    C1b, C2b, pb, qb, G0b = nx.from_numpy(C1, C2, p, q, G0)

    # complete test with marginal loss = True
    marginal_loss = True
    with pytest.raises(ValueError):
        loss_fun = 'weird_loss_fun'
        G, log = ot.gromov.BAPG_gromov_wasserstein(
            C1, C2, None, q, loss_fun, symmetric=None, G0=G0,
            epsilon=1e-1, max_iter=10, marginal_loss=marginal_loss, verbose=True, log=True)

    G, log = ot.gromov.BAPG_gromov_wasserstein(
        C1, C2, None, q, 'square_loss', symmetric=None, G0=G0,
        epsilon=1e-1, max_iter=10, marginal_loss=marginal_loss, verbose=True, log=True)
    Gb = nx.to_numpy(ot.gromov.BAPG_gromov_wasserstein(
        C1b, C2b, pb, None, 'square_loss', symmetric=True, G0=None,
        epsilon=1e-1, max_iter=10, marginal_loss=marginal_loss, verbose=True, log=False
    ))

    # check constraints
    np.testing.assert_allclose(G, Gb, atol=1e-06)
    np.testing.assert_allclose(
        p,
        Gb.sum(1), atol=1e-02)  # cf convergence gromov
    np.testing.assert_allclose(
        q, Gb.sum(0), atol=1e-02)  # cf convergence gromov

    with pytest.warns(UserWarning):
        gw = ot.gromov.BAPG_gromov_wasserstein2(
            C1, C2, p, q, 'kl_loss', symmetric=False, G0=None,
            max_iter=10, epsilon=1e-2, marginal_loss=marginal_loss, log=False)

    gw, log = ot.gromov.BAPG_gromov_wasserstein2(
        C1, C2, p, q, 'kl_loss', symmetric=False, G0=None,
        max_iter=10, epsilon=1., marginal_loss=marginal_loss, log=True)
    gwb, logb = ot.gromov.BAPG_gromov_wasserstein2(
        C1b, C2b, pb, qb, 'kl_loss', symmetric=None, G0=G0b,
        max_iter=10, epsilon=1., marginal_loss=marginal_loss, log=True)
    gwb = nx.to_numpy(gwb)

    G = log['T']
    Gb = nx.to_numpy(logb['T'])

    np.testing.assert_allclose(gw, gwb, atol=1e-06)
    np.testing.assert_allclose(gw, 0, atol=1e-1, rtol=1e-1)

    # check constraints
    np.testing.assert_allclose(G, Gb, atol=1e-06)
    np.testing.assert_allclose(
        p, Gb.sum(1), atol=1e-02)  # cf convergence gromov
    np.testing.assert_allclose(
        q, Gb.sum(0), atol=1e-02)  # cf convergence gromov

    marginal_loss = False
    G, log = ot.gromov.BAPG_gromov_wasserstein(
        C1, C2, None, q, 'square_loss', symmetric=None, G0=G0,
        epsilon=1e-1, max_iter=10, marginal_loss=marginal_loss, verbose=True, log=True)
    Gb = nx.to_numpy(ot.gromov.BAPG_gromov_wasserstein(
        C1b, C2b, pb, None, 'square_loss', symmetric=False, G0=None,
        epsilon=1e-1, max_iter=10, marginal_loss=marginal_loss, verbose=True, log=False
    ))


@pytest.skip_backend("tf", reason="test very slow with tf backend")
def test_entropic_fgw(nx):
    n_samples = 5  # nb samples

    mu_s = np.array([0, 0])
    cov_s = np.array([[1, 0], [0, 1]])

    xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=42)
    xt = xs[::-1].copy()

    rng = np.random.RandomState(42)
    ys = rng.randn(xs.shape[0], 2)
    yt = ys[::-1].copy()

    p = ot.unif(n_samples)
    q = ot.unif(n_samples)
    G0 = p[:, None] * q[None, :]

    C1 = ot.dist(xs, xs)
    C2 = ot.dist(xt, xt)
    C1 /= C1.max()
    C2 /= C2.max()

    M = ot.dist(ys, yt)

    Mb, C1b, C2b, pb, qb, G0b = nx.from_numpy(M, C1, C2, p, q, G0)

    with pytest.raises(ValueError):
        loss_fun = 'weird_loss_fun'
        G, log = ot.gromov.entropic_fused_gromov_wasserstein(
            M, C1, C2, None, None, loss_fun, symmetric=None, G0=G0,
            epsilon=1e-1, max_iter=10, verbose=True, log=True)

    G, log = ot.gromov.entropic_fused_gromov_wasserstein(
        M, C1, C2, None, None, 'square_loss', symmetric=None, G0=G0,
        epsilon=1e-1, max_iter=10, verbose=True, log=True)
    Gb = nx.to_numpy(ot.gromov.entropic_fused_gromov_wasserstein(
        Mb, C1b, C2b, pb, qb, 'square_loss', symmetric=True, G0=None,
        epsilon=1e-1, max_iter=10, verbose=True, log=False
    ))

    # check constraints
    np.testing.assert_allclose(G, Gb, atol=1e-06)
    np.testing.assert_allclose(
        p, Gb.sum(1), atol=1e-04)  # cf convergence gromov
    np.testing.assert_allclose(
        q, Gb.sum(0), atol=1e-04)  # cf convergence gromov

    fgw, log = ot.gromov.entropic_fused_gromov_wasserstein2(
        M, C1, C2, p, q, 'kl_loss', symmetric=True, G0=None,
        max_iter=10, epsilon=1e-1, log=True)
    fgwb, logb = ot.gromov.entropic_fused_gromov_wasserstein2(
        Mb, C1b, C2b, pb, qb, 'kl_loss', symmetric=None, G0=G0b,
        max_iter=10, epsilon=1e-1, log=True)
    fgwb = nx.to_numpy(fgwb)

    G = log['T']
    Gb = nx.to_numpy(logb['T'])

    np.testing.assert_allclose(fgw, fgwb, atol=1e-06)
    np.testing.assert_allclose(fgw, 0, atol=1e-1, rtol=1e-1)

    # check constraints
    np.testing.assert_allclose(G, Gb, atol=1e-06)
    np.testing.assert_allclose(
        p, Gb.sum(1), atol=1e-04)  # cf convergence gromov
    np.testing.assert_allclose(
        q, Gb.sum(0), atol=1e-04)  # cf convergence gromov
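
# The fused (FGW) variants below add a feature cost matrix M on top of the
# structure matrices C1/C2; the same cross-backend and marginal checks are
# repeated for them.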
slow with tf backend") def test_entropic_proximal_fgw(nx): n_samples = 5 # nb samples mu_s = np.array([0, 0]) cov_s = np.array([[1, 0], [0, 1]]) xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=42) xt = xs[::-1].copy() rng = np.random.RandomState(42) ys = rng.randn(xs.shape[0], 2) yt = ys[::-1].copy() p = ot.unif(n_samples) q = ot.unif(n_samples) G0 = p[:, None] * q[None, :] C1 = ot.dist(xs, xs) C2 = ot.dist(xt, xt) C1 /= C1.max() C2 /= C2.max() M = ot.dist(ys, yt) Mb, C1b, C2b, pb, qb, G0b = nx.from_numpy(M, C1, C2, p, q, G0) G, log = ot.gromov.entropic_fused_gromov_wasserstein( M, C1, C2, p, q, 'square_loss', symmetric=None, G0=G0, epsilon=1e-1, max_iter=10, solver='PPA', verbose=True, log=True, numItermax=1) Gb = nx.to_numpy(ot.gromov.entropic_fused_gromov_wasserstein( Mb, C1b, C2b, pb, qb, 'square_loss', symmetric=True, G0=None, epsilon=1e-1, max_iter=10, solver='PPA', verbose=True, log=False, numItermax=1 )) # check constraints np.testing.assert_allclose(G, Gb, atol=1e-06) np.testing.assert_allclose( p, Gb.sum(1), atol=1e-04) # cf convergence gromov np.testing.assert_allclose( q, Gb.sum(0), atol=1e-04) # cf convergence gromov fgw, log = ot.gromov.entropic_fused_gromov_wasserstein2( M, C1, C2, p, None, 'kl_loss', symmetric=True, G0=None, max_iter=5, epsilon=1e-1, solver='PPA', warmstart=True, log=True) fgwb, logb = ot.gromov.entropic_fused_gromov_wasserstein2( Mb, C1b, C2b, None, qb, 'kl_loss', symmetric=None, G0=G0b, max_iter=5, epsilon=1e-1, solver='PPA', warmstart=True, log=True) fgwb = nx.to_numpy(fgwb) G = log['T'] Gb = nx.to_numpy(logb['T']) np.testing.assert_allclose(fgw, fgwb, atol=1e-06) np.testing.assert_allclose(fgw, 0, atol=1e-1, rtol=1e-1) # check constraints np.testing.assert_allclose(G, Gb, atol=1e-06) np.testing.assert_allclose( p, Gb.sum(1), atol=1e-04) # cf convergence gromov np.testing.assert_allclose( q, Gb.sum(0), atol=1e-04) # cf convergence gromov def test_BAPG_fgw(nx): n_samples = 5 # nb samples mu_s = np.array([0, 0]) cov_s = np.array([[1, 0], [0, 1]]) xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=42) xt = xs[::-1].copy() rng = np.random.RandomState(42) ys = rng.randn(xs.shape[0], 2) yt = ys[::-1].copy() p = ot.unif(n_samples) q = ot.unif(n_samples) G0 = p[:, None] * q[None, :] C1 = ot.dist(xs, xs) C2 = ot.dist(xt, xt) C1 /= C1.max() C2 /= C2.max() M = ot.dist(ys, yt) Mb, C1b, C2b, pb, qb, G0b = nx.from_numpy(M, C1, C2, p, q, G0) with pytest.raises(ValueError): loss_fun = 'weird_loss_fun' G, log = ot.gromov.BAPG_fused_gromov_wasserstein( M, C1, C2, p, q, loss_fun=loss_fun, max_iter=1, log=True) # complete test with marginal loss = True marginal_loss = True G, log = ot.gromov.BAPG_fused_gromov_wasserstein( M, C1, C2, p, q, 'square_loss', symmetric=None, G0=G0, epsilon=1e-1, max_iter=10, marginal_loss=marginal_loss, log=True) Gb = nx.to_numpy(ot.gromov.BAPG_fused_gromov_wasserstein( Mb, C1b, C2b, pb, qb, 'square_loss', symmetric=True, G0=None, epsilon=1e-1, max_iter=10, marginal_loss=marginal_loss, verbose=True)) # check constraints np.testing.assert_allclose(G, Gb, atol=1e-06) np.testing.assert_allclose( p, Gb.sum(1), atol=1e-02) # cf convergence gromov np.testing.assert_allclose( q, Gb.sum(0), atol=1e-02) # cf convergence gromov with pytest.warns(UserWarning): fgw = ot.gromov.BAPG_fused_gromov_wasserstein2( M, C1, C2, p, q, 'kl_loss', symmetric=False, G0=None, max_iter=10, epsilon=1e-3, marginal_loss=marginal_loss, log=False) fgw, log = ot.gromov.BAPG_fused_gromov_wasserstein2( M, C1, C2, p, None, 
        'kl_loss', symmetric=True, G0=None,
        max_iter=5, epsilon=1, marginal_loss=marginal_loss, log=True)
    fgwb, logb = ot.gromov.BAPG_fused_gromov_wasserstein2(
        Mb, C1b, C2b, None, qb, 'kl_loss', symmetric=None, G0=G0b,
        max_iter=5, epsilon=1, marginal_loss=marginal_loss, log=True)
    fgwb = nx.to_numpy(fgwb)

    G = log['T']
    Gb = nx.to_numpy(logb['T'])

    np.testing.assert_allclose(fgw, fgwb, atol=1e-06)
    np.testing.assert_allclose(fgw, 0, atol=1e-1, rtol=1e-1)

    # check constraints
    np.testing.assert_allclose(G, Gb, atol=1e-06)
    np.testing.assert_allclose(
        p, Gb.sum(1), atol=1e-02)  # cf convergence gromov
    np.testing.assert_allclose(
        q, Gb.sum(0), atol=1e-02)  # cf convergence gromov

    # Tests with marginal_loss = False
    marginal_loss = False
    G, log = ot.gromov.BAPG_fused_gromov_wasserstein(
        M, C1, C2, p, q, 'square_loss', symmetric=False, G0=G0,
        epsilon=1e-1, max_iter=10, marginal_loss=marginal_loss, log=True)
    Gb = nx.to_numpy(ot.gromov.BAPG_fused_gromov_wasserstein(
        Mb, C1b, C2b, pb, qb, 'square_loss', symmetric=None, G0=None,
        epsilon=1e-1, max_iter=10, marginal_loss=marginal_loss, verbose=True))

    # check constraints
    np.testing.assert_allclose(G, Gb, atol=1e-06)
    np.testing.assert_allclose(
        p, Gb.sum(1), atol=1e-02)  # cf convergence gromov
    np.testing.assert_allclose(
        q, Gb.sum(0), atol=1e-02)  # cf convergence gromov


def test_asymmetric_entropic_fgw(nx):
    n_samples = 5  # nb samples
    rng = np.random.RandomState(0)
    C1 = rng.uniform(low=0., high=10, size=(n_samples, n_samples))
    idx = np.arange(n_samples)
    rng.shuffle(idx)
    C2 = C1[idx, :][:, idx]

    ys = rng.randn(n_samples, 2)
    yt = ys[idx, :]
    M = ot.dist(ys, yt)

    p = ot.unif(n_samples)
    q = ot.unif(n_samples)
    G0 = p[:, None] * q[None, :]

    Mb, C1b, C2b, pb, qb, G0b = nx.from_numpy(M, C1, C2, p, q, G0)

    G = ot.gromov.entropic_fused_gromov_wasserstein(
        M, C1, C2, p, q, 'square_loss', symmetric=None, G0=G0,
        max_iter=5, epsilon=1e-1, verbose=True, log=False)
    Gb = nx.to_numpy(ot.gromov.entropic_fused_gromov_wasserstein(
        Mb, C1b, C2b, pb, qb, 'square_loss', symmetric=False, G0=None,
        max_iter=5, epsilon=1e-1, verbose=True, log=False
    ))

    # check constraints
    np.testing.assert_allclose(G, Gb, atol=1e-06)
    np.testing.assert_allclose(
        p, Gb.sum(1), atol=1e-04)  # cf convergence gromov
    np.testing.assert_allclose(
        q, Gb.sum(0), atol=1e-04)  # cf convergence gromov

    fgw = ot.gromov.entropic_fused_gromov_wasserstein2(
        M, C1, C2, p, q, 'kl_loss', symmetric=False, G0=None,
        max_iter=5, epsilon=1e-1, log=False)
    fgwb = ot.gromov.entropic_fused_gromov_wasserstein2(
        Mb, C1b, C2b, pb, qb, 'kl_loss', symmetric=None, G0=G0b,
        max_iter=5, epsilon=1e-1, log=False)
    fgwb = nx.to_numpy(fgwb)

    np.testing.assert_allclose(fgw, fgwb, atol=1e-06)
    np.testing.assert_allclose(fgw, 0, atol=1e-1, rtol=1e-1)


@pytest.skip_backend("jax", reason="test very slow with jax backend")
@pytest.skip_backend("tf", reason="test very slow with tf backend")
def test_entropic_fgw_dtype_device(nx):
    # setup
    n_samples = 5  # nb samples

    mu_s = np.array([0, 0])
    cov_s = np.array([[1, 0], [0, 1]])

    rng = np.random.RandomState(42)
    xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=rng)
    xt = xs[::-1].copy()

    ys = rng.randn(xs.shape[0], 2)
    yt = ys[::-1].copy()

    p = ot.unif(n_samples)
    q = ot.unif(n_samples)

    C1 = ot.dist(xs, xs)
    C2 = ot.dist(xt, xt)
    C1 /= C1.max()
    C2 /= C2.max()

    M = ot.dist(ys, yt)

    for tp in nx.__type_list__:
        print(nx.dtype_device(tp))

        Mb, C1b, C2b, pb, qb = nx.from_numpy(M, C1, C2, p, q, type_as=tp)

        for solver in ['PGD', 'PPA', 'BAPG']:
            if solver == 'BAPG':
                Gb = ot.gromov.BAPG_fused_gromov_wasserstein(
                    Mb, C1b, C2b,
                    pb, qb, max_iter=2)
                fgw_valb = ot.gromov.BAPG_fused_gromov_wasserstein2(
                    Mb, C1b, C2b, pb, qb, max_iter=2)
            else:
                Gb = ot.gromov.entropic_fused_gromov_wasserstein(
                    Mb, C1b, C2b, pb, qb, max_iter=2, solver=solver)
                fgw_valb = ot.gromov.entropic_fused_gromov_wasserstein2(
                    Mb, C1b, C2b, pb, qb, max_iter=2, solver=solver)

            nx.assert_same_dtype_device(C1b, Gb)
            nx.assert_same_dtype_device(C1b, fgw_valb)


def test_entropic_fgw_barycenter(nx):
    ns = 5
    nt = 10

    rng = np.random.RandomState(42)
    Xs, ys = ot.datasets.make_data_classif('3gauss', ns, random_state=42)
    Xt, yt = ot.datasets.make_data_classif('3gauss2', nt, random_state=42)

    ys = rng.randn(Xs.shape[0], 2)
    yt = rng.randn(Xt.shape[0], 2)

    C1 = ot.dist(Xs)
    C2 = ot.dist(Xt)
    p1 = ot.unif(ns)
    p2 = ot.unif(nt)
    n_samples = 3
    p = ot.unif(n_samples)

    ysb, ytb, C1b, C2b, p1b, p2b, pb = nx.from_numpy(ys, yt, C1, C2, p1, p2, p)

    with pytest.raises(ValueError):
        loss_fun = 'weird_loss_fun'
        X, C, log = ot.gromov.entropic_fused_gromov_barycenters(
            n_samples, [ys, yt], [C1, C2], None, p, [.5, .5], loss_fun, 0.1,
            max_iter=10, tol=1e-3, verbose=True, warmstartT=True, random_state=42,
            solver='PPA', numItermax=10, log=True, symmetric=True,
        )
    with pytest.raises(ValueError):
        stop_criterion = 'unknown stop criterion'
        X, C, log = ot.gromov.entropic_fused_gromov_barycenters(
            n_samples, [ys, yt], [C1, C2], None, p, [.5, .5], 'square_loss', 0.1,
            max_iter=10, tol=1e-3, stop_criterion=stop_criterion, verbose=True,
            warmstartT=True, random_state=42, solver='PPA', numItermax=10,
            log=True, symmetric=True,
        )

    for stop_criterion in ['barycenter', 'loss']:
        X, C, log = ot.gromov.entropic_fused_gromov_barycenters(
            n_samples, [ys, yt], [C1, C2], None, p, [.5, .5], 'square_loss',
            epsilon=0.1, max_iter=10, tol=1e-3, stop_criterion=stop_criterion,
            verbose=True, warmstartT=True, random_state=42, solver='PPA',
            numItermax=10, log=True, symmetric=True
        )
        Xb, Cb = ot.gromov.entropic_fused_gromov_barycenters(
            n_samples, [ysb, ytb], [C1b, C2b], [p1b, p2b], None, [.5, .5],
            'square_loss', epsilon=0.1, max_iter=10, tol=1e-3,
            stop_criterion=stop_criterion, verbose=False, warmstartT=True,
            random_state=42, solver='PPA', numItermax=10, log=False, symmetric=True)
        Xb, Cb = nx.to_numpy(Xb, Cb)

        np.testing.assert_allclose(C, Cb, atol=1e-06)
        np.testing.assert_allclose(Cb.shape, (n_samples, n_samples))
        np.testing.assert_allclose(X, Xb, atol=1e-06)
        np.testing.assert_allclose(Xb.shape, (n_samples, ys.shape[1]))

    # test with 'kl_loss' and log=True
    # providing init_C, init_Y
    generator = ot.utils.check_random_state(42)
    xalea = generator.randn(n_samples, 2)
    init_C = ot.utils.dist(xalea, xalea)
    init_C /= init_C.max()
    init_Cb = nx.from_numpy(init_C)

    init_Y = np.zeros((n_samples, ys.shape[1]), dtype=ys.dtype)
    init_Yb = nx.from_numpy(init_Y)

    X, C, log = ot.gromov.entropic_fused_gromov_barycenters(
        n_samples, [ys, yt], [C1, C2], [p1, p2], p, None, 'kl_loss', 0.1, True,
        max_iter=10, tol=1e-3, verbose=False, warmstartT=False, random_state=42,
        solver='PPA', numItermax=1, init_C=init_C, init_Y=init_Y, log=True
    )
    Xb, Cb, logb = ot.gromov.entropic_fused_gromov_barycenters(
        n_samples, [ysb, ytb], [C1b, C2b], [p1b, p2b], pb, [.5, .5], 'kl_loss',
        0.1, True, max_iter=10, tol=1e-3, verbose=False, warmstartT=False,
        random_state=42, solver='PPA', numItermax=1, init_C=init_Cb,
        init_Y=init_Yb, log=True)
    Xb, Cb = nx.to_numpy(Xb, Cb)

    np.testing.assert_allclose(C, Cb, atol=1e-06)
    np.testing.assert_allclose(Cb.shape, (n_samples, n_samples))
    np.testing.assert_allclose(X, Xb, atol=1e-06)
    np.testing.assert_allclose(Xb.shape, (n_samples, ys.shape[1]))
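    # the PPA solver logs track the feature and structure errors separately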
    np.testing.assert_array_almost_equal(log['err_feature'], nx.to_numpy(*logb['err_feature']))
    np.testing.assert_array_almost_equal(log['err_structure'], nx.to_numpy(*logb['err_structure']))

    # add tests with fixed_structures or fixed_features
    init_C = ot.utils.dist(xalea, xalea)
    init_C /= init_C.max()
    init_Cb = nx.from_numpy(init_C)

    init_Y = np.zeros((n_samples, ys.shape[1]), dtype=ys.dtype)
    init_Yb = nx.from_numpy(init_Y)

    fixed_structure, fixed_features = True, False
    with pytest.raises(ot.utils.UndefinedParameter):
        # to raise an error when `fixed_structure=True` and `init_C=None`
        Xb, Cb = ot.gromov.entropic_fused_gromov_barycenters(
            n_samples, [ysb, ytb], [C1b, C2b], ps=[p1b, p2b], lambdas=None,
            fixed_structure=fixed_structure, init_C=None,
            fixed_features=fixed_features, p=None, max_iter=10, tol=1e-3
        )

    Xb, Cb = ot.gromov.entropic_fused_gromov_barycenters(
        n_samples, [ysb, ytb], [C1b, C2b], ps=[p1b, p2b], lambdas=None,
        fixed_structure=fixed_structure, init_C=init_Cb,
        fixed_features=fixed_features, max_iter=10, tol=1e-3
    )
    Xb, Cb = nx.to_numpy(Xb), nx.to_numpy(Cb)

    np.testing.assert_allclose(Cb, init_Cb)
    np.testing.assert_allclose(Xb.shape, (n_samples, ys.shape[1]))

    fixed_structure, fixed_features = False, True
    with pytest.raises(ot.utils.UndefinedParameter):
        # to raise an error when `fixed_features=True` and `init_Y=None`
        Xb, Cb, logb = ot.gromov.entropic_fused_gromov_barycenters(
            n_samples, [ysb, ytb], [C1b, C2b], [p1b, p2b], lambdas=[.5, .5],
            fixed_structure=fixed_structure, fixed_features=fixed_features,
            init_Y=None, p=pb, max_iter=10, tol=1e-3, warmstartT=True,
            log=True, random_state=98765, verbose=True
        )

    Xb, Cb, logb = ot.gromov.entropic_fused_gromov_barycenters(
        n_samples, [ysb, ytb], [C1b, C2b], [p1b, p2b], lambdas=[.5, .5],
        fixed_structure=fixed_structure, fixed_features=fixed_features,
        init_Y=init_Yb, p=pb, max_iter=10, tol=1e-3, warmstartT=True,
        log=True, random_state=98765, verbose=True
    )
    X, C = nx.to_numpy(Xb), nx.to_numpy(Cb)

    np.testing.assert_allclose(C.shape, (n_samples, n_samples))
    np.testing.assert_allclose(Xb, init_Yb)


def test_pointwise_gromov(nx):
    n_samples = 5  # nb samples
    mu_s = np.array([0, 0])
    cov_s = np.array([[1, 0], [0, 1]])
    xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=42)
    xt = xs[::-1].copy()

    p = ot.unif(n_samples)
    q = ot.unif(n_samples)

    C1 = ot.dist(xs, xs)
    C2 = ot.dist(xt, xt)
    C1 /= C1.max()
    C2 /= C2.max()

    C1b, C2b, pb, qb = nx.from_numpy(C1, C2, p, q)

    def loss(x, y):
        return np.abs(x - y)

    def lossb(x, y):
        return nx.abs(x - y)

    G, log = ot.gromov.pointwise_gromov_wasserstein(
        C1, C2, p, q, loss, max_iter=100, log=True, verbose=True, random_state=42)
    G = NumpyBackend().todense(G)
    Gb, logb = ot.gromov.pointwise_gromov_wasserstein(
        C1b, C2b, pb, qb, lossb, max_iter=100, log=True, verbose=True, random_state=42)
    Gb = nx.to_numpy(nx.todense(Gb))

    # check constraints
    np.testing.assert_allclose(G, Gb, atol=1e-06)
    np.testing.assert_allclose(
        p, Gb.sum(1), atol=1e-04)  # cf convergence gromov
    np.testing.assert_allclose(
        q, Gb.sum(0), atol=1e-04)  # cf convergence gromov
    np.testing.assert_allclose(float(logb['gw_dist_estimated']), 0.0, atol=1e-08)
    np.testing.assert_allclose(float(logb['gw_dist_std']), 0.0, atol=1e-08)

    G, log = ot.gromov.pointwise_gromov_wasserstein(
        C1, C2, p, q, loss, max_iter=100, alpha=0.1, log=True, verbose=True, random_state=42)
    G = NumpyBackend().todense(G)
    Gb, logb = ot.gromov.pointwise_gromov_wasserstein(
        C1b, C2b, pb, qb, lossb, max_iter=100, alpha=0.1, log=True, verbose=True, random_state=42)
    Gb = nx.to_numpy(nx.todense(Gb))
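    # same cross-backend comparison with a non-default alpha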
    np.testing.assert_allclose(G, Gb, atol=1e-06)


@pytest.skip_backend("tf", reason="test very slow with tf backend")
@pytest.skip_backend("jax", reason="test very slow with jax backend")
def test_sampled_gromov(nx):
    n_samples = 5  # nb samples

    mu_s = np.array([0, 0], dtype=np.float64)
    cov_s = np.array([[1, 0], [0, 1]], dtype=np.float64)

    xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=42)
    xt = xs[::-1].copy()

    p = ot.unif(n_samples)
    q = ot.unif(n_samples)

    C1 = ot.dist(xs, xs)
    C2 = ot.dist(xt, xt)
    C1 /= C1.max()
    C2 /= C2.max()

    C1b, C2b, pb, qb = nx.from_numpy(C1, C2, p, q)

    def loss(x, y):
        return np.abs(x - y)

    def lossb(x, y):
        return nx.abs(x - y)

    G, log = ot.gromov.sampled_gromov_wasserstein(
        C1, C2, p, q, loss, max_iter=20, nb_samples_grad=2,
        epsilon=1, log=True, verbose=True, random_state=42)
    Gb, logb = ot.gromov.sampled_gromov_wasserstein(
        C1b, C2b, pb, qb, lossb, max_iter=20, nb_samples_grad=2,
        epsilon=1, log=True, verbose=True, random_state=42)
    Gb = nx.to_numpy(Gb)

    # check constraints
    np.testing.assert_allclose(G, Gb, atol=1e-06)
    np.testing.assert_allclose(
        p, Gb.sum(1), atol=1e-04)  # cf convergence gromov
    np.testing.assert_allclose(
        q, Gb.sum(0), atol=1e-04)  # cf convergence gromov


def test_gromov_barycenter(nx):
    ns = 5
    nt = 8

    Xs, ys = ot.datasets.make_data_classif('3gauss', ns, random_state=42)
    Xt, yt = ot.datasets.make_data_classif('3gauss2', nt, random_state=42)

    C1 = ot.dist(Xs)
    C2 = ot.dist(Xt)
    p1 = ot.unif(ns)
    p2 = ot.unif(nt)
    n_samples = 3
    p = ot.unif(n_samples)

    C1b, C2b, p1b, p2b, pb = nx.from_numpy(C1, C2, p1, p2, p)

    with pytest.raises(ValueError):
        stop_criterion = 'unknown stop criterion'
        Cb = ot.gromov.gromov_barycenters(
            n_samples, [C1, C2], None, p, [.5, .5], 'square_loss', max_iter=10,
            tol=1e-3, stop_criterion=stop_criterion, verbose=False, random_state=42
        )

    for stop_criterion in ['barycenter', 'loss']:
        Cb = ot.gromov.gromov_barycenters(
            n_samples, [C1, C2], None, p, [.5, .5], 'square_loss', max_iter=10,
            tol=1e-3, stop_criterion=stop_criterion, verbose=False, random_state=42
        )
        Cbb = nx.to_numpy(ot.gromov.gromov_barycenters(
            n_samples, [C1b, C2b], [p1b, p2b], None, [.5, .5], 'square_loss',
            max_iter=10, tol=1e-3, stop_criterion=stop_criterion,
            verbose=False, random_state=42
        ))
        np.testing.assert_allclose(Cb, Cbb, atol=1e-06)
        np.testing.assert_allclose(Cbb.shape, (n_samples, n_samples))

        # test of gromov_barycenters with `log` on
        Cb_, err_ = ot.gromov.gromov_barycenters(
            n_samples, [C1, C2], [p1, p2], p, None, 'square_loss', max_iter=10,
            tol=1e-3, stop_criterion=stop_criterion, verbose=False,
            warmstartT=True, random_state=42, log=True
        )
        Cbb_, errb_ = ot.gromov.gromov_barycenters(
            n_samples, [C1b, C2b], [p1b, p2b], pb, [.5, .5], 'square_loss',
            max_iter=10, tol=1e-3, stop_criterion=stop_criterion, verbose=False,
            warmstartT=True, random_state=42, log=True
        )
        Cbb_ = nx.to_numpy(Cbb_)
        np.testing.assert_allclose(Cb_, Cbb_, atol=1e-06)
        np.testing.assert_array_almost_equal(err_['err'], nx.to_numpy(*errb_['err']))
        np.testing.assert_allclose(Cbb_.shape, (n_samples, n_samples))

    Cb2 = ot.gromov.gromov_barycenters(
        n_samples, [C1, C2], [p1, p2], p, [.5, .5], 'kl_loss', max_iter=10,
        tol=1e-3, warmstartT=True, random_state=42
    )
    Cb2b = nx.to_numpy(ot.gromov.gromov_barycenters(
        n_samples, [C1b, C2b], [p1b, p2b], pb, [.5, .5], 'kl_loss',
        max_iter=10, tol=1e-3, warmstartT=True, random_state=42
    ))
    np.testing.assert_allclose(Cb2, Cb2b, atol=1e-06)
    np.testing.assert_allclose(Cb2b.shape, (n_samples, n_samples))

    # test of gromov_barycenters with `log` on
    # providing init_C
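    # a deterministic init_C gives the numpy and backend runs the same
    # starting barycenter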
    generator = ot.utils.check_random_state(42)
    xalea = generator.randn(n_samples, 2)
    init_C = ot.utils.dist(xalea, xalea)
    init_C /= init_C.max()
    init_Cb = nx.from_numpy(init_C)

    Cb2_, err2_ = ot.gromov.gromov_barycenters(
        n_samples, [C1, C2], [p1, p2], p, [.5, .5], 'kl_loss', max_iter=10,
        tol=1e-3, verbose=False, random_state=42, log=True, init_C=init_C
    )
    Cb2b_, err2b_ = ot.gromov.gromov_barycenters(
        n_samples, [C1b, C2b], [p1b, p2b], pb, [.5, .5], 'kl_loss',
        max_iter=10, tol=1e-3, verbose=True, random_state=42,
        init_C=init_Cb, log=True
    )
    Cb2b_ = nx.to_numpy(Cb2b_)
    np.testing.assert_allclose(Cb2_, Cb2b_, atol=1e-06)
    np.testing.assert_array_almost_equal(err2_['err'], nx.to_numpy(*err2b_['err']))
    np.testing.assert_allclose(Cb2b_.shape, (n_samples, n_samples))


@pytest.mark.filterwarnings("ignore:divide")
def test_gromov_entropic_barycenter(nx):
    ns = 5
    nt = 10

    Xs, ys = ot.datasets.make_data_classif('3gauss', ns, random_state=42)
    Xt, yt = ot.datasets.make_data_classif('3gauss2', nt, random_state=42)

    C1 = ot.dist(Xs)
    C2 = ot.dist(Xt)
    p1 = ot.unif(ns)
    p2 = ot.unif(nt)
    n_samples = 2
    p = ot.unif(n_samples)

    C1b, C2b, p1b, p2b, pb = nx.from_numpy(C1, C2, p1, p2, p)

    with pytest.raises(ValueError):
        loss_fun = 'weird_loss_fun'
        Cb = ot.gromov.entropic_gromov_barycenters(
            n_samples, [C1, C2], None, p, [.5, .5], loss_fun, 1e-3,
            max_iter=10, tol=1e-3, verbose=True, warmstartT=True, random_state=42
        )
    with pytest.raises(ValueError):
        stop_criterion = 'unknown stop criterion'
        Cb = ot.gromov.entropic_gromov_barycenters(
            n_samples, [C1, C2], None, p, [.5, .5], 'square_loss', 1e-3,
            max_iter=10, tol=1e-3, stop_criterion=stop_criterion,
            verbose=True, warmstartT=True, random_state=42
        )

    Cb = ot.gromov.entropic_gromov_barycenters(
        n_samples, [C1, C2], None, p, [.5, .5], 'square_loss', 1e-3,
        max_iter=10, tol=1e-3, verbose=True, warmstartT=True, random_state=42
    )
    Cbb = nx.to_numpy(ot.gromov.entropic_gromov_barycenters(
        n_samples, [C1b, C2b], [p1b, p2b], None, [.5, .5], 'square_loss',
        1e-3, max_iter=10, tol=1e-3, verbose=True, warmstartT=True, random_state=42
    ))
    np.testing.assert_allclose(Cb, Cbb, atol=1e-06)
    np.testing.assert_allclose(Cbb.shape, (n_samples, n_samples))

    # test of entropic_gromov_barycenters with `log` on
    for stop_criterion in ['barycenter', 'loss']:
        Cb_, err_ = ot.gromov.entropic_gromov_barycenters(
            n_samples, [C1, C2], [p1, p2], p, None, 'square_loss', 1e-3,
            max_iter=10, tol=1e-3, stop_criterion=stop_criterion,
            verbose=True, random_state=42, log=True
        )
        Cbb_, errb_ = ot.gromov.entropic_gromov_barycenters(
            n_samples, [C1b, C2b], [p1b, p2b], pb, [.5, .5], 'square_loss',
            1e-3, max_iter=10, tol=1e-3, stop_criterion=stop_criterion,
            verbose=True, random_state=42, log=True
        )
        Cbb_ = nx.to_numpy(Cbb_)
        np.testing.assert_allclose(Cb_, Cbb_, atol=1e-06)
        np.testing.assert_array_almost_equal(err_['err'], nx.to_numpy(*errb_['err']))
        np.testing.assert_allclose(Cbb_.shape, (n_samples, n_samples))

    Cb2 = ot.gromov.entropic_gromov_barycenters(
        n_samples, [C1, C2], [p1, p2], p, [.5, .5], 'kl_loss', 1e-3,
        max_iter=10, tol=1e-3, random_state=42
    )
    Cb2b = nx.to_numpy(ot.gromov.entropic_gromov_barycenters(
        n_samples, [C1b, C2b], [p1b, p2b], pb, [.5, .5], 'kl_loss',
        1e-3, max_iter=10, tol=1e-3, random_state=42
    ))
    np.testing.assert_allclose(Cb2, Cb2b, atol=1e-06)
    np.testing.assert_allclose(Cb2b.shape, (n_samples, n_samples))

    # test of entropic_gromov_barycenters with `log` on
    # providing init_C
    generator = ot.utils.check_random_state(42)
    xalea = generator.randn(n_samples, 2)
    init_C = ot.utils.dist(xalea, xalea)
    init_C /= init_C.max()
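    # the same init_C is fed to both the numpy and the backend calls below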
    init_Cb = nx.from_numpy(init_C)

    Cb2_, err2_ = ot.gromov.entropic_gromov_barycenters(
        n_samples, [C1, C2], [p1, p2], p, [.5, .5], 'kl_loss', 1e-3,
        max_iter=10, tol=1e-3, warmstartT=True, verbose=True, random_state=42,
        init_C=init_C, log=True
    )
    Cb2b_, err2b_ = ot.gromov.entropic_gromov_barycenters(
        n_samples, [C1b, C2b], [p1b, p2b], pb, [.5, .5], 'kl_loss',
        1e-3, max_iter=10, tol=1e-3, warmstartT=True, verbose=True,
        random_state=42, init_C=init_Cb, log=True
    )
    Cb2b_ = nx.to_numpy(Cb2b_)
    np.testing.assert_allclose(Cb2_, Cb2b_, atol=1e-06)
    np.testing.assert_array_almost_equal(err2_['err'], nx.to_numpy(*err2b_['err']))
    np.testing.assert_allclose(Cb2b_.shape, (n_samples, n_samples))


def test_fgw(nx):
    n_samples = 20  # nb samples

    mu_s = np.array([0, 0])
    cov_s = np.array([[1, 0], [0, 1]])

    xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=42)
    xt = xs[::-1].copy()

    rng = np.random.RandomState(42)
    ys = rng.randn(xs.shape[0], 2)
    yt = ys[::-1].copy()

    p = ot.unif(n_samples)
    q = ot.unif(n_samples)
    G0 = p[:, None] * q[None, :]

    C1 = ot.dist(xs, xs)
    C2 = ot.dist(xt, xt)
    C1 /= C1.max()
    C2 /= C2.max()

    M = ot.dist(ys, yt)
    M /= M.max()

    Mb, C1b, C2b, pb, qb, G0b = nx.from_numpy(M, C1, C2, p, q, G0)

    G, log = ot.gromov.fused_gromov_wasserstein(
        M, C1, C2, None, q, 'square_loss', alpha=0.5,
        armijo=True, symmetric=None, G0=G0, log=True)
    Gb, logb = ot.gromov.fused_gromov_wasserstein(
        Mb, C1b, C2b, pb, None, 'square_loss', alpha=0.5,
        armijo=True, symmetric=True, G0=G0b, log=True)
    Gb = nx.to_numpy(Gb)

    # check constraints
    np.testing.assert_allclose(G, Gb, atol=1e-06)
    np.testing.assert_allclose(
        p, Gb.sum(1), atol=1e-04)  # cf convergence fgw
    np.testing.assert_allclose(
        q, Gb.sum(0), atol=1e-04)  # cf convergence fgw

    Id = (1 / (1.0 * n_samples)) * np.eye(n_samples, n_samples)
    np.testing.assert_allclose(
        Gb, np.flipud(Id), atol=1e-04)  # cf convergence gromov

    fgw, log = ot.gromov.fused_gromov_wasserstein2(
        M, C1, C2, p, None, 'square_loss', armijo=True,
        symmetric=True, G0=None, alpha=0.5, log=True)
    fgwb, logb = ot.gromov.fused_gromov_wasserstein2(
        Mb, C1b, C2b, None, qb, 'square_loss', armijo=True,
        symmetric=None, G0=G0b, alpha=0.5, log=True)
    fgwb = nx.to_numpy(fgwb)

    G = log['T']
    Gb = nx.to_numpy(logb['T'])

    np.testing.assert_allclose(fgw, fgwb, atol=1e-08)
    np.testing.assert_allclose(fgwb, 0, atol=1e-1, rtol=1e-1)

    # check constraints
    np.testing.assert_allclose(G, Gb, atol=1e-06)
    np.testing.assert_allclose(
        p, Gb.sum(1), atol=1e-04)  # cf convergence gromov
    np.testing.assert_allclose(
        q, Gb.sum(0), atol=1e-04)  # cf convergence gromov


def test_asymmetric_fgw(nx):
    n_samples = 20  # nb samples
    rng = np.random.RandomState(0)
    C1 = rng.uniform(low=0., high=10, size=(n_samples, n_samples))
    idx = np.arange(n_samples)
    rng.shuffle(idx)
    C2 = C1[idx, :][:, idx]

    # add features
    F1 = rng.uniform(low=0., high=10, size=(n_samples, 1))
    F2 = F1[idx, :]

    p = ot.unif(n_samples)
    q = ot.unif(n_samples)
    G0 = p[:, None] * q[None, :]

    M = ot.dist(F1, F2)

    Mb, C1b, C2b, pb, qb, G0b = nx.from_numpy(M, C1, C2, p, q, G0)

    G, log = ot.gromov.fused_gromov_wasserstein(
        M, C1, C2, p, q, 'square_loss', alpha=0.5,
        G0=G0, log=True, symmetric=False, verbose=True)
    Gb, logb = ot.gromov.fused_gromov_wasserstein(
        Mb, C1b, C2b, pb, qb, 'square_loss', alpha=0.5,
        log=True, symmetric=None, G0=G0b, verbose=True)
    Gb = nx.to_numpy(Gb)

    # check constraints
    np.testing.assert_allclose(G, Gb, atol=1e-06)
    np.testing.assert_allclose(
        p, Gb.sum(1), atol=1e-04)  # cf convergence gromov
    np.testing.assert_allclose(
        q, Gb.sum(0), atol=1e-04)  # cf convergence gromov
    np.testing.assert_allclose(log['fgw_dist'], 0., atol=1e-04)
    np.testing.assert_allclose(logb['fgw_dist'], 0., atol=1e-04)

    fgw, log = ot.gromov.fused_gromov_wasserstein2(
        M, C1, C2, p, q, 'square_loss', alpha=0.5,
        G0=G0, log=True, symmetric=None, verbose=True)
    fgwb, logb = ot.gromov.fused_gromov_wasserstein2(
        Mb, C1b, C2b, pb, qb, 'square_loss', alpha=0.5,
        log=True, symmetric=False, G0=G0b, verbose=True)

    G = log['T']
    Gb = nx.to_numpy(logb['T'])

    # check constraints
    np.testing.assert_allclose(G, Gb, atol=1e-06)
    np.testing.assert_allclose(
        p, Gb.sum(1), atol=1e-04)  # cf convergence gromov
    np.testing.assert_allclose(
        q, Gb.sum(0), atol=1e-04)  # cf convergence gromov
    np.testing.assert_allclose(log['fgw_dist'], 0., atol=1e-04)
    np.testing.assert_allclose(logb['fgw_dist'], 0., atol=1e-04)

    # Tests with kl-loss:
    for armijo in [False, True]:
        G, log = ot.gromov.fused_gromov_wasserstein(
            M, C1, C2, p, q, 'kl_loss', alpha=0.5, armijo=armijo,
            G0=G0, log=True, symmetric=False, verbose=True)
        Gb, logb = ot.gromov.fused_gromov_wasserstein(
            Mb, C1b, C2b, pb, qb, 'kl_loss', alpha=0.5, armijo=armijo,
            log=True, symmetric=None, G0=G0b, verbose=True)
        Gb = nx.to_numpy(Gb)

        # check constraints
        np.testing.assert_allclose(G, Gb, atol=1e-06)
        np.testing.assert_allclose(
            p, Gb.sum(1), atol=1e-04)  # cf convergence gromov
        np.testing.assert_allclose(
            q, Gb.sum(0), atol=1e-04)  # cf convergence gromov
        np.testing.assert_allclose(log['fgw_dist'], 0., atol=1e-04)
        np.testing.assert_allclose(logb['fgw_dist'], 0., atol=1e-04)

        fgw, log = ot.gromov.fused_gromov_wasserstein2(
            M, C1, C2, p, q, 'kl_loss', alpha=0.5,
            G0=G0, log=True, symmetric=None, verbose=True)
        fgwb, logb = ot.gromov.fused_gromov_wasserstein2(
            Mb, C1b, C2b, pb, qb, 'kl_loss', alpha=0.5,
            log=True, symmetric=False, G0=G0b, verbose=True)

        G = log['T']
        Gb = nx.to_numpy(logb['T'])

        # check constraints
        np.testing.assert_allclose(G, Gb, atol=1e-06)
        np.testing.assert_allclose(
            p, Gb.sum(1), atol=1e-04)  # cf convergence gromov
        np.testing.assert_allclose(
            q, Gb.sum(0), atol=1e-04)  # cf convergence gromov
        np.testing.assert_allclose(log['fgw_dist'], 0., atol=1e-04)
        np.testing.assert_allclose(logb['fgw_dist'], 0., atol=1e-04)


def test_fgw_integer_warnings(nx):
    n_samples = 20  # nb samples
    rng = np.random.RandomState(0)
    C1 = rng.uniform(low=0., high=10, size=(n_samples, n_samples))
    idx = np.arange(n_samples)
    rng.shuffle(idx)
    C2 = C1[idx, :][:, idx]

    # add features
    F1 = rng.uniform(low=0., high=10, size=(n_samples, 1))
    F2 = F1[idx, :]

    p = ot.unif(n_samples)
    q = ot.unif(n_samples)
    G0 = p[:, None] * q[None, :]

    M = ot.dist(F1, F2).astype(np.int32)

    Mb, C1b, C2b, pb, qb, G0b = nx.from_numpy(M, C1, C2, p, q, G0)

    G, log = ot.gromov.fused_gromov_wasserstein(
        M, C1, C2, p, q, 'square_loss', alpha=0.5,
        G0=G0, log=True, symmetric=False, verbose=True)
    Gb, logb = ot.gromov.fused_gromov_wasserstein(
        Mb, C1b, C2b, pb, qb, 'square_loss', alpha=0.5,
        log=True, symmetric=None, G0=G0b, verbose=True)
    Gb = nx.to_numpy(Gb)

    # check constraints
    np.testing.assert_allclose(G, Gb, atol=1e-06)
    np.testing.assert_allclose(G, 0., atol=1e-06)


def test_fgw2_gradients():
    n_samples = 20  # nb samples

    mu_s = np.array([0, 0])
    cov_s = np.array([[1, 0], [0, 1]])

    xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=4)
    xt = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=5)

    p = ot.unif(n_samples)
    q = ot.unif(n_samples)

    C1 = ot.dist(xs, xs)
    C2 = ot.dist(xt, xt)
    M = ot.dist(xs, xt)
    C1 /= C1.max()
    C2 /= C2.max()

    if torch:
        devices = [torch.device("cpu")]
        if torch.cuda.is_available():
            devices.append(torch.device("cuda"))
        for device in devices:
            p1 = torch.tensor(p, requires_grad=True, device=device)
            q1 = torch.tensor(q, requires_grad=True, device=device)
            C11 = torch.tensor(C1, requires_grad=True, device=device)
            C12 = torch.tensor(C2, requires_grad=True, device=device)
            M1 = torch.tensor(M, requires_grad=True, device=device)

            val = ot.fused_gromov_wasserstein2(M1, C11, C12, p1, q1)
            val.backward()

            assert val.device == p1.device
            assert q1.shape == q1.grad.shape
            assert p1.shape == p1.grad.shape
            assert C11.shape == C11.grad.shape
            assert C12.shape == C12.grad.shape
            assert M1.shape == M1.grad.shape

            # full gradients with alpha
            p1 = torch.tensor(p, requires_grad=True, device=device)
            q1 = torch.tensor(q, requires_grad=True, device=device)
            C11 = torch.tensor(C1, requires_grad=True, device=device)
            C12 = torch.tensor(C2, requires_grad=True, device=device)
            M1 = torch.tensor(M, requires_grad=True, device=device)
            alpha = torch.tensor(0.5, requires_grad=True, device=device)

            val = ot.fused_gromov_wasserstein2(M1, C11, C12, p1, q1, alpha=alpha)
            val.backward()

            assert val.device == p1.device
            assert q1.shape == q1.grad.shape
            assert p1.shape == p1.grad.shape
            assert C11.shape == C11.grad.shape
            assert C12.shape == C12.grad.shape
            assert alpha.shape == alpha.grad.shape


def test_fgw_helper_backend(nx):
    n_samples = 20  # nb samples

    mu = np.array([0, 0])
    cov = np.array([[1, 0], [0, 1]])

    rng = np.random.RandomState(42)
    xs = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov, random_state=0)
    ys = rng.randn(xs.shape[0], 2)
    xt = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov, random_state=1)
    yt = rng.randn(xt.shape[0], 2)

    p = ot.unif(n_samples)
    q = ot.unif(n_samples)
    G0 = p[:, None] * q[None, :]

    C1 = ot.dist(xs, xs)
    C2 = ot.dist(xt, xt)
    C1 /= C1.max()
    C2 /= C2.max()

    M = ot.dist(ys, yt)
    M /= M.max()

    Mb, C1b, C2b, pb, qb, G0b = nx.from_numpy(M, C1, C2, p, q, G0)
    alpha = 0.5
    Gb, logb = ot.gromov.fused_gromov_wasserstein(
        Mb, C1b, C2b, pb, qb, 'square_loss', alpha=0.5,
        armijo=False, symmetric=True, G0=G0b, log=True)

    # calls with nx=None
    constCb, hC1b, hC2b = ot.gromov.init_matrix(C1b, C2b, pb, qb, loss_fun='square_loss')

    def f(G):
        return ot.gromov.gwloss(constCb, hC1b, hC2b, G, None)

    def df(G):
        return ot.gromov.gwggrad(constCb, hC1b, hC2b, G, None)

    def line_search(cost, G, deltaG, Mi, cost_G):
        return ot.gromov.solve_gromov_linesearch(
            G, deltaG, cost_G, C1b, C2b, M=(1 - alpha) * Mb, reg=alpha, nx=None)

    # feed the precomputed local optimum Gb to cg
    res, log = ot.optim.cg(pb, qb, (1 - alpha) * Mb, alpha, f, df, Gb, line_search,
                           log=True, numItermax=1e4, stopThr=1e-9, stopThr2=1e-9)

    def line_search(cost, G, deltaG, Mi, cost_G):
        return ot.optim.line_search_armijo(cost, G, deltaG, Mi, cost_G, nx=None)

    # feed the precomputed local optimum Gb to cg
    res_armijo, log_armijo = ot.optim.cg(
        pb, qb, (1 - alpha) * Mb, alpha, f, df, Gb, line_search,
        log=True, numItermax=1e4, stopThr=1e-9, stopThr2=1e-9)

    # check constraints
    np.testing.assert_allclose(res, Gb, atol=1e-06)
    np.testing.assert_allclose(res_armijo, Gb, atol=1e-06)


def test_fgw_barycenter(nx):
    ns = 10
    nt = 20

    Xs, ys = ot.datasets.make_data_classif('3gauss', ns, random_state=42)
    Xt, yt = ot.datasets.make_data_classif('3gauss2', nt, random_state=42)

    rng = np.random.RandomState(42)
    ys = rng.randn(Xs.shape[0], 2)
    yt = rng.randn(Xt.shape[0], 2)

    C1 = ot.dist(Xs)
    C2 = ot.dist(Xt)
    C1 /= C1.max()
    C2 /= C2.max()

    p1, p2 = ot.unif(ns), ot.unif(nt)
    n_samples = 3
    p = ot.unif(n_samples)

    ysb, ytb, C1b, C2b, p1b, p2b, pb = nx.from_numpy(ys, yt, C1, C2, p1, p2, p)
    lambdas = [.5,
               .5]
    Csb = [C1b, C2b]
    Ysb = [ysb, ytb]
    Xb, Cb, logb = ot.gromov.fgw_barycenters(
        n_samples, Ysb, Csb, None, lambdas, 0.5, fixed_structure=False,
        fixed_features=False, p=pb, loss_fun='square_loss', max_iter=10,
        tol=1e-3, random_state=12345, log=True
    )
    # test correspondence with utils function
    recovered_Cb = ot.gromov.update_square_loss(pb, lambdas, logb['Ts_iter'][-1], Csb)
    recovered_Xb = ot.gromov.update_feature_matrix(
        lambdas, [y.T for y in Ysb], logb['Ts_iter'][-1], pb).T

    np.testing.assert_allclose(Cb, recovered_Cb)
    np.testing.assert_allclose(Xb, recovered_Xb)

    xalea = rng.randn(n_samples, 2)
    init_C = ot.dist(xalea, xalea)
    init_C /= init_C.max()
    init_Cb = nx.from_numpy(init_C)

    with pytest.raises(ot.utils.UndefinedParameter):
        # to raise an error when `fixed_structure=True` and `init_C=None`
        Xb, Cb = ot.gromov.fgw_barycenters(
            n_samples, Ysb, Csb, ps=[p1b, p2b], lambdas=None, alpha=0.5,
            fixed_structure=True, init_C=None, fixed_features=False, p=None,
            loss_fun='square_loss', max_iter=10, tol=1e-3
        )

    Xb, Cb = ot.gromov.fgw_barycenters(
        n_samples, [ysb, ytb], [C1b, C2b], ps=[p1b, p2b], lambdas=None,
        alpha=0.5, fixed_structure=True, init_C=init_Cb, fixed_features=False,
        p=None, loss_fun='square_loss', max_iter=10, tol=1e-3
    )
    Xb, Cb = nx.to_numpy(Xb), nx.to_numpy(Cb)
    np.testing.assert_allclose(Cb.shape, (n_samples, n_samples))
    np.testing.assert_allclose(Xb.shape, (n_samples, ys.shape[1]))

    init_X = rng.randn(n_samples, ys.shape[1])
    init_Xb = nx.from_numpy(init_X)

    with pytest.raises(ot.utils.UndefinedParameter):
        # to raise an error when `fixed_features=True` and `init_X=None`
        Xb, Cb, logb = ot.gromov.fgw_barycenters(
            n_samples, [ysb, ytb], [C1b, C2b], [p1b, p2b], [.5, .5], 0.5,
            fixed_structure=False, fixed_features=True, init_X=None, p=pb,
            loss_fun='square_loss', max_iter=10, tol=1e-3, warmstartT=True,
            log=True, random_state=98765, verbose=True
        )

    Xb, Cb, logb = ot.gromov.fgw_barycenters(
        n_samples, [ysb, ytb], [C1b, C2b], [p1b, p2b], [.5, .5], 0.5,
        fixed_structure=False, fixed_features=True, init_X=init_Xb, p=pb,
        loss_fun='square_loss', max_iter=10, tol=1e-3, warmstartT=True,
        log=True, random_state=98765, verbose=True
    )
    X, C = nx.to_numpy(Xb), nx.to_numpy(Cb)
    np.testing.assert_allclose(C.shape, (n_samples, n_samples))
    np.testing.assert_allclose(X.shape, (n_samples, ys.shape[1]))

    # add test with 'kl_loss'
    with pytest.raises(ValueError):
        stop_criterion = 'unknown stop criterion'
        X, C, log = ot.gromov.fgw_barycenters(
            n_samples, [ys, yt], [C1, C2], [p1, p2], [.5, .5], 0.5,
            fixed_structure=False, fixed_features=False, p=p,
            loss_fun='kl_loss', max_iter=100, tol=1e-3,
            stop_criterion=stop_criterion, init_C=C, init_X=X,
            warmstartT=True, random_state=12345, log=True
        )

    for stop_criterion in ['barycenter', 'loss']:
        X, C, log = ot.gromov.fgw_barycenters(
            n_samples, [ys, yt], [C1, C2], [p1, p2], [.5, .5], 0.5,
            fixed_structure=False, fixed_features=False, p=p,
            loss_fun='kl_loss', max_iter=100, tol=1e-3,
            stop_criterion=stop_criterion, init_C=C, init_X=X,
            warmstartT=True, random_state=12345, log=True, verbose=True
        )
    np.testing.assert_allclose(C.shape, (n_samples, n_samples))
    np.testing.assert_allclose(X.shape, (n_samples, ys.shape[1]))

    # test correspondence with utils function
    recovered_C = ot.gromov.update_kl_loss(p, lambdas, log['T'], [C1, C2])
    np.testing.assert_allclose(C, recovered_C)


def test_gromov_wasserstein_linear_unmixing(nx):
    n = 4

    X1, y1 = ot.datasets.make_data_classif('3gauss', n, random_state=42)
    X2, y2 = ot.datasets.make_data_classif('3gauss2', n, random_state=42)

    C1 = ot.dist(X1)
    C2 = ot.dist(X2)
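    # stack the two structures into a 2-atom dictionary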
    Cdict = np.stack([C1, C2])
    p = ot.unif(n)

    C1b, C2b, Cdictb, pb = nx.from_numpy(C1, C2, Cdict, p)

    tol = 10**(-5)
    # Tests without regularization
    reg = 0.
    unmixing1, C1_emb, OT, reconstruction1 = ot.gromov.gromov_wasserstein_linear_unmixing(
        C1, Cdict, reg=reg, p=p, q=p,
        tol_outer=tol, tol_inner=tol, max_iter_outer=20, max_iter_inner=200
    )
    unmixing1b, C1b_emb, OTb, reconstruction1b = ot.gromov.gromov_wasserstein_linear_unmixing(
        C1b, Cdictb, reg=reg, p=None, q=None,
        tol_outer=tol, tol_inner=tol, max_iter_outer=20, max_iter_inner=200
    )
    unmixing2, C2_emb, OT, reconstruction2 = ot.gromov.gromov_wasserstein_linear_unmixing(
        C2, Cdict, reg=reg, p=None, q=None,
        tol_outer=tol, tol_inner=tol, max_iter_outer=20, max_iter_inner=200
    )
    unmixing2b, C2b_emb, OTb, reconstruction2b = ot.gromov.gromov_wasserstein_linear_unmixing(
        C2b, Cdictb, reg=reg, p=pb, q=pb,
        tol_outer=tol, tol_inner=tol, max_iter_outer=20, max_iter_inner=200
    )

    np.testing.assert_allclose(unmixing1, nx.to_numpy(unmixing1b), atol=5e-06)
    np.testing.assert_allclose(unmixing1, [1., 0.], atol=5e-01)
    np.testing.assert_allclose(unmixing2, nx.to_numpy(unmixing2b), atol=5e-06)
    np.testing.assert_allclose(unmixing2, [0., 1.], atol=5e-01)
    np.testing.assert_allclose(C1_emb, nx.to_numpy(C1b_emb), atol=1e-06)
    np.testing.assert_allclose(C2_emb, nx.to_numpy(C2b_emb), atol=1e-06)
    np.testing.assert_allclose(reconstruction1, nx.to_numpy(reconstruction1b), atol=1e-06)
    np.testing.assert_allclose(reconstruction2, nx.to_numpy(reconstruction2b), atol=1e-06)
    np.testing.assert_allclose(C1b_emb.shape, (n, n))
    np.testing.assert_allclose(C2b_emb.shape, (n, n))

    # Tests with regularization
    reg = 0.001
    unmixing1, C1_emb, OT, reconstruction1 = ot.gromov.gromov_wasserstein_linear_unmixing(
        C1, Cdict, reg=reg, p=p, q=p,
        tol_outer=tol, tol_inner=tol, max_iter_outer=20, max_iter_inner=200
    )
    unmixing1b, C1b_emb, OTb, reconstruction1b = ot.gromov.gromov_wasserstein_linear_unmixing(
        C1b, Cdictb, reg=reg, p=None, q=None,
        tol_outer=tol, tol_inner=tol, max_iter_outer=20, max_iter_inner=200
    )
    unmixing2, C2_emb, OT, reconstruction2 = ot.gromov.gromov_wasserstein_linear_unmixing(
        C2, Cdict, reg=reg, p=None, q=None,
        tol_outer=tol, tol_inner=tol, max_iter_outer=20, max_iter_inner=200
    )
    unmixing2b, C2b_emb, OTb, reconstruction2b = ot.gromov.gromov_wasserstein_linear_unmixing(
        C2b, Cdictb, reg=reg, p=pb, q=pb,
        tol_outer=tol, tol_inner=tol, max_iter_outer=20, max_iter_inner=200
    )

    np.testing.assert_allclose(unmixing1, nx.to_numpy(unmixing1b), atol=1e-06)
    np.testing.assert_allclose(unmixing1, [1., 0.], atol=1e-01)
    np.testing.assert_allclose(unmixing2, nx.to_numpy(unmixing2b), atol=1e-06)
    np.testing.assert_allclose(unmixing2, [0., 1.], atol=1e-01)
    np.testing.assert_allclose(C1_emb, nx.to_numpy(C1b_emb), atol=1e-06)
    np.testing.assert_allclose(C2_emb, nx.to_numpy(C2b_emb), atol=1e-06)
    np.testing.assert_allclose(reconstruction1, nx.to_numpy(reconstruction1b), atol=1e-06)
    np.testing.assert_allclose(reconstruction2, nx.to_numpy(reconstruction2b), atol=1e-06)
    np.testing.assert_allclose(C1b_emb.shape, (n, n))
    np.testing.assert_allclose(C2b_emb.shape, (n, n))


def test_gromov_wasserstein_dictionary_learning(nx):
    # create a dataset composed of 2 structures, each repeated n_samples // 2 times
    shape = 4
    n_samples = 2
    n_atoms = 2
    projection = 'nonnegative_symmetric'
    X1, y1 = ot.datasets.make_data_classif('3gauss', shape, random_state=42)
    X2, y2 = ot.datasets.make_data_classif('3gauss2', shape, random_state=42)
    C1 = ot.dist(X1)
    C2 = ot.dist(X2)
    Cs = [C1.copy() for _ in range(n_samples // 2)] + [C2.copy() for _ in
                                                       range(n_samples // 2)]
    ps = [ot.unif(shape) for _ in range(n_samples)]
    q = ot.unif(shape)

    # Provide initialization for the graph dictionary of shape (n_atoms, shape, shape)
    # following the same procedure as implemented in gromov_wasserstein_dictionary_learning.
    dataset_means = [C.mean() for C in Cs]
    rng = np.random.RandomState(0)
    Cdict_init = rng.normal(loc=np.mean(dataset_means), scale=np.std(dataset_means),
                            size=(n_atoms, shape, shape))

    if projection == 'nonnegative_symmetric':
        Cdict_init = 0.5 * (Cdict_init + Cdict_init.transpose((0, 2, 1)))
        Cdict_init[Cdict_init < 0.] = 0.

    Csb = nx.from_numpy(*Cs)
    psb = nx.from_numpy(*ps)
    qb, Cdict_initb = nx.from_numpy(q, Cdict_init)

    # Test: compare reconstruction error using initial dictionary and dictionary
    # learned using this initialization
    # > Compute initial reconstruction of samples on this random dictionary without backend
    use_adam_optimizer = True
    verbose = False
    tol = 10**(-5)
    epochs = 1

    initial_total_reconstruction = 0
    for i in range(n_samples):
        _, _, _, reconstruction = ot.gromov.gromov_wasserstein_linear_unmixing(
            Cs[i], Cdict_init, p=ps[i], q=q, reg=0.,
            tol_outer=tol, tol_inner=tol, max_iter_outer=10, max_iter_inner=50
        )
        initial_total_reconstruction += reconstruction

    # > Learn the dictionary using this init
    Cdict, log = ot.gromov.gromov_wasserstein_dictionary_learning(
        Cs, D=n_atoms, nt=shape, ps=ps, q=q, Cdict_init=Cdict_init,
        epochs=epochs, batch_size=2 * n_samples, learning_rate=1., reg=0.,
        tol_outer=tol, tol_inner=tol, max_iter_outer=10, max_iter_inner=50,
        projection=projection, use_log=False,
        use_adam_optimizer=use_adam_optimizer, verbose=verbose
    )
    # > Compute reconstruction of samples on learned dictionary without backend
    total_reconstruction = 0
    for i in range(n_samples):
        _, _, _, reconstruction = ot.gromov.gromov_wasserstein_linear_unmixing(
            Cs[i], Cdict, p=None, q=None, reg=0.,
            tol_outer=tol, tol_inner=tol, max_iter_outer=10, max_iter_inner=50
        )
        total_reconstruction += reconstruction

    np.testing.assert_array_less(total_reconstruction, initial_total_reconstruction)

    # Test: Perform same experiments after going through backend
    Cdictb, log = ot.gromov.gromov_wasserstein_dictionary_learning(
        Csb, D=n_atoms, nt=shape, ps=None, q=None, Cdict_init=Cdict_initb,
        epochs=epochs, batch_size=n_samples, learning_rate=1., reg=0.,
        tol_outer=tol, tol_inner=tol, max_iter_outer=10, max_iter_inner=50,
        projection=projection, use_log=False,
        use_adam_optimizer=use_adam_optimizer, verbose=verbose
    )
    # Compute reconstruction of samples on learned dictionary
    total_reconstruction_b = 0
    for i in range(n_samples):
        _, _, _, reconstruction = ot.gromov.gromov_wasserstein_linear_unmixing(
            Csb[i], Cdictb, p=psb[i], q=qb, reg=0.,
            tol_outer=tol, tol_inner=tol, max_iter_outer=10, max_iter_inner=50
        )
        total_reconstruction_b += reconstruction

    total_reconstruction_b = nx.to_numpy(total_reconstruction_b)
    np.testing.assert_array_less(total_reconstruction_b, initial_total_reconstruction)
    np.testing.assert_allclose(total_reconstruction_b, total_reconstruction, atol=1e-05)
    np.testing.assert_allclose(Cdict, nx.to_numpy(Cdictb), atol=1e-03)

    # Test: Perform same comparison without providing the initial dictionary (an optional input),
    # knowing that the initialization scheme is the same as the one implemented to set
    # the benchmarked initialization.
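    # random_state=0 should reproduce the benchmark initialization built above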
    Cdict_bis, log = ot.gromov.gromov_wasserstein_dictionary_learning(
        Cs, D=n_atoms, nt=shape, ps=None, q=None, Cdict_init=None,
        epochs=epochs, batch_size=n_samples, learning_rate=1., reg=0.,
        tol_outer=tol, tol_inner=tol, max_iter_outer=10, max_iter_inner=50,
        projection=projection, use_log=False,
        use_adam_optimizer=use_adam_optimizer, verbose=verbose, random_state=0
    )
    # > Compute reconstruction of samples on learned dictionary
    total_reconstruction_bis = 0
    for i in range(n_samples):
        _, _, _, reconstruction = ot.gromov.gromov_wasserstein_linear_unmixing(
            Cs[i], Cdict_bis, p=ps[i], q=q, reg=0.,
            tol_outer=tol, tol_inner=tol, max_iter_outer=10, max_iter_inner=50
        )
        total_reconstruction_bis += reconstruction

    np.testing.assert_allclose(total_reconstruction_bis, total_reconstruction, atol=1e-05)

    # Test: Same after going through backend
    Cdictb_bis, log = ot.gromov.gromov_wasserstein_dictionary_learning(
        Csb, D=n_atoms, nt=shape, ps=psb, q=qb, Cdict_init=None,
        epochs=epochs, batch_size=n_samples, learning_rate=1., reg=0.,
        tol_outer=tol, tol_inner=tol, max_iter_outer=10, max_iter_inner=50,
        projection=projection, use_log=False,
        use_adam_optimizer=use_adam_optimizer, verbose=verbose, random_state=0
    )
    # > Compute reconstruction of samples on learned dictionary
    total_reconstruction_b_bis = 0
    for i in range(n_samples):
        _, _, _, reconstruction = ot.gromov.gromov_wasserstein_linear_unmixing(
            Csb[i], Cdictb_bis, p=None, q=None, reg=0.,
            tol_outer=tol, tol_inner=tol, max_iter_outer=10, max_iter_inner=50
        )
        total_reconstruction_b_bis += reconstruction

    total_reconstruction_b_bis = nx.to_numpy(total_reconstruction_b_bis)
    np.testing.assert_allclose(total_reconstruction_b_bis, total_reconstruction_b, atol=1e-05)
    np.testing.assert_allclose(Cdict_bis, nx.to_numpy(Cdictb_bis), atol=1e-03)

    # Test: Perform same comparison without providing the initial dictionary (an optional
    # input) and testing other optimization settings untested until now.
    # We pass previously estimated dictionaries to speed up the process.
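    # plain (non-Adam) updates with a larger learning rate, and use_log=True
    # to exercise the logging path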
use_adam_optimizer = False verbose = True use_log = True Cdict_bis2, log = ot.gromov.gromov_wasserstein_dictionary_learning( Cs, D=n_atoms, nt=shape, ps=ps, q=q, Cdict_init=Cdict, epochs=epochs, batch_size=n_samples, learning_rate=10., reg=0., tol_outer=tol, tol_inner=tol, max_iter_outer=10, max_iter_inner=50, projection=projection, use_log=use_log, use_adam_optimizer=use_adam_optimizer, verbose=verbose, random_state=0, ) # > Compute reconstruction of samples on learned dictionary total_reconstruction_bis2 = 0 for i in range(n_samples): _, _, _, reconstruction = ot.gromov.gromov_wasserstein_linear_unmixing( Cs[i], Cdict_bis2, p=ps[i], q=q, reg=0., tol_outer=tol, tol_inner=tol, max_iter_outer=10, max_iter_inner=50 ) total_reconstruction_bis2 += reconstruction np.testing.assert_array_less(total_reconstruction_bis2, total_reconstruction) # Test: Same after going through backend Cdictb_bis2, log = ot.gromov.gromov_wasserstein_dictionary_learning( Csb, D=n_atoms, nt=shape, ps=psb, q=qb, Cdict_init=Cdictb, epochs=epochs, batch_size=n_samples, learning_rate=10., reg=0., tol_outer=tol, tol_inner=tol, max_iter_outer=10, max_iter_inner=50, projection=projection, use_log=use_log, use_adam_optimizer=use_adam_optimizer, verbose=verbose, random_state=0, ) # > Compute reconstruction of samples on learned dictionary total_reconstruction_b_bis2 = 0 for i in range(n_samples): _, _, _, reconstruction = ot.gromov.gromov_wasserstein_linear_unmixing( Csb[i], Cdictb_bis2, p=psb[i], q=qb, reg=0., tol_outer=tol, tol_inner=tol, max_iter_outer=10, max_iter_inner=50 ) total_reconstruction_b_bis2 += reconstruction total_reconstruction_b_bis2 = nx.to_numpy(total_reconstruction_b_bis2) np.testing.assert_allclose(total_reconstruction_b_bis2, total_reconstruction_bis2, atol=1e-05) def test_fused_gromov_wasserstein_linear_unmixing(nx): n = 4 X1, y1 = ot.datasets.make_data_classif('3gauss', n, random_state=42) X2, y2 = ot.datasets.make_data_classif('3gauss2', n, random_state=42) F, y = ot.datasets.make_data_classif('3gauss', n, random_state=42) C1 = ot.dist(X1) C2 = ot.dist(X2) Cdict = np.stack([C1, C2]) Ydict = np.stack([F, F]) p = ot.unif(n) C1b, C2b, Fb, Cdictb, Ydictb, pb = nx.from_numpy(C1, C2, F, Cdict, Ydict, p) # Tests without regularization reg = 0. 
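    # Reminder on outputs: fused_gromov_wasserstein_linear_unmixing returns, in
    # order, the unmixing weights over atoms, the embedded structure matrix, the
    # embedded feature matrix, the OT plan and the final reconstruction error,
    # matching the unpacking used below.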
unmixing1, C1_emb, Y1_emb, OT, reconstruction1 = ot.gromov.fused_gromov_wasserstein_linear_unmixing( C1, F, Cdict, Ydict, p=p, q=p, alpha=0.5, reg=reg, tol_outer=10**(-6), tol_inner=10**(-6), max_iter_outer=10, max_iter_inner=50 ) unmixing1b, C1b_emb, Y1b_emb, OTb, reconstruction1b = ot.gromov.fused_gromov_wasserstein_linear_unmixing( C1b, Fb, Cdictb, Ydictb, p=None, q=None, alpha=0.5, reg=reg, tol_outer=10**(-6), tol_inner=10**(-6), max_iter_outer=10, max_iter_inner=50 ) unmixing2, C2_emb, Y2_emb, OT, reconstruction2 = ot.gromov.fused_gromov_wasserstein_linear_unmixing( C2, F, Cdict, Ydict, p=None, q=None, alpha=0.5, reg=reg, tol_outer=10**(-6), tol_inner=10**(-6), max_iter_outer=10, max_iter_inner=50 ) unmixing2b, C2b_emb, Y2b_emb, OTb, reconstruction2b = ot.gromov.fused_gromov_wasserstein_linear_unmixing( C2b, Fb, Cdictb, Ydictb, p=pb, q=pb, alpha=0.5, reg=reg, tol_outer=10**(-6), tol_inner=10**(-6), max_iter_outer=10, max_iter_inner=50 ) np.testing.assert_allclose(unmixing1, nx.to_numpy(unmixing1b), atol=4e-06) np.testing.assert_allclose(unmixing1, [1., 0.], atol=4e-01) np.testing.assert_allclose(unmixing2, nx.to_numpy(unmixing2b), atol=4e-06) np.testing.assert_allclose(unmixing2, [0., 1.], atol=4e-01) np.testing.assert_allclose(C1_emb, nx.to_numpy(C1b_emb), atol=1e-03) np.testing.assert_allclose(C2_emb, nx.to_numpy(C2b_emb), atol=1e-03) np.testing.assert_allclose(Y1_emb, nx.to_numpy(Y1b_emb), atol=1e-03) np.testing.assert_allclose(Y2_emb, nx.to_numpy(Y2b_emb), atol=1e-03) np.testing.assert_allclose(reconstruction1, nx.to_numpy(reconstruction1b), atol=1e-06) np.testing.assert_allclose(reconstruction2, nx.to_numpy(reconstruction2b), atol=1e-06) np.testing.assert_allclose(C1b_emb.shape, (n, n)) np.testing.assert_allclose(C2b_emb.shape, (n, n)) # Tests with regularization reg = 0.001 unmixing1, C1_emb, Y1_emb, OT, reconstruction1 = ot.gromov.fused_gromov_wasserstein_linear_unmixing( C1, F, Cdict, Ydict, p=p, q=p, alpha=0.5, reg=reg, tol_outer=10**(-6), tol_inner=10**(-6), max_iter_outer=10, max_iter_inner=50 ) unmixing1b, C1b_emb, Y1b_emb, OTb, reconstruction1b = ot.gromov.fused_gromov_wasserstein_linear_unmixing( C1b, Fb, Cdictb, Ydictb, p=None, q=None, alpha=0.5, reg=reg, tol_outer=10**(-6), tol_inner=10**(-6), max_iter_outer=10, max_iter_inner=50 ) unmixing2, C2_emb, Y2_emb, OT, reconstruction2 = ot.gromov.fused_gromov_wasserstein_linear_unmixing( C2, F, Cdict, Ydict, p=None, q=None, alpha=0.5, reg=reg, tol_outer=10**(-6), tol_inner=10**(-6), max_iter_outer=10, max_iter_inner=50 ) unmixing2b, C2b_emb, Y2b_emb, OTb, reconstruction2b = ot.gromov.fused_gromov_wasserstein_linear_unmixing( C2b, Fb, Cdictb, Ydictb, p=pb, q=pb, alpha=0.5, reg=reg, tol_outer=10**(-6), tol_inner=10**(-6), max_iter_outer=10, max_iter_inner=50 ) np.testing.assert_allclose(unmixing1, nx.to_numpy(unmixing1b), atol=1e-06) np.testing.assert_allclose(unmixing1, [1., 0.], atol=1e-01) np.testing.assert_allclose(unmixing2, nx.to_numpy(unmixing2b), atol=1e-06) np.testing.assert_allclose(unmixing2, [0., 1.], atol=1e-01) np.testing.assert_allclose(C1_emb, nx.to_numpy(C1b_emb), atol=1e-03) np.testing.assert_allclose(C2_emb, nx.to_numpy(C2b_emb), atol=1e-03) np.testing.assert_allclose(Y1_emb, nx.to_numpy(Y1b_emb), atol=1e-03) np.testing.assert_allclose(Y2_emb, nx.to_numpy(Y2b_emb), atol=1e-03) np.testing.assert_allclose(reconstruction1, nx.to_numpy(reconstruction1b), atol=1e-06) np.testing.assert_allclose(reconstruction2, nx.to_numpy(reconstruction2b), atol=1e-06) np.testing.assert_allclose(C1b_emb.shape, (n, n)) 
    np.testing.assert_allclose(C2b_emb.shape, (n, n))


def test_fused_gromov_wasserstein_dictionary_learning(nx):
    # create a dataset composed of 2 structures, each repeated n_samples // 2 times
    shape = 4
    n_samples = 2
    n_atoms = 2
    projection = 'nonnegative_symmetric'

    X1, y1 = ot.datasets.make_data_classif('3gauss', shape, random_state=42)
    X2, y2 = ot.datasets.make_data_classif('3gauss2', shape, random_state=42)
    F, y = ot.datasets.make_data_classif('3gauss', shape, random_state=42)

    C1 = ot.dist(X1)
    C2 = ot.dist(X2)
    Cs = [C1.copy() for _ in range(n_samples // 2)] + [C2.copy() for _ in range(n_samples // 2)]
    Ys = [F.copy() for _ in range(n_samples)]
    ps = [ot.unif(shape) for _ in range(n_samples)]
    q = ot.unif(shape)

    # Provide initialization for the graph dictionary of shape (n_atoms, shape, shape),
    # following the same procedure as implemented in gromov_wasserstein_dictionary_learning.
    dataset_structure_means = [C.mean() for C in Cs]
    rng = np.random.RandomState(0)
    Cdict_init = rng.normal(loc=np.mean(dataset_structure_means), scale=np.std(dataset_structure_means), size=(n_atoms, shape, shape))
    if projection == 'nonnegative_symmetric':
        Cdict_init = 0.5 * (Cdict_init + Cdict_init.transpose((0, 2, 1)))
        Cdict_init[Cdict_init < 0.] = 0.
    dataset_feature_means = np.stack([Y.mean(axis=0) for Y in Ys])
    Ydict_init = rng.normal(loc=dataset_feature_means.mean(axis=0), scale=dataset_feature_means.std(axis=0), size=(n_atoms, shape, 2))

    Csb = nx.from_numpy(*Cs)
    Ysb = nx.from_numpy(*Ys)
    psb = nx.from_numpy(*ps)
    qb, Cdict_initb, Ydict_initb = nx.from_numpy(q, Cdict_init, Ydict_init)

    # Test: Compute initial reconstruction of samples on this random dictionary
    alpha = 0.5
    use_adam_optimizer = True
    verbose = False
    tol = 1e-05
    epochs = 1

    initial_total_reconstruction = 0
    for i in range(n_samples):
        _, _, _, _, reconstruction = ot.gromov.fused_gromov_wasserstein_linear_unmixing(
            Cs[i], Ys[i], Cdict_init, Ydict_init, p=ps[i], q=q, alpha=alpha,
            reg=0., tol_outer=tol, tol_inner=tol, max_iter_outer=10, max_iter_inner=50
        )
        initial_total_reconstruction += reconstruction

    # > Learn a dictionary from this initialization and check that the
    # reconstruction loss on the learned dictionary is lower than with its
    # initialization.
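    # Note: structure and feature atoms have separate step sizes (learning_rate_C
    # and learning_rate_Y), and alpha=0.5 weighs the structure and feature terms
    # equally in the FGW reconstruction loss.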
    Cdict, Ydict, log = ot.gromov.fused_gromov_wasserstein_dictionary_learning(
        Cs, Ys, D=n_atoms, nt=shape, ps=ps, q=q,
        Cdict_init=Cdict_init, Ydict_init=Ydict_init,
        epochs=epochs, batch_size=n_samples, learning_rate_C=1., learning_rate_Y=1.,
        alpha=alpha, reg=0.,
        tol_outer=tol, tol_inner=tol, max_iter_outer=10, max_iter_inner=50,
        projection=projection, use_log=False,
        use_adam_optimizer=use_adam_optimizer, verbose=verbose
    )
    # > Compute reconstruction of samples on learned dictionary
    total_reconstruction = 0
    for i in range(n_samples):
        _, _, _, _, reconstruction = ot.gromov.fused_gromov_wasserstein_linear_unmixing(
            Cs[i], Ys[i], Cdict, Ydict, p=None, q=None, alpha=alpha,
            reg=0., tol_outer=tol, tol_inner=tol, max_iter_outer=10, max_iter_inner=50
        )
        total_reconstruction += reconstruction

    # Compare both
    np.testing.assert_array_less(total_reconstruction, initial_total_reconstruction)

    # Test: Perform same experiments after going through backend
    Cdictb, Ydictb, log = ot.gromov.fused_gromov_wasserstein_dictionary_learning(
        Csb, Ysb, D=n_atoms, nt=shape, ps=None, q=None,
        Cdict_init=Cdict_initb, Ydict_init=Ydict_initb,
        epochs=epochs, batch_size=2 * n_samples, learning_rate_C=1., learning_rate_Y=1.,
        alpha=alpha, reg=0.,
        tol_outer=tol, tol_inner=tol, max_iter_outer=10, max_iter_inner=50,
        projection=projection, use_log=False,
        use_adam_optimizer=use_adam_optimizer, verbose=verbose, random_state=0
    )
    # > Compute reconstruction of samples on learned dictionary
    total_reconstruction_b = 0
    for i in range(n_samples):
        _, _, _, _, reconstruction = ot.gromov.fused_gromov_wasserstein_linear_unmixing(
            Csb[i], Ysb[i], Cdictb, Ydictb, p=psb[i], q=qb, alpha=alpha,
            reg=0., tol_outer=tol, tol_inner=tol, max_iter_outer=10, max_iter_inner=50
        )
        total_reconstruction_b += reconstruction

    total_reconstruction_b = nx.to_numpy(total_reconstruction_b)
    np.testing.assert_array_less(total_reconstruction_b, initial_total_reconstruction)
    np.testing.assert_allclose(total_reconstruction_b, total_reconstruction, atol=1e-05)
    np.testing.assert_allclose(Cdict, nx.to_numpy(Cdictb), atol=1e-03)
    np.testing.assert_allclose(Ydict, nx.to_numpy(Ydictb), atol=1e-03)

    # Test: Perform a similar experiment without providing the initial dictionary
    # (an optional input)
    Cdict_bis, Ydict_bis, log = ot.gromov.fused_gromov_wasserstein_dictionary_learning(
        Cs, Ys, D=n_atoms, nt=shape, ps=None, q=None,
        Cdict_init=None, Ydict_init=None,
        epochs=epochs, batch_size=n_samples, learning_rate_C=1., learning_rate_Y=1.,
        alpha=alpha, reg=0.,
        tol_outer=tol, tol_inner=tol, max_iter_outer=10, max_iter_inner=50,
        projection=projection, use_log=False,
        use_adam_optimizer=use_adam_optimizer, verbose=verbose, random_state=0
    )
    # > Compute reconstruction of samples on learned dictionary
    total_reconstruction_bis = 0
    for i in range(n_samples):
        _, _, _, _, reconstruction = ot.gromov.fused_gromov_wasserstein_linear_unmixing(
            Cs[i], Ys[i], Cdict_bis, Ydict_bis, p=ps[i], q=q, alpha=alpha,
            reg=0., tol_outer=tol, tol_inner=tol, max_iter_outer=10, max_iter_inner=50
        )
        total_reconstruction_bis += reconstruction

    np.testing.assert_allclose(total_reconstruction_bis, total_reconstruction, atol=1e-05)

    # > Same after going through backend
    Cdictb_bis, Ydictb_bis, log = ot.gromov.fused_gromov_wasserstein_dictionary_learning(
        Csb, Ysb, D=n_atoms, nt=shape, ps=None, q=None,
        Cdict_init=None, Ydict_init=None,
        epochs=epochs, batch_size=n_samples, learning_rate_C=1., learning_rate_Y=1.,
        alpha=alpha, reg=0.,
        tol_outer=tol, tol_inner=tol, max_iter_outer=10, max_iter_inner=50,
        projection=projection,
use_log=False, use_adam_optimizer=use_adam_optimizer, verbose=verbose, random_state=0, ) # > Compute reconstruction of samples on learned dictionary total_reconstruction_b_bis = 0 for i in range(n_samples): _, _, _, _, reconstruction = ot.gromov.fused_gromov_wasserstein_linear_unmixing( Csb[i], Ysb[i], Cdictb_bis, Ydictb_bis, p=psb[i], q=qb, alpha=alpha, reg=0., tol_outer=tol, tol_inner=tol, max_iter_outer=10, max_iter_inner=50 ) total_reconstruction_b_bis += reconstruction total_reconstruction_b_bis = nx.to_numpy(total_reconstruction_b_bis) np.testing.assert_allclose(total_reconstruction_b_bis, total_reconstruction_b, atol=1e-05) # Test: without using adam optimizer, with log and verbose set to True use_adam_optimizer = False verbose = True use_log = True # > Experiment providing previously estimated dictionary to speed up the test compared to providing initial random init. Cdict_bis2, Ydict_bis2, log = ot.gromov.fused_gromov_wasserstein_dictionary_learning( Cs, Ys, D=n_atoms, nt=shape, ps=ps, q=q, Cdict_init=Cdict, Ydict_init=Ydict, epochs=epochs, batch_size=n_samples, learning_rate_C=10., learning_rate_Y=10., alpha=alpha, reg=0., tol_outer=tol, tol_inner=tol, max_iter_outer=10, max_iter_inner=50, projection=projection, use_log=use_log, use_adam_optimizer=use_adam_optimizer, verbose=verbose, random_state=0, ) # > Compute reconstruction of samples on learned dictionary total_reconstruction_bis2 = 0 for i in range(n_samples): _, _, _, _, reconstruction = ot.gromov.fused_gromov_wasserstein_linear_unmixing( Cs[i], Ys[i], Cdict_bis2, Ydict_bis2, p=ps[i], q=q, alpha=alpha, reg=0., tol_outer=tol, tol_inner=tol, max_iter_outer=10, max_iter_inner=50 ) total_reconstruction_bis2 += reconstruction np.testing.assert_array_less(total_reconstruction_bis2, total_reconstruction) # > Same after going through backend Cdictb_bis2, Ydictb_bis2, log = ot.gromov.fused_gromov_wasserstein_dictionary_learning( Csb, Ysb, D=n_atoms, nt=shape, ps=None, q=None, Cdict_init=Cdictb, Ydict_init=Ydictb, epochs=epochs, batch_size=n_samples, learning_rate_C=10., learning_rate_Y=10., alpha=alpha, reg=0., tol_outer=tol, tol_inner=tol, max_iter_outer=10, max_iter_inner=50, projection=projection, use_log=use_log, use_adam_optimizer=use_adam_optimizer, verbose=verbose, random_state=0, ) # > Compute reconstruction of samples on learned dictionary total_reconstruction_b_bis2 = 0 for i in range(n_samples): _, _, _, _, reconstruction = ot.gromov.fused_gromov_wasserstein_linear_unmixing( Csb[i], Ysb[i], Cdictb_bis2, Ydictb_bis2, p=None, q=None, alpha=alpha, reg=0., tol_outer=tol, tol_inner=tol, max_iter_outer=10, max_iter_inner=50 ) total_reconstruction_b_bis2 += reconstruction # > Compare results with/without backend total_reconstruction_b_bis2 = nx.to_numpy(total_reconstruction_b_bis2) np.testing.assert_allclose(total_reconstruction_bis2, total_reconstruction_b_bis2, atol=1e-05) def test_semirelaxed_gromov(nx): rng = np.random.RandomState(0) # unbalanced proportions list_n = [30, 15] nt = 2 ns = np.sum(list_n) # create directed sbm with C2 as connectivity matrix C1 = np.zeros((ns, ns), dtype=np.float64) C2 = np.array([[0.8, 0.05], [0.05, 1.]], dtype=np.float64) for i in range(nt): for j in range(nt): ni, nj = list_n[i], list_n[j] xij = rng.binomial(size=(ni, nj), n=1, p=C2[i, j]) C1[i * ni: (i + 1) * ni, j * nj: (j + 1) * nj] = xij p = ot.unif(ns, type_as=C1) q0 = ot.unif(C2.shape[0], type_as=C1) G0 = p[:, None] * q0[None, :] # asymmetric C1b, C2b, pb, q0b, G0b = nx.from_numpy(C1, C2, p, q0, G0) for loss_fun in ['square_loss', 
'kl_loss']:
        G, log = ot.gromov.semirelaxed_gromov_wasserstein(
            C1, C2, p, loss_fun=loss_fun, symmetric=None, log=True, G0=G0)
        Gb, logb = ot.gromov.semirelaxed_gromov_wasserstein(
            C1b, C2b, None, loss_fun=loss_fun, symmetric=False, log=True,
            G0=None, alpha_min=0., alpha_max=1.)
        # check constraints
        np.testing.assert_allclose(G, Gb, atol=1e-06)
        np.testing.assert_allclose(p, nx.sum(Gb, axis=1), atol=1e-04)
        np.testing.assert_allclose(list_n / ns, np.sum(G, axis=0), atol=1e-01)
        np.testing.assert_allclose(list_n / ns, nx.sum(Gb, axis=0), atol=1e-01)

        srgw, log2 = ot.gromov.semirelaxed_gromov_wasserstein2(
            C1, C2, None, loss_fun=loss_fun, symmetric=False, log=True, G0=G0)
        srgwb, logb2 = ot.gromov.semirelaxed_gromov_wasserstein2(
            C1b, C2b, pb, loss_fun=loss_fun, symmetric=None, log=True, G0=None)

        G = log2['T']
        Gb = nx.to_numpy(logb2['T'])
        # check constraints
        np.testing.assert_allclose(G, Gb, atol=1e-06)
        np.testing.assert_allclose(p, Gb.sum(1), atol=1e-04)  # cf convergence gromov
        np.testing.assert_allclose(list_n / ns, Gb.sum(0), atol=1e-04)  # cf convergence gromov
        np.testing.assert_allclose(log2['srgw_dist'], logb['srgw_dist'], atol=1e-07)
        np.testing.assert_allclose(logb2['srgw_dist'], log['srgw_dist'], atol=1e-07)

    # symmetric
    C1 = 0.5 * (C1 + C1.T)
    C1b, C2b, pb, q0b, G0b = nx.from_numpy(C1, C2, p, q0, G0)

    G, log = ot.gromov.semirelaxed_gromov_wasserstein(
        C1, C2, p, loss_fun='square_loss', symmetric=None, log=True, G0=None)
    Gb = ot.gromov.semirelaxed_gromov_wasserstein(
        C1b, C2b, pb, loss_fun='square_loss', symmetric=True, log=False, G0=G0b)
    # check constraints
    np.testing.assert_allclose(G, Gb, atol=1e-06)
    np.testing.assert_allclose(p, nx.sum(Gb, axis=1), atol=1e-04)  # cf convergence gromov
    np.testing.assert_allclose(list_n / ns, nx.sum(Gb, axis=0), atol=1e-02)  # cf convergence gromov

    srgw, log2 = ot.gromov.semirelaxed_gromov_wasserstein2(
        C1, C2, p, loss_fun='square_loss', symmetric=True, log=True, G0=G0)
    srgwb, logb2 = ot.gromov.semirelaxed_gromov_wasserstein2(
        C1b, C2b, pb, loss_fun='square_loss', symmetric=None, log=True, G0=None)
    srgw_ = ot.gromov.semirelaxed_gromov_wasserstein2(
        C1, C2, p, loss_fun='square_loss', symmetric=True, log=False, G0=G0)

    G = log2['T']
    # check constraints
    np.testing.assert_allclose(G, Gb, atol=1e-06)
    np.testing.assert_allclose(p, nx.sum(Gb, 1), atol=1e-04)  # cf convergence gromov
    np.testing.assert_allclose(list_n / ns, np.sum(G, axis=0), atol=1e-01)
    np.testing.assert_allclose(list_n / ns, nx.sum(Gb, axis=0), atol=1e-01)
    np.testing.assert_allclose(log2['srgw_dist'], log['srgw_dist'], atol=1e-07)
    np.testing.assert_allclose(logb2['srgw_dist'], log['srgw_dist'], atol=1e-07)
    np.testing.assert_allclose(srgw, srgw_, atol=1e-07)


def test_semirelaxed_gromov2_gradients():
    n_samples = 50  # nb samples

    mu_s = np.array([0, 0])
    cov_s = np.array([[1, 0], [0, 1]])

    xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=4)
    xt = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=5)

    p = ot.unif(n_samples)

    C1 = ot.dist(xs, xs)
    C2 = ot.dist(xt, xt)

    C1 /= C1.max()
    C2 /= C2.max()

    if torch:
        devices = [torch.device("cpu")]
        if torch.cuda.is_available():
            devices.append(torch.device("cuda"))
        for device in devices:
            for loss_fun in ['square_loss', 'kl_loss']:
                # semirelaxed solvers do not support gradients over masses yet.
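                # Note: p1 is created with requires_grad=False, so only the cost
                # matrices C11 and C12 accumulate gradients from the srGW value.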
p1 = torch.tensor(p, requires_grad=False, device=device) C11 = torch.tensor(C1, requires_grad=True, device=device) C12 = torch.tensor(C2, requires_grad=True, device=device) val = ot.gromov.semirelaxed_gromov_wasserstein2(C11, C12, p1, loss_fun=loss_fun) val.backward() assert val.device == p1.device assert p1.grad is None assert C11.shape == C11.grad.shape assert C12.shape == C12.grad.shape def test_srgw_helper_backend(nx): n_samples = 20 # nb samples mu = np.array([0, 0]) cov = np.array([[1, 0], [0, 1]]) xs = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov, random_state=0) xt = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov, random_state=1) p = ot.unif(n_samples) q = ot.unif(n_samples) C1 = ot.dist(xs, xs) C2 = ot.dist(xt, xt) C1 /= C1.max() C2 /= C2.max() for loss_fun in ['square_loss', 'kl_loss']: C1b, C2b, pb, qb = nx.from_numpy(C1, C2, p, q) Gb, logb = ot.gromov.semirelaxed_gromov_wasserstein(C1b, C2b, pb, loss_fun, armijo=False, symmetric=True, G0=None, log=True) # calls with nx=None constCb, hC1b, hC2b, fC2tb = ot.gromov.init_matrix_semirelaxed(C1b, C2b, pb, loss_fun) ones_pb = nx.ones(pb.shape[0], type_as=pb) def f(G): qG = nx.sum(G, 0) marginal_product = nx.outer(ones_pb, nx.dot(qG, fC2tb)) return ot.gromov.gwloss(constCb + marginal_product, hC1b, hC2b, G, nx=None) def df(G): qG = nx.sum(G, 0) marginal_product = nx.outer(ones_pb, nx.dot(qG, fC2tb)) return ot.gromov.gwggrad(constCb + marginal_product, hC1b, hC2b, G, nx=None) def line_search(cost, G, deltaG, Mi, cost_G): return ot.gromov.solve_semirelaxed_gromov_linesearch( G, deltaG, cost_G, hC1b, hC2b, ones_pb, 0., 1., fC2t=fC2tb, nx=None) # feed the precomputed local optimum Gb to semirelaxed_cg res, log = ot.optim.semirelaxed_cg(pb, qb, 0., 1., f, df, Gb, line_search, log=True, numItermax=1e4, stopThr=1e-9, stopThr2=1e-9) # check constraints np.testing.assert_allclose(res, Gb, atol=1e-06) @pytest.mark.parametrize('loss_fun', [ 'square_loss', 'kl_loss', pytest.param('unknown_loss', marks=pytest.mark.xfail(raises=ValueError)), ]) def test_gw_semirelaxed_helper_validation(loss_fun): n_samples = 20 # nb samples mu = np.array([0, 0]) cov = np.array([[1, 0], [0, 1]]) xs = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov, random_state=0) xt = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov, random_state=1) p = ot.unif(n_samples) C1 = ot.dist(xs, xs) C2 = ot.dist(xt, xt) ot.gromov.init_matrix_semirelaxed(C1, C2, p, loss_fun=loss_fun) def test_semirelaxed_fgw(nx): rng = np.random.RandomState(0) list_n = [16, 8] nt = 2 ns = 24 # create directed sbm with C2 as connectivity matrix C1 = np.zeros((ns, ns)) C2 = np.array([[0.7, 0.05], [0.05, 0.9]]) for i in range(nt): for j in range(nt): ni, nj = list_n[i], list_n[j] xij = rng.binomial(size=(ni, nj), n=1, p=C2[i, j]) C1[i * ni: (i + 1) * ni, j * nj: (j + 1) * nj] = xij F1 = np.zeros((ns, 1)) F1[:16] = rng.normal(loc=0., scale=0.01, size=(16, 1)) F1[16:] = rng.normal(loc=1., scale=0.01, size=(8, 1)) F2 = np.zeros((2, 1)) F2[1, :] = 1. 
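    # The feature cost M below is the pairwise squared Euclidean distance between
    # F1 and F2, via the expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2 <a, b>.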
M = (F1 ** 2).dot(np.ones((1, nt))) + np.ones((ns, 1)).dot((F2 ** 2).T) - 2 * F1.dot(F2.T) p = ot.unif(ns) q0 = ot.unif(C2.shape[0]) G0 = p[:, None] * q0[None, :] # asymmetric Mb, C1b, C2b, pb, q0b, G0b = nx.from_numpy(M, C1, C2, p, q0, G0) G, log = ot.gromov.semirelaxed_fused_gromov_wasserstein(M, C1, C2, None, loss_fun='square_loss', alpha=0.5, symmetric=None, log=True, G0=None) Gb, logb = ot.gromov.semirelaxed_fused_gromov_wasserstein(Mb, C1b, C2b, pb, loss_fun='square_loss', alpha=0.5, symmetric=False, log=True, G0=G0b) # check constraints np.testing.assert_allclose(G, Gb, atol=1e-06) np.testing.assert_allclose(p, nx.sum(Gb, axis=1), atol=1e-04) # cf convergence gromov np.testing.assert_allclose([2 / 3, 1 / 3], nx.sum(Gb, axis=0), atol=1e-02) # cf convergence gromov srgw, log2 = ot.gromov.semirelaxed_fused_gromov_wasserstein2(M, C1, C2, p, loss_fun='square_loss', alpha=0.5, symmetric=False, log=True, G0=G0) srgwb, logb2 = ot.gromov.semirelaxed_fused_gromov_wasserstein2(Mb, C1b, C2b, None, loss_fun='square_loss', alpha=0.5, symmetric=None, log=True, G0=None) G = log2['T'] Gb = nx.to_numpy(logb2['T']) # check constraints np.testing.assert_allclose(G, Gb, atol=1e-06) np.testing.assert_allclose(p, Gb.sum(1), atol=1e-04) # cf convergence gromov np.testing.assert_allclose([2 / 3, 1 / 3], Gb.sum(0), atol=1e-04) # cf convergence gromov np.testing.assert_allclose(log2['srfgw_dist'], logb['srfgw_dist'], atol=1e-07) np.testing.assert_allclose(logb2['srfgw_dist'], log['srfgw_dist'], atol=1e-07) # symmetric for loss_fun in ['square_loss', 'kl_loss']: C1 = 0.5 * (C1 + C1.T) Mb, C1b, C2b, pb, q0b, G0b = nx.from_numpy(M, C1, C2, p, q0, G0) G, log = ot.gromov.semirelaxed_fused_gromov_wasserstein(M, C1, C2, p, loss_fun=loss_fun, alpha=0.5, symmetric=None, log=True, G0=None) Gb = ot.gromov.semirelaxed_fused_gromov_wasserstein(Mb, C1b, C2b, pb, loss_fun=loss_fun, alpha=0.5, symmetric=True, log=False, G0=G0b) # check constraints np.testing.assert_allclose(G, Gb, atol=1e-06) np.testing.assert_allclose(p, nx.sum(Gb, axis=1), atol=1e-04) # cf convergence gromov np.testing.assert_allclose([2 / 3, 1 / 3], nx.sum(Gb, axis=0), atol=1e-02) # cf convergence gromov srgw, log2 = ot.gromov.semirelaxed_fused_gromov_wasserstein2(M, C1, C2, p, loss_fun=loss_fun, alpha=0.5, symmetric=True, log=True, G0=G0) srgwb, logb2 = ot.gromov.semirelaxed_fused_gromov_wasserstein2(Mb, C1b, C2b, pb, loss_fun=loss_fun, alpha=0.5, symmetric=None, log=True, G0=None) srgw_ = ot.gromov.semirelaxed_fused_gromov_wasserstein2(M, C1, C2, p, loss_fun=loss_fun, alpha=0.5, symmetric=True, log=False, G0=G0) G = log2['T'] Gb = nx.to_numpy(logb2['T']) # check constraints np.testing.assert_allclose(G, Gb, atol=1e-06) np.testing.assert_allclose(p, Gb.sum(1), atol=1e-04) # cf convergence gromov np.testing.assert_allclose([2 / 3, 1 / 3], Gb.sum(0), atol=1e-04) # cf convergence gromov np.testing.assert_allclose(log2['srfgw_dist'], log['srfgw_dist'], atol=1e-07) np.testing.assert_allclose(logb2['srfgw_dist'], log['srfgw_dist'], atol=1e-07) np.testing.assert_allclose(srgw, srgw_, atol=1e-07) def test_semirelaxed_fgw2_gradients(): n_samples = 20 # nb samples mu_s = np.array([0, 0]) cov_s = np.array([[1, 0], [0, 1]]) xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=4) xt = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=5) p = ot.unif(n_samples) C1 = ot.dist(xs, xs) C2 = ot.dist(xt, xt) M = ot.dist(xs, xt) C1 /= C1.max() C2 /= C2.max() if torch: devices = [torch.device("cpu")] if torch.cuda.is_available(): 
devices.append(torch.device("cuda")) for device in devices: # semirelaxed solvers do not support gradients over masses yet. for loss_fun in ['square_loss', 'kl_loss']: p1 = torch.tensor(p, requires_grad=False, device=device) C11 = torch.tensor(C1, requires_grad=True, device=device) C12 = torch.tensor(C2, requires_grad=True, device=device) M1 = torch.tensor(M, requires_grad=True, device=device) val = ot.gromov.semirelaxed_fused_gromov_wasserstein2(M1, C11, C12, p1, loss_fun=loss_fun) val.backward() assert val.device == p1.device assert p1.grad is None assert C11.shape == C11.grad.shape assert C12.shape == C12.grad.shape assert M1.shape == M1.grad.shape # full gradients with alpha p1 = torch.tensor(p, requires_grad=False, device=device) C11 = torch.tensor(C1, requires_grad=True, device=device) C12 = torch.tensor(C2, requires_grad=True, device=device) M1 = torch.tensor(M, requires_grad=True, device=device) alpha = torch.tensor(0.5, requires_grad=True, device=device) val = ot.gromov.semirelaxed_fused_gromov_wasserstein2(M1, C11, C12, p1, loss_fun=loss_fun, alpha=alpha) val.backward() assert val.device == p1.device assert p1.grad is None assert C11.shape == C11.grad.shape assert C12.shape == C12.grad.shape assert alpha.shape == alpha.grad.shape def test_srfgw_helper_backend(nx): n_samples = 20 # nb samples mu = np.array([0, 0]) cov = np.array([[1, 0], [0, 1]]) rng = np.random.RandomState(42) xs = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov, random_state=0) ys = rng.randn(xs.shape[0], 2) xt = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov, random_state=1) yt = rng.randn(xt.shape[0], 2) p = ot.unif(n_samples) q = ot.unif(n_samples) G0 = p[:, None] * q[None, :] C1 = ot.dist(xs, xs) C2 = ot.dist(xt, xt) C1 /= C1.max() C2 /= C2.max() M = ot.dist(ys, yt) M /= M.max() Mb, C1b, C2b, pb, qb, G0b = nx.from_numpy(M, C1, C2, p, q, G0) alpha = 0.5 Gb, logb = ot.gromov.semirelaxed_fused_gromov_wasserstein(Mb, C1b, C2b, pb, 'square_loss', alpha=0.5, armijo=False, symmetric=True, G0=G0b, log=True) # calls with nx=None constCb, hC1b, hC2b, fC2tb = ot.gromov.init_matrix_semirelaxed(C1b, C2b, pb, loss_fun='square_loss') ones_pb = nx.ones(pb.shape[0], type_as=pb) def f(G): qG = nx.sum(G, 0) marginal_product = nx.outer(ones_pb, nx.dot(qG, fC2tb)) return ot.gromov.gwloss(constCb + marginal_product, hC1b, hC2b, G, nx=None) def df(G): qG = nx.sum(G, 0) marginal_product = nx.outer(ones_pb, nx.dot(qG, fC2tb)) return ot.gromov.gwggrad(constCb + marginal_product, hC1b, hC2b, G, nx=None) def line_search(cost, G, deltaG, Mi, cost_G): return ot.gromov.solve_semirelaxed_gromov_linesearch( G, deltaG, cost_G, C1b, C2b, ones_pb, M=(1 - alpha) * Mb, reg=alpha, nx=None) # feed the precomputed local optimum Gb to semirelaxed_cg res, log = ot.optim.semirelaxed_cg(pb, qb, (1 - alpha) * Mb, alpha, f, df, Gb, line_search, log=True, numItermax=1e4, stopThr=1e-9, stopThr2=1e-9) # check constraints np.testing.assert_allclose(res, Gb, atol=1e-06) def test_entropic_semirelaxed_gromov(nx): # unbalanced proportions list_n = [30, 15] nt = 2 ns = np.sum(list_n) # create directed sbm with C2 as connectivity matrix C1 = np.zeros((ns, ns), dtype=np.float64) C2 = np.array([[0.8, 0.05], [0.05, 1.]], dtype=np.float64) rng = np.random.RandomState(0) for i in range(nt): for j in range(nt): ni, nj = list_n[i], list_n[j] xij = rng.binomial(size=(ni, nj), n=1, p=C2[i, j]) C1[i * ni: (i + 1) * ni, j * nj: (j + 1) * nj] = xij p = ot.unif(ns, type_as=C1) q0 = ot.unif(C2.shape[0], type_as=C1) G0 = p[:, None] * q0[None, :] # asymmetric C1b, C2b, 
pb, q0b, G0b = nx.from_numpy(C1, C2, p, q0, G0) epsilon = 0.1 for loss_fun in ['square_loss', 'kl_loss']: G, log = ot.gromov.entropic_semirelaxed_gromov_wasserstein(C1, C2, p, loss_fun=loss_fun, epsilon=epsilon, symmetric=None, log=True, G0=G0) Gb, logb = ot.gromov.entropic_semirelaxed_gromov_wasserstein(C1b, C2b, None, loss_fun=loss_fun, epsilon=epsilon, symmetric=False, log=True, G0=None) # check constraints np.testing.assert_allclose(G, Gb, atol=1e-06) np.testing.assert_allclose(p, nx.sum(Gb, axis=1), atol=1e-04) np.testing.assert_allclose(list_n / ns, np.sum(G, axis=0), atol=1e-01) np.testing.assert_allclose(list_n / ns, nx.sum(Gb, axis=0), atol=1e-01) srgw, log2 = ot.gromov.entropic_semirelaxed_gromov_wasserstein2(C1, C2, None, loss_fun=loss_fun, epsilon=epsilon, symmetric=False, log=True, G0=G0) srgwb, logb2 = ot.gromov.entropic_semirelaxed_gromov_wasserstein2(C1b, C2b, pb, loss_fun=loss_fun, epsilon=epsilon, symmetric=None, log=True, G0=None) G = log2['T'] Gb = nx.to_numpy(logb2['T']) # check constraints np.testing.assert_allclose(G, Gb, atol=1e-06) np.testing.assert_allclose(p, Gb.sum(1), atol=1e-04) # cf convergence gromov np.testing.assert_allclose(list_n / ns, Gb.sum(0), atol=1e-04) # cf convergence gromov np.testing.assert_allclose(log2['srgw_dist'], logb['srgw_dist'], atol=1e-07) np.testing.assert_allclose(logb2['srgw_dist'], log['srgw_dist'], atol=1e-07) # symmetric C1 = 0.5 * (C1 + C1.T) C1b, C2b, pb, q0b, G0b = nx.from_numpy(C1, C2, p, q0, G0) G, log = ot.gromov.entropic_semirelaxed_gromov_wasserstein(C1, C2, p, loss_fun='square_loss', epsilon=epsilon, symmetric=None, log=True, G0=None) Gb = ot.gromov.entropic_semirelaxed_gromov_wasserstein(C1b, C2b, None, loss_fun='square_loss', epsilon=epsilon, symmetric=True, log=False, G0=G0b) # check constraints np.testing.assert_allclose(G, Gb, atol=1e-06) np.testing.assert_allclose(p, nx.sum(Gb, axis=1), atol=1e-04) # cf convergence gromov np.testing.assert_allclose(list_n / ns, nx.sum(Gb, axis=0), atol=1e-02) # cf convergence gromov srgw, log2 = ot.gromov.entropic_semirelaxed_gromov_wasserstein2(C1, C2, p, loss_fun='square_loss', epsilon=epsilon, symmetric=True, log=True, G0=G0) srgwb, logb2 = ot.gromov.entropic_semirelaxed_gromov_wasserstein2(C1b, C2b, pb, loss_fun='square_loss', epsilon=epsilon, symmetric=None, log=True, G0=None) srgw_ = ot.gromov.entropic_semirelaxed_gromov_wasserstein2(C1, C2, p, loss_fun='square_loss', epsilon=epsilon, symmetric=True, log=False, G0=G0) G = log2['T'] # check constraints np.testing.assert_allclose(G, Gb, atol=1e-06) np.testing.assert_allclose(p, nx.sum(Gb, 1), atol=1e-04) # cf convergence gromov np.testing.assert_allclose(list_n / ns, np.sum(G, axis=0), atol=1e-01) np.testing.assert_allclose(list_n / ns, nx.sum(Gb, axis=0), atol=1e-01) np.testing.assert_allclose(log2['srgw_dist'], log['srgw_dist'], atol=1e-07) np.testing.assert_allclose(logb2['srgw_dist'], log['srgw_dist'], atol=1e-07) np.testing.assert_allclose(srgw, srgw_, atol=1e-07) @pytest.skip_backend("jax", reason="test very slow with jax backend") @pytest.skip_backend("tf", reason="test very slow with tf backend") def test_entropic_semirelaxed_gromov_dtype_device(nx): # setup n_samples = 5 # nb samples mu_s = np.array([0, 0]) cov_s = np.array([[1, 0], [0, 1]]) xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=42) xt = xs[::-1].copy() p = ot.unif(n_samples) C1 = ot.dist(xs, xs) C2 = ot.dist(xt, xt) C1 /= C1.max() C2 /= C2.max() for tp in nx.__type_list__: print(nx.dtype_device(tp)) for loss_fun in ['square_loss', 
'kl_loss']: C1b, C2b, pb = nx.from_numpy(C1, C2, p, type_as=tp) Gb = ot.gromov.entropic_semirelaxed_gromov_wasserstein( C1b, C2b, pb, loss_fun, epsilon=0.1, verbose=True ) gw_valb = ot.gromov.entropic_semirelaxed_gromov_wasserstein2( C1b, C2b, pb, loss_fun, epsilon=0.1, verbose=True ) nx.assert_same_dtype_device(C1b, Gb) nx.assert_same_dtype_device(C1b, gw_valb) def test_entropic_semirelaxed_fgw(nx): rng = np.random.RandomState(0) list_n = [16, 8] nt = 2 ns = 24 # create directed sbm with C2 as connectivity matrix C1 = np.zeros((ns, ns)) C2 = np.array([[0.7, 0.05], [0.05, 0.9]]) for i in range(nt): for j in range(nt): ni, nj = list_n[i], list_n[j] xij = rng.binomial(size=(ni, nj), n=1, p=C2[i, j]) C1[i * ni: (i + 1) * ni, j * nj: (j + 1) * nj] = xij F1 = np.zeros((ns, 1)) F1[:16] = rng.normal(loc=0., scale=0.01, size=(16, 1)) F1[16:] = rng.normal(loc=1., scale=0.01, size=(8, 1)) F2 = np.zeros((2, 1)) F2[1, :] = 1. M = (F1 ** 2).dot(np.ones((1, nt))) + np.ones((ns, 1)).dot((F2 ** 2).T) - 2 * F1.dot(F2.T) p = ot.unif(ns) q0 = ot.unif(C2.shape[0]) G0 = p[:, None] * q0[None, :] # asymmetric Mb, C1b, C2b, pb, q0b, G0b = nx.from_numpy(M, C1, C2, p, q0, G0) G, log = ot.gromov.entropic_semirelaxed_fused_gromov_wasserstein(M, C1, C2, None, loss_fun='square_loss', epsilon=0.1, alpha=0.5, symmetric=None, log=True, G0=None) Gb, logb = ot.gromov.entropic_semirelaxed_fused_gromov_wasserstein(Mb, C1b, C2b, pb, loss_fun='square_loss', epsilon=0.1, alpha=0.5, symmetric=False, log=True, G0=G0b) # check constraints np.testing.assert_allclose(G, Gb, atol=1e-06) np.testing.assert_allclose(p, nx.sum(Gb, axis=1), atol=1e-04) # cf convergence gromov np.testing.assert_allclose([2 / 3, 1 / 3], nx.sum(Gb, axis=0), atol=1e-02) # cf convergence gromov srgw, log2 = ot.gromov.entropic_semirelaxed_fused_gromov_wasserstein2(M, C1, C2, p, loss_fun='square_loss', epsilon=0.1, alpha=0.5, symmetric=False, log=True, G0=G0) srgwb, logb2 = ot.gromov.entropic_semirelaxed_fused_gromov_wasserstein2(Mb, C1b, C2b, None, loss_fun='square_loss', epsilon=0.1, alpha=0.5, symmetric=None, log=True, G0=None) G = log2['T'] Gb = nx.to_numpy(logb2['T']) # check constraints np.testing.assert_allclose(G, Gb, atol=1e-06) np.testing.assert_allclose(p, Gb.sum(1), atol=1e-04) # cf convergence gromov np.testing.assert_allclose([2 / 3, 1 / 3], Gb.sum(0), atol=1e-04) # cf convergence gromov np.testing.assert_allclose(log2['srfgw_dist'], logb['srfgw_dist'], atol=1e-07) np.testing.assert_allclose(logb2['srfgw_dist'], log['srfgw_dist'], atol=1e-07) # symmetric C1 = 0.5 * (C1 + C1.T) Mb, C1b, C2b, pb, q0b, G0b = nx.from_numpy(M, C1, C2, p, q0, G0) for loss_fun in ['square_loss', 'kl_loss']: G, log = ot.gromov.entropic_semirelaxed_fused_gromov_wasserstein(M, C1, C2, p, loss_fun=loss_fun, epsilon=0.1, alpha=0.5, symmetric=None, log=True, G0=None) Gb = ot.gromov.entropic_semirelaxed_fused_gromov_wasserstein(Mb, C1b, C2b, pb, loss_fun=loss_fun, epsilon=0.1, alpha=0.5, symmetric=True, log=False, G0=G0b) # check constraints np.testing.assert_allclose(G, Gb, atol=1e-06) np.testing.assert_allclose(p, nx.sum(Gb, axis=1), atol=1e-04) # cf convergence gromov np.testing.assert_allclose([2 / 3, 1 / 3], nx.sum(Gb, axis=0), atol=1e-02) # cf convergence gromov srgw, log2 = ot.gromov.entropic_semirelaxed_fused_gromov_wasserstein2(M, C1, C2, p, loss_fun=loss_fun, epsilon=0.1, alpha=0.5, symmetric=True, log=True, G0=G0) srgwb, logb2 = ot.gromov.entropic_semirelaxed_fused_gromov_wasserstein2(Mb, C1b, C2b, pb, loss_fun=loss_fun, epsilon=0.1, alpha=0.5, symmetric=None, 
            log=True, G0=None)
        srgw_ = ot.gromov.entropic_semirelaxed_fused_gromov_wasserstein2(
            M, C1, C2, p, loss_fun=loss_fun, epsilon=0.1, alpha=0.5,
            symmetric=True, log=False, G0=G0)

        G = log2['T']
        Gb = nx.to_numpy(logb2['T'])
        # check constraints
        np.testing.assert_allclose(G, Gb, atol=1e-06)
        np.testing.assert_allclose(p, Gb.sum(1), atol=1e-04)  # cf convergence gromov
        np.testing.assert_allclose([2 / 3, 1 / 3], Gb.sum(0), atol=1e-04)  # cf convergence gromov
        np.testing.assert_allclose(log2['srfgw_dist'], log['srfgw_dist'], atol=1e-07)
        np.testing.assert_allclose(logb2['srfgw_dist'], log['srfgw_dist'], atol=1e-07)
        np.testing.assert_allclose(srgw, srgw_, atol=1e-07)


@pytest.skip_backend("tf", reason="test very slow with tf backend")
def test_entropic_semirelaxed_fgw_dtype_device(nx):
    # setup
    n_samples = 5  # nb samples

    mu_s = np.array([0, 0])
    cov_s = np.array([[1, 0], [0, 1]])

    xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=42)
    xt = xs[::-1].copy()

    rng = np.random.RandomState(42)
    ys = rng.randn(xs.shape[0], 2)
    yt = ys[::-1].copy()

    p = ot.unif(n_samples)

    C1 = ot.dist(xs, xs)
    C2 = ot.dist(xt, xt)

    C1 /= C1.max()
    C2 /= C2.max()

    M = ot.dist(ys, yt)

    for tp in nx.__type_list__:
        print(nx.dtype_device(tp))

        Mb, C1b, C2b, pb = nx.from_numpy(M, C1, C2, p, type_as=tp)

        for loss_fun in ['square_loss', 'kl_loss']:
            Gb = ot.gromov.entropic_semirelaxed_fused_gromov_wasserstein(
                Mb, C1b, C2b, pb, loss_fun, epsilon=0.1, verbose=True
            )
            fgw_valb = ot.gromov.entropic_semirelaxed_fused_gromov_wasserstein2(
                Mb, C1b, C2b, pb, loss_fun, epsilon=0.1, verbose=True
            )

            nx.assert_same_dtype_device(C1b, Gb)
            nx.assert_same_dtype_device(C1b, fgw_valb)


def test_not_implemented_solver():
    # shared data for the error checks below
    n_samples = 5  # nb samples
    mu_s = np.array([0, 0])
    cov_s = np.array([[1, 0], [0, 1]])

    rng = np.random.RandomState(42)
    xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=rng)
    xt = xs[::-1].copy()
    ys = rng.randn(xs.shape[0], 2)
    yt = ys[::-1].copy()

    p = ot.unif(n_samples)
    q = ot.unif(n_samples)
    C1 = ot.dist(xs, xs)
    C2 = ot.dist(xt, xt)

    C1 /= C1.max()
    C2 /= C2.max()
    M = ot.dist(ys, yt)

    solver = 'not_implemented'
    # entropic gw and fgw
    with pytest.raises(ValueError):
        ot.gromov.entropic_gromov_wasserstein(
            C1, C2, p, q, 'square_loss', epsilon=1e-1, solver=solver)
    with pytest.raises(ValueError):
        ot.gromov.entropic_fused_gromov_wasserstein(
            M, C1, C2, p, q, 'square_loss', epsilon=1e-1, solver=solver)
python-pot-0.9.3+dfsg/test/test_helpers.py000066400000000000000000000012311455713015700206220ustar00rootroot00000000000000"""Tests for helper functions
"""

# Author: Remi Flamary
#
# License: MIT License

import os
import sys

sys.path.append(os.path.join("ot", "helpers"))

from openmp_helpers import get_openmp_flag, check_openmp_support  # noqa
from pre_build_helpers import _get_compiler, compile_test_program  # noqa


def test_helpers():
    compiler = _get_compiler()

    get_openmp_flag(compiler)

    # NOTE: the header names inside the angle brackets were lost in extraction;
    # <stdio.h> and <stdlib.h> are assumed here as the standard includes for
    # this hello-world compile probe.
    s = '#include <stdio.h>\n#include <stdlib.h>\n\nint main(void) {\n\tprintf("Hello world!\\n");\n\treturn 0;\n}'
    output, _ = compile_test_program(s)
    assert len(output) == 1 and output[0] == "Hello world!"
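    # check_openmp_support() compiles and runs a small OpenMP probe; it is expected
    # to complete without raising whether or not OpenMP is actually available.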
check_openmp_support() python-pot-0.9.3+dfsg/test/test_lowrank.py000066400000000000000000000103101455713015700206330ustar00rootroot00000000000000""" Test for low rank sinkhorn solvers """ # Author: Laurène DAVID # # License: MIT License import ot import numpy as np import pytest from ot.lowrank import sklearn_import # check sklearn installation def test_compute_lr_sqeuclidean_matrix(): # test computation of low rank cost matrices M1 and M2 n = 100 X_s = np.reshape(1.0 * np.arange(2 * n), (n, 2)) X_t = np.reshape(1.0 * np.arange(2 * n), (n, 2)) M1, M2 = ot.lowrank.compute_lr_sqeuclidean_matrix(X_s, X_t, rescale_cost=False) M = ot.dist(X_s, X_t, metric="sqeuclidean") # original cost matrix np.testing.assert_allclose(np.dot(M1, M2.T), M, atol=1e-05) def test_lowrank_sinkhorn(): # test low rank sinkhorn n = 100 a = ot.unif(n) b = ot.unif(n) X_s = np.reshape(1.0 * np.arange(n), (n, 1)) X_t = np.reshape(1.0 * np.arange(n), (n, 1)) Q, R, g, log = ot.lowrank.lowrank_sinkhorn(X_s, X_t, a, b, reg=0.1, log=True, rescale_cost=False) P = log["lazy_plan"][:] value_linear = log["value_linear"] # check constraints for P np.testing.assert_allclose(a, P.sum(1), atol=1e-05) np.testing.assert_allclose(b, P.sum(0), atol=1e-05) # check if lazy_plan is equal to the fully computed plan P_true = np.dot(Q, np.dot(np.diag(1 / g), R.T)) np.testing.assert_allclose(P, P_true, atol=1e-05) # check if value_linear is correct with its original formula M = ot.dist(X_s, X_t, metric="sqeuclidean") value_linear_true = np.sum(M * P_true) np.testing.assert_allclose(value_linear, value_linear_true, atol=1e-05) # check warn parameter when Dykstra algorithm doesn't converge with pytest.warns(UserWarning): ot.lowrank.lowrank_sinkhorn(X_s, X_t, a, b, reg=0.1, stopThr=0, numItermax=1) @pytest.mark.parametrize(("init"), ("random", "deterministic", "kmeans")) def test_lowrank_sinkhorn_init(init): # test lowrank inits n = 100 a = ot.unif(n) b = ot.unif(n) X_s = np.reshape(1.0 * np.arange(n), (n, 1)) X_t = np.reshape(1.0 * np.arange(n), (n, 1)) # test ImportError if init="kmeans" and sklearn not imported if init in ["random", "deterministic"] or ((init == "kmeans") and (sklearn_import is True)): Q, R, g, log = ot.lowrank.lowrank_sinkhorn(X_s, X_t, a, b, reg=0.1, init=init, log=True) P = log["lazy_plan"][:] # check constraints for P np.testing.assert_allclose(a, P.sum(1), atol=1e-05) np.testing.assert_allclose(b, P.sum(0), atol=1e-05) else: with pytest.raises(ImportError): Q, R, g = ot.lowrank.lowrank_sinkhorn(X_s, X_t, a, b, reg=0.1, init=init) @pytest.mark.parametrize(("alpha, rank"), ((0.8, 2), (0.5, 3), (0.2, 6))) def test_lowrank_sinkhorn_alpha_error(alpha, rank): # Test warning for value of alpha n = 100 a = ot.unif(n) b = ot.unif(n) X_s = np.reshape(1.0 * np.arange(n), (n, 1)) X_t = np.reshape(1.0 * np.arange(0, n), (n, 1)) with pytest.raises(ValueError): ot.lowrank.lowrank_sinkhorn(X_s, X_t, a, b, reg=0.1, rank=rank, alpha=alpha, warn=False) @pytest.mark.parametrize(("gamma_init"), ("rescale", "theory")) def test_lowrank_sinkhorn_gamma_init(gamma_init): # Test lr sinkhorn with different init strategies n = 100 a = ot.unif(n) b = ot.unif(n) X_s = np.reshape(1.0 * np.arange(n), (n, 1)) X_t = np.reshape(1.0 * np.arange(n), (n, 1)) Q, R, g, log = ot.lowrank.lowrank_sinkhorn(X_s, X_t, a, b, reg=0.1, gamma_init=gamma_init, log=True) P = log["lazy_plan"][:] # check constraints for P np.testing.assert_allclose(a, P.sum(1), atol=1e-05) np.testing.assert_allclose(b, P.sum(0), atol=1e-05) @pytest.skip_backend('tf') def 
test_lowrank_sinkhorn_backends(nx): # Test low rank sinkhorn for different backends n = 100 a = ot.unif(n) b = ot.unif(n) X_s = np.reshape(1.0 * np.arange(n), (n, 1)) X_t = np.reshape(1.0 * np.arange(0, n), (n, 1)) ab, bb, X_sb, X_tb = nx.from_numpy(a, b, X_s, X_t) Q, R, g, log = ot.lowrank.lowrank_sinkhorn(X_sb, X_tb, ab, bb, reg=0.1, log=True) lazy_plan = log["lazy_plan"] P = lazy_plan[:] np.testing.assert_allclose(ab, P.sum(1), atol=1e-05) np.testing.assert_allclose(bb, P.sum(0), atol=1e-05) python-pot-0.9.3+dfsg/test/test_mapping.py000066400000000000000000000041571455713015700206250ustar00rootroot00000000000000"""Tests for module mapping""" # Author: Eloi Tanguy # # License: MIT License import numpy as np import ot import pytest from ot.backend import to_numpy try: # test if cvxpy is installed import cvxpy # noqa: F401 nocvxpy = False except ImportError: nocvxpy = True @pytest.mark.skipif(nocvxpy, reason="No CVXPY available") def test_ssnb_qcqp_constants(): c1, c2, c3 = ot.mapping._ssnb_qcqp_constants(.5, 1) np.testing.assert_almost_equal(c1, 1) np.testing.assert_almost_equal(c2, .5) np.testing.assert_almost_equal(c3, 1) @pytest.mark.skipif(nocvxpy, reason="No CVXPY available") def test_nearest_brenier_potential_fit(nx): X = nx.ones((2, 2)) phi, G, log = ot.mapping.nearest_brenier_potential_fit(X, X, its=3, log=True) np.testing.assert_almost_equal(to_numpy(G), to_numpy(X)) # image of source should be close to target # test without log but with X_classes, a, b and other init method a = nx.ones(2) / 2 ot.mapping.nearest_brenier_potential_fit(X, X, X_classes=nx.ones(2), a=a, b=a, its=1, init_method='target') @pytest.mark.skipif(nocvxpy, reason="No CVXPY available") def test_brenier_potential_predict_bounds(nx): X = nx.ones((2, 2)) phi, G = ot.mapping.nearest_brenier_potential_fit(X, X, its=3) phi_lu, G_lu, log = ot.mapping.nearest_brenier_potential_predict_bounds(X, phi, G, X, log=True) # 'new' input isn't new, so should be equal to target np.testing.assert_almost_equal(to_numpy(G_lu[0]), to_numpy(X)) np.testing.assert_almost_equal(to_numpy(G_lu[1]), to_numpy(X)) # test with no log but classes ot.mapping.nearest_brenier_potential_predict_bounds(X, phi, G, X, X_classes=nx.ones(2), Y_classes=nx.ones(2)) def test_joint_OT_mapping(): """ Complements the tests in test_da, for verbose, log and bias options """ xs = np.array([[.1, .2], [-.1, .3]]) ot.mapping.joint_OT_mapping_kernel(xs, xs, verbose=True) ot.mapping.joint_OT_mapping_linear(xs, xs, verbose=True) ot.mapping.joint_OT_mapping_kernel(xs, xs, log=True, bias=True) ot.mapping.joint_OT_mapping_linear(xs, xs, log=True, bias=True) python-pot-0.9.3+dfsg/test/test_optim.py000066400000000000000000000151521455713015700203170ustar00rootroot00000000000000"""Tests for module optim fro OT optimization """ # Author: Remi Flamary # # License: MIT License import numpy as np import ot def test_conditional_gradient(nx): n_bins = 100 # nb bins # bin positions x = np.arange(n_bins, dtype=np.float64) # Gaussian distributions a = ot.datasets.make_1D_gauss(n_bins, m=20, s=5) # m= mean, s= std b = ot.datasets.make_1D_gauss(n_bins, m=60, s=10) # loss matrix M = ot.dist(x.reshape((n_bins, 1)), x.reshape((n_bins, 1))) M /= M.max() def f(G): return 0.5 * np.sum(G**2) def df(G): return G def fb(G): return 0.5 * nx.sum(G ** 2) ab, bb, Mb = nx.from_numpy(a, b, M) reg = 1e-1 G, log = ot.optim.cg(a, b, M, reg, f, df, verbose=True, log=True) Gb, log = ot.optim.cg(ab, bb, Mb, reg, fb, df, verbose=True, log=True) Gb = nx.to_numpy(Gb) np.testing.assert_allclose(Gb, G) 
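    # ot.optim.cg solves min_G <G, M> + reg * f(G) over the transport polytope,
    # here with the quadratic regularizer f(G) = 0.5 * ||G||_F^2, so the numpy and
    # backend plans should coincide and satisfy both marginal constraints.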
np.testing.assert_allclose(a, Gb.sum(1)) np.testing.assert_allclose(b, Gb.sum(0)) def test_conditional_gradient_itermax(nx): n = 100 # nb samples mu_s = np.array([0, 0]) cov_s = np.array([[1, 0], [0, 1]]) mu_t = np.array([4, 4]) cov_t = np.array([[1, -.8], [-.8, 1]]) xs = ot.datasets.make_2D_samples_gauss(n, mu_s, cov_s) xt = ot.datasets.make_2D_samples_gauss(n, mu_t, cov_t) a, b = np.ones((n,)) / n, np.ones((n,)) / n # loss matrix M = ot.dist(xs, xt) M /= M.max() def f(G): return 0.5 * np.sum(G**2) def df(G): return G def fb(G): return 0.5 * nx.sum(G ** 2) ab, bb, Mb = nx.from_numpy(a, b, M) reg = 1e-1 G, log = ot.optim.cg(a, b, M, reg, f, df, numItermaxEmd=10000, verbose=True, log=True) Gb, log = ot.optim.cg(ab, bb, Mb, reg, fb, df, numItermaxEmd=10000, verbose=True, log=True) Gb = nx.to_numpy(Gb) np.testing.assert_allclose(Gb, G) np.testing.assert_allclose(a, Gb.sum(1)) np.testing.assert_allclose(b, Gb.sum(0)) def test_generalized_conditional_gradient(nx): n_bins = 100 # nb bins # bin positions x = np.arange(n_bins, dtype=np.float64) # Gaussian distributions a = ot.datasets.make_1D_gauss(n_bins, m=20, s=5) # m= mean, s= std b = ot.datasets.make_1D_gauss(n_bins, m=60, s=10) # loss matrix M = ot.dist(x.reshape((n_bins, 1)), x.reshape((n_bins, 1))) M /= M.max() def f(G): return 0.5 * np.sum(G**2) def df(G): return G def fb(G): return 0.5 * nx.sum(G ** 2) reg1 = 1e-3 reg2 = 1e-1 ab, bb, Mb = nx.from_numpy(a, b, M) G, log = ot.optim.gcg(a, b, M, reg1, reg2, f, df, verbose=True, log=True) Gb, log = ot.optim.gcg(ab, bb, Mb, reg1, reg2, fb, df, verbose=True, log=True) Gb = nx.to_numpy(Gb) np.testing.assert_allclose(Gb, G, atol=1e-12) np.testing.assert_allclose(a, Gb.sum(1), atol=1e-05) np.testing.assert_allclose(b, Gb.sum(0), atol=1e-05) def test_solve_1d_linesearch_quad_funct(): np.testing.assert_allclose(ot.optim.solve_1d_linesearch_quad(1, -1), 0.5) np.testing.assert_allclose(ot.optim.solve_1d_linesearch_quad(-1, 5), 0) np.testing.assert_allclose(ot.optim.solve_1d_linesearch_quad(-1, 0.5), 1) def test_line_search_armijo(nx): xk = np.array([[0.25, 0.25], [0.25, 0.25]]) pk = np.array([[-0.25, 0.25], [0.25, -0.25]]) gfk = np.array([[23.04273441, 23.0449082], [23.04273441, 23.0449082]]) old_fval = -123. xkb, pkb, gfkb = nx.from_numpy(xk, pk, gfk) def f(x): return 1. # Should not throw an exception and return 0. for alpha alpha, a, b = ot.optim.line_search_armijo( f, xkb, pkb, gfkb, old_fval ) alpha_np, anp, bnp = ot.optim.line_search_armijo( f, xk, pk, gfk, old_fval ) assert a == anp assert b == bnp assert alpha == 0. 
# check line search armijo def f(x): return nx.sum((x - 5.0) ** 2) def grad(x): return 2 * (x - 5.0) xk = nx.from_numpy(np.array([[[-5.0, -5.0]]])) pk = nx.from_numpy(np.array([[[100.0, 100.0]]])) gfk = grad(xk) old_fval = f(xk) # chech the case where the optimum is on the direction alpha, _, _ = ot.optim.line_search_armijo(f, xk, pk, gfk, old_fval) np.testing.assert_allclose(alpha, 0.1) # check the case where the direction is not far enough pk = nx.from_numpy(np.array([[[3.0, 3.0]]])) alpha, _, _ = ot.optim.line_search_armijo(f, xk, pk, gfk, old_fval, alpha0=1.0) np.testing.assert_allclose(alpha, 1.0) # check the case where checking the wrong direction alpha, _, _ = ot.optim.line_search_armijo(f, xk, -pk, gfk, old_fval) assert alpha <= 0 # check the case where the point is not a vector xk = nx.from_numpy(np.array(-5.0)) pk = nx.from_numpy(np.array(100.0)) gfk = grad(xk) old_fval = f(xk) alpha, _, _ = ot.optim.line_search_armijo(f, xk, pk, gfk, old_fval) np.testing.assert_allclose(alpha, 0.1) def test_line_search_armijo_dtype_device(nx): for tp in nx.__type_list__: def f(x): return nx.sum((x - 5.0) ** 2) def grad(x): return 2 * (x - 5.0) xk = np.array([[[-5.0, -5.0]]]) pk = np.array([[[100.0, 100.0]]]) xkb, pkb = nx.from_numpy(xk, pk, type_as=tp) gfkb = grad(xkb) old_fval = f(xkb) # chech the case where the optimum is on the direction alpha, _, fval = ot.optim.line_search_armijo(f, xkb, pkb, gfkb, old_fval) alpha = nx.to_numpy(alpha) np.testing.assert_allclose(alpha, 0.1) nx.assert_same_dtype_device(old_fval, fval) # check the case where the direction is not far enough pk = np.array([[[3.0, 3.0]]]) pkb = nx.from_numpy(pk, type_as=tp) alpha, _, fval = ot.optim.line_search_armijo(f, xkb, pkb, gfkb, old_fval, alpha0=1.0) alpha = nx.to_numpy(alpha) np.testing.assert_allclose(alpha, 1.0) nx.assert_same_dtype_device(old_fval, fval) # check the case where checking the wrong direction alpha, _, fval = ot.optim.line_search_armijo(f, xkb, -pkb, gfkb, old_fval) alpha = nx.to_numpy(alpha) assert alpha <= 0 nx.assert_same_dtype_device(old_fval, fval) # check the case where the point is not a vector xkb = nx.from_numpy(np.array(-5.0), type_as=tp) pkb = nx.from_numpy(np.array(100), type_as=tp) gfkb = grad(xkb) old_fval = f(xkb) alpha, _, fval = ot.optim.line_search_armijo(f, xkb, pkb, gfkb, old_fval) alpha = nx.to_numpy(alpha) np.testing.assert_allclose(alpha, 0.1) nx.assert_same_dtype_device(old_fval, fval) python-pot-0.9.3+dfsg/test/test_ot.py000066400000000000000000000312341455713015700176100ustar00rootroot00000000000000"""Tests for main module ot """ # Author: Remi Flamary # # License: MIT License import warnings import numpy as np import pytest import ot from ot.datasets import make_1D_gauss as gauss from ot.backend import torch, tf def test_emd_dimension_and_mass_mismatch(): # test emd and emd2 for dimension mismatch n_samples = 100 n_features = 2 rng = np.random.RandomState(0) x = rng.randn(n_samples, n_features) a = ot.utils.unif(n_samples + 1) M = ot.dist(x, x) np.testing.assert_raises(AssertionError, ot.emd, a, a, M) np.testing.assert_raises(AssertionError, ot.emd2, a, a, M) # test emd and emd2 for mass mismatch a = ot.utils.unif(n_samples) b = a.copy() a[0] = 100 np.testing.assert_raises(AssertionError, ot.emd, a, b, M) np.testing.assert_raises(AssertionError, ot.emd2, a, b, M) def test_emd_backends(nx): n_samples = 100 n_features = 2 rng = np.random.RandomState(0) x = rng.randn(n_samples, n_features) y = rng.randn(n_samples, n_features) a = ot.utils.unif(n_samples) M = ot.dist(x, y) G = 
ot.emd(a, a, M) ab, Mb = nx.from_numpy(a, M) Gb = ot.emd(ab, ab, Mb) np.allclose(G, nx.to_numpy(Gb)) def test_emd2_backends(nx): n_samples = 100 n_features = 2 rng = np.random.RandomState(0) x = rng.randn(n_samples, n_features) y = rng.randn(n_samples, n_features) a = ot.utils.unif(n_samples) M = ot.dist(x, y) val = ot.emd2(a, a, M) ab, Mb = nx.from_numpy(a, M) valb = ot.emd2(ab, ab, Mb) np.allclose(val, nx.to_numpy(valb)) def test_emd_emd2_types_devices(nx): n_samples = 100 n_features = 2 rng = np.random.RandomState(0) x = rng.randn(n_samples, n_features) y = rng.randn(n_samples, n_features) a = ot.utils.unif(n_samples) M = ot.dist(x, y) for tp in nx.__type_list__: print(nx.dtype_device(tp)) ab, Mb = nx.from_numpy(a, M, type_as=tp) Gb = ot.emd(ab, ab, Mb) w = ot.emd2(ab, ab, Mb) nx.assert_same_dtype_device(Mb, Gb) nx.assert_same_dtype_device(Mb, w) @pytest.mark.skipif(not tf, reason="tf not installed") def test_emd_emd2_devices_tf(): nx = ot.backend.TensorflowBackend() n_samples = 100 n_features = 2 rng = np.random.RandomState(0) x = rng.randn(n_samples, n_features) y = rng.randn(n_samples, n_features) a = ot.utils.unif(n_samples) M = ot.dist(x, y) # Check that everything stays on the CPU with tf.device("/CPU:0"): ab, Mb = nx.from_numpy(a, M) Gb = ot.emd(ab, ab, Mb) w = ot.emd2(ab, ab, Mb) nx.assert_same_dtype_device(Mb, Gb) nx.assert_same_dtype_device(Mb, w) if len(tf.config.list_physical_devices('GPU')) > 0: # Check that everything happens on the GPU ab, Mb = nx.from_numpy(a, M) Gb = ot.emd(ab, ab, Mb) w = ot.emd2(ab, ab, Mb) nx.assert_same_dtype_device(Mb, Gb) nx.assert_same_dtype_device(Mb, w) assert nx.dtype_device(Gb)[1].startswith("GPU") def test_emd2_gradients(): n_samples = 100 n_features = 2 rng = np.random.RandomState(0) x = rng.randn(n_samples, n_features) y = rng.randn(n_samples, n_features) a = ot.utils.unif(n_samples) M = ot.dist(x, y) if torch: a1 = torch.tensor(a, requires_grad=True) b1 = torch.tensor(a, requires_grad=True) M1 = torch.tensor(M, requires_grad=True) val, log = ot.emd2(a1, b1, M1, log=True) val.backward() assert a1.shape == a1.grad.shape assert b1.shape == b1.grad.shape assert M1.shape == M1.grad.shape assert np.allclose(a1.grad.cpu().detach().numpy(), log['u'].cpu().detach().numpy() - log['u'].cpu().detach().numpy().mean()) assert np.allclose(b1.grad.cpu().detach().numpy(), log['v'].cpu().detach().numpy() - log['v'].cpu().detach().numpy().mean()) # Testing for bug #309, checking for scaling of gradient a2 = torch.tensor(a, requires_grad=True) b2 = torch.tensor(a, requires_grad=True) M2 = torch.tensor(M, requires_grad=True) val = 10.0 * ot.emd2(a2, b2, M2) val.backward() assert np.allclose(10.0 * a1.grad.cpu().detach().numpy(), a2.grad.cpu().detach().numpy()) assert np.allclose(10.0 * b1.grad.cpu().detach().numpy(), b2.grad.cpu().detach().numpy()) assert np.allclose(10.0 * M1.grad.cpu().detach().numpy(), M2.grad.cpu().detach().numpy()) def test_emd_emd2(): # test emd and emd2 for simple identity n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 2) u = ot.utils.unif(n) M = ot.dist(x, x) G = ot.emd(u, u, M) # check G is identity np.testing.assert_allclose(G, np.eye(n) / n) # check constraints np.testing.assert_allclose(u, G.sum(1)) # cf convergence sinkhorn np.testing.assert_allclose(u, G.sum(0)) # cf convergence sinkhorn w = ot.emd2(u, u, M) # check loss=0 np.testing.assert_allclose(w, 0) def test_omp_emd2(): # test emd2 and emd2 with openmp for simple identity n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 2) u = ot.utils.unif(n) M = 
ot.dist(x, x) w = ot.emd2(u, u, M) w2 = ot.emd2(u, u, M, numThreads=2) np.testing.assert_allclose(w, w2) def test_emd_empty(): # test emd and emd2 for simple identity n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 2) u = ot.utils.unif(n) M = ot.dist(x, x) G = ot.emd([], [], M) # check G is identity np.testing.assert_allclose(G, np.eye(n) / n) # check constraints np.testing.assert_allclose(u, G.sum(1)) # cf convergence sinkhorn np.testing.assert_allclose(u, G.sum(0)) # cf convergence sinkhorn w = ot.emd2([], [], M) # check loss=0 np.testing.assert_allclose(w, 0) def test_emd2_multi(): n = 500 # nb bins # bin positions x = np.arange(n, dtype=np.float64) # Gaussian distributions a = gauss(n, m=20, s=5) # m= mean, s= std ls = np.arange(20, 500, 100) nb = len(ls) b = np.zeros((n, nb)) for i in range(nb): b[:, i] = gauss(n, m=ls[i], s=10) # loss matrix M = ot.dist(x.reshape((n, 1)), x.reshape((n, 1))) # M/=M.max() print('Computing {} EMD '.format(nb)) # emd loss 1 proc ot.tic() emd1 = ot.emd2(a, b, M, 1) ot.toc('1 proc : {} s') # emd loss multipro proc ot.tic() emdn = ot.emd2(a, b, M) ot.toc('multi proc : {} s') np.testing.assert_allclose(emd1, emdn) # emd loss multipro proc with log ot.tic() emdn = ot.emd2(a, b, M, log=True, return_matrix=True) ot.toc('multi proc : {} s') for i in range(len(emdn)): emd = emdn[i] log = emd[1] cost = emd[0] check_duality_gap(a, b[:, i], M, log['G'], log['u'], log['v'], cost) emdn[i] = cost emdn = np.array(emdn) np.testing.assert_allclose(emd1, emdn) def test_lp_barycenter(): a1 = np.array([1.0, 0, 0])[:, None] a2 = np.array([0, 0, 1.0])[:, None] A = np.hstack((a1, a2)) M = np.array([[0, 1.0, 4.0], [1.0, 0, 1.0], [4.0, 1.0, 0]]) # obvious barycenter between two Diracs bary0 = np.array([0, 1.0, 0]) bary = ot.lp.barycenter(A, M, [.5, .5]) np.testing.assert_allclose(bary, bary0, rtol=1e-5, atol=1e-7) np.testing.assert_allclose(bary.sum(), 1) def test_free_support_barycenter(): measures_locations = [np.array([-1.]).reshape((1, 1)), np.array([1.]).reshape((1, 1))] measures_weights = [np.array([1.]), np.array([1.])] X_init = np.array([-12.]).reshape((1, 1)) # obvious barycenter location between two Diracs bar_locations = np.array([0.]).reshape((1, 1)) X = ot.lp.free_support_barycenter(measures_locations, measures_weights, X_init) np.testing.assert_allclose(X, bar_locations, rtol=1e-5, atol=1e-7) def test_free_support_barycenter_backends(nx): measures_locations = [np.array([-1.]).reshape((1, 1)), np.array([1.]).reshape((1, 1))] measures_weights = [np.array([1.]), np.array([1.])] X_init = np.array([-12.]).reshape((1, 1)) X = ot.lp.free_support_barycenter(measures_locations, measures_weights, X_init) measures_locations2 = nx.from_numpy(*measures_locations) measures_weights2 = nx.from_numpy(*measures_weights) X_init2 = nx.from_numpy(X_init) X2 = ot.lp.free_support_barycenter(measures_locations2, measures_weights2, X_init2) np.testing.assert_allclose(X, nx.to_numpy(X2)) def test_generalised_free_support_barycenter(): X = [np.array([-1., -1.]).reshape((1, 2)), np.array([1., 1.]).reshape((1, 2))] # two 2D points bar is obviously 0 a = [np.array([1.]), np.array([1.])] P = [np.eye(2), np.eye(2)] Y_init = np.array([-12., 7.]).reshape((1, 2)) # obvious barycenter location between two 2D Diracs Y_true = np.array([0., .0]).reshape((1, 2)) # test without log and no init Y = ot.lp.generalized_free_support_barycenter(X, a, P, 1) np.testing.assert_allclose(Y, Y_true, rtol=1e-5, atol=1e-7) # test with log and init Y, _ = ot.lp.generalized_free_support_barycenter(X, a, P, 1, 
Y_init=Y_init, b=np.array([1.]), log=True) np.testing.assert_allclose(Y, Y_true, rtol=1e-5, atol=1e-7) def test_generalised_free_support_barycenter_backends(nx): X = [np.array([-1.]).reshape((1, 1)), np.array([1.]).reshape((1, 1))] a = [np.array([1.]), np.array([1.])] P = [np.array([1.]).reshape((1, 1)), np.array([1.]).reshape((1, 1))] Y_init = np.array([-12.]).reshape((1, 1)) Y = ot.lp.generalized_free_support_barycenter(X, a, P, 1, Y_init=Y_init) X2 = nx.from_numpy(*X) a2 = nx.from_numpy(*a) P2 = nx.from_numpy(*P) Y_init2 = nx.from_numpy(Y_init) Y2 = ot.lp.generalized_free_support_barycenter(X2, a2, P2, 1, Y_init=Y_init2) np.testing.assert_allclose(Y, nx.to_numpy(Y2)) @pytest.mark.skipif(not ot.lp.cvx.cvxopt, reason="No cvxopt available") def test_lp_barycenter_cvxopt(): a1 = np.array([1.0, 0, 0])[:, None] a2 = np.array([0, 0, 1.0])[:, None] A = np.hstack((a1, a2)) M = np.array([[0, 1.0, 4.0], [1.0, 0, 1.0], [4.0, 1.0, 0]]) # obvious barycenter between two Diracs bary0 = np.array([0, 1.0, 0]) bary = ot.lp.barycenter(A, M, [.5, .5], solver=None) np.testing.assert_allclose(bary, bary0, rtol=1e-5, atol=1e-7) np.testing.assert_allclose(bary.sum(), 1) def test_warnings(): n = 100 # nb bins m = 100 # nb bins mean1 = 30 mean2 = 50 # bin positions x = np.arange(n, dtype=np.float64) y = np.arange(m, dtype=np.float64) # Gaussian distributions a = gauss(n, m=mean1, s=5) # m= mean, s= std b = gauss(m, m=mean2, s=10) # loss matrix M = ot.dist(x.reshape((-1, 1)), y.reshape((-1, 1))) ** (1. / 2) print('Computing {} EMD '.format(1)) with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") print('Computing {} EMD '.format(1)) ot.emd(a, b, M, numItermax=1) assert "numItermax" in str(w[-1].message) #assert len(w) == 1 def test_dual_variables(): n = 500 # nb bins m = 600 # nb bins mean1 = 300 mean2 = 400 # bin positions x = np.arange(n, dtype=np.float64) y = np.arange(m, dtype=np.float64) # Gaussian distributions a = gauss(n, m=mean1, s=5) # m= mean, s= std b = gauss(m, m=mean2, s=10) # loss matrix M = ot.dist(x.reshape((-1, 1)), y.reshape((-1, 1))) ** (1. 
/ 2) print('Computing {} EMD '.format(1)) # emd loss 1 proc ot.tic() G, log = ot.emd(a, b, M, log=True) ot.toc('1 proc : {} s') ot.tic() G2 = ot.emd(b, a, np.ascontiguousarray(M.T)) ot.toc('1 proc : {} s') cost1 = (G * M).sum() # Check symmetry np.testing.assert_array_almost_equal(cost1, (M * G2.T).sum()) # Check with closed-form solution for gaussians np.testing.assert_almost_equal(cost1, np.abs(mean1 - mean2)) # Check that both cost computations are equivalent np.testing.assert_almost_equal(cost1, log['cost']) check_duality_gap(a, b, M, G, log['u'], log['v'], log['cost']) constraint_violation = log['u'][:, None] + log['v'][None, :] - M assert constraint_violation.max() < 1e-8 def check_duality_gap(a, b, M, G, u, v, cost): cost_dual = np.vdot(a, u) + np.vdot(b, v) # Check that dual and primal cost are equal np.testing.assert_almost_equal(cost_dual, cost) [ind1, ind2] = np.nonzero(G) # Check that reduced cost is zero on transport arcs np.testing.assert_array_almost_equal((M - u.reshape(-1, 1) - v.reshape(1, -1))[ind1, ind2], np.zeros(ind1.size)) python-pot-0.9.3+dfsg/test/test_partial.py000077500000000000000000000242641455713015700206320ustar00rootroot00000000000000"""Tests for module partial """ # Author: # Laetitia Chapel # # License: MIT License import numpy as np import scipy as sp import ot from ot.backend import to_numpy, torch import pytest def test_raise_errors(): n_samples = 20 # nb samples (gaussian) n_noise = 20 # nb of samples (noise) mu = np.array([0, 0]) cov = np.array([[1, 0], [0, 2]]) rng = np.random.RandomState(42) xs = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov, random_state=rng) xs = np.append(xs, (rng.rand(n_noise, 2) + 1) * 4).reshape((-1, 2)) xt = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov, random_state=rng) xt = np.append(xt, (rng.rand(n_noise, 2) + 1) * -3).reshape((-1, 2)) M = ot.dist(xs, xt) p = ot.unif(n_samples + n_noise) q = ot.unif(n_samples + n_noise) with pytest.raises(ValueError): ot.partial.partial_wasserstein_lagrange(p + 1, q, M, 1, log=True) with pytest.raises(ValueError): ot.partial.partial_wasserstein(p, q, M, m=2, log=True) with pytest.raises(ValueError): ot.partial.partial_wasserstein(p, q, M, m=-1, log=True) with pytest.raises(ValueError): ot.partial.entropic_partial_wasserstein(p, q, M, reg=1, m=2, log=True) with pytest.raises(ValueError): ot.partial.entropic_partial_wasserstein(p, q, M, reg=1, m=-1, log=True) with pytest.raises(ValueError): ot.partial.partial_gromov_wasserstein(M, M, p, q, m=2, log=True) with pytest.raises(ValueError): ot.partial.partial_gromov_wasserstein(M, M, p, q, m=-1, log=True) with pytest.raises(ValueError): ot.partial.entropic_partial_gromov_wasserstein(M, M, p, q, reg=1, m=2, log=True) with pytest.raises(ValueError): ot.partial.entropic_partial_gromov_wasserstein(M, M, p, q, reg=1, m=-1, log=True) def test_partial_wasserstein_lagrange(): n_samples = 20 # nb samples (gaussian) n_noise = 20 # nb of samples (noise) mu = np.array([0, 0]) cov = np.array([[1, 0], [0, 2]]) rng = np.random.RandomState(42) xs = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov, random_state=rng) xs = np.append(xs, (rng.rand(n_noise, 2) + 1) * 4).reshape((-1, 2)) xt = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov, random_state=rng) xt = np.append(xt, (rng.rand(n_noise, 2) + 1) * -3).reshape((-1, 2)) M = ot.dist(xs, xt) p = ot.unif(n_samples + n_noise) q = ot.unif(n_samples + n_noise) w0, log0 = ot.partial.partial_wasserstein_lagrange(p, q, M, 1, log=True) w0, log0 = ot.partial.partial_wasserstein_lagrange(p, q, M, 
100, log=True)


def test_partial_wasserstein(nx):
    n_samples = 20  # nb samples (gaussian)
    n_noise = 20  # nb of samples (noise)

    mu = np.array([0, 0])
    cov = np.array([[1, 0], [0, 2]])

    rng = np.random.RandomState(42)
    xs = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov, random_state=rng)
    xs = np.append(xs, (rng.rand(n_noise, 2) + 1) * 4).reshape((-1, 2))
    xt = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov, random_state=rng)
    xt = np.append(xt, (rng.rand(n_noise, 2) + 1) * -3).reshape((-1, 2))

    M = ot.dist(xs, xt)

    p = ot.unif(n_samples + n_noise)
    q = ot.unif(n_samples + n_noise)
    m = 0.5

    p, q, M = nx.from_numpy(p, q, M)

    w0, log0 = ot.partial.partial_wasserstein(p, q, M, m=m, log=True)
    w, log = ot.partial.entropic_partial_wasserstein(p, q, M, reg=1, m=m, log=True, verbose=True)

    # check constraints for both the exact and the entropic plans
    np.testing.assert_equal(to_numpy(nx.sum(w0, axis=1) - p) <= 1e-5, [True] * len(p))
    np.testing.assert_equal(to_numpy(nx.sum(w0, axis=0) - q) <= 1e-5, [True] * len(q))
    np.testing.assert_equal(to_numpy(nx.sum(w, axis=1) - p) <= 1e-5, [True] * len(p))
    np.testing.assert_equal(to_numpy(nx.sum(w, axis=0) - q) <= 1e-5, [True] * len(q))

    # check transported mass
    np.testing.assert_allclose(np.sum(to_numpy(w0)), m, atol=1e-04)
    np.testing.assert_allclose(np.sum(to_numpy(w)), m, atol=1e-04)

    w0, log0 = ot.partial.partial_wasserstein2(p, q, M, m=m, log=True)
    w0_val = ot.partial.partial_wasserstein2(p, q, M, m=m, log=False)

    G = log0['T']

    np.testing.assert_allclose(w0, w0_val, atol=1e-1, rtol=1e-1)

    # check constraints
    np.testing.assert_equal(to_numpy(nx.sum(G, axis=1) - p) <= 1e-5, [True] * len(p))
    np.testing.assert_equal(to_numpy(nx.sum(G, axis=0) - q) <= 1e-5, [True] * len(q))
    np.testing.assert_allclose(np.sum(to_numpy(G)), m, atol=1e-04)

    empty_array = nx.zeros(0, type_as=M)
    w = ot.partial.partial_wasserstein(empty_array, empty_array, M=M, m=None)

    # check constraints
    np.testing.assert_equal(to_numpy(nx.sum(w, axis=1) - p) <= 1e-5, [True] * len(p))
    np.testing.assert_equal(to_numpy(nx.sum(w, axis=0) - q) <= 1e-5, [True] * len(q))

    # check transported mass
    np.testing.assert_allclose(np.sum(to_numpy(w)), 1, atol=1e-04)

    w0 = ot.partial.entropic_partial_wasserstein(empty_array, empty_array, M=M, reg=10, m=None)

    # check constraints
    np.testing.assert_equal(to_numpy(nx.sum(w0, axis=1) - p) <= 1e-5, [True] * len(p))
    np.testing.assert_equal(to_numpy(nx.sum(w0, axis=0) - q) <= 1e-5, [True] * len(q))

    # check transported mass
    np.testing.assert_allclose(np.sum(to_numpy(w0)), 1, atol=1e-04)


def test_partial_wasserstein2_gradient():
    if torch:
        n_samples = 40

        mu = np.array([0, 0])
        cov = np.array([[1, 0], [0, 2]])

        xs = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov)
        xt = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov)

        M = torch.tensor(ot.dist(xs, xt), requires_grad=True, dtype=torch.float64)

        p = torch.tensor(ot.unif(n_samples), dtype=torch.float64)
        q = torch.tensor(ot.unif(n_samples), dtype=torch.float64)

        m = 0.5

        w, log = ot.partial.partial_wasserstein2(p, q, M, m=m, log=True)

        w.backward()

        assert M.grad is not None
        assert M.grad.shape == M.shape


def test_entropic_partial_wasserstein_gradient():
    if torch:
        n_samples = 40

        mu = np.array([0, 0])
        cov = np.array([[1, 0], [0, 2]])

        xs = 
ot.datasets.make_2D_samples_gauss(n_samples, mu, cov) xt = ot.datasets.make_2D_samples_gauss(n_samples, mu, cov) M = torch.tensor(ot.dist(xs, xt), requires_grad=True, dtype=torch.float64) p = torch.tensor(ot.unif(n_samples), requires_grad=True, dtype=torch.float64) q = torch.tensor(ot.unif(n_samples), requires_grad=True, dtype=torch.float64) m = 0.5 reg = 1 _, log = ot.partial.entropic_partial_wasserstein(p, q, M, m=m, reg=reg, log=True) log['partial_w_dist'].backward() assert M.grad is not None assert p.grad is not None assert q.grad is not None assert M.grad.shape == M.shape assert p.grad.shape == p.shape assert q.grad.shape == q.shape def test_partial_gromov_wasserstein(): rng = np.random.RandomState(42) n_samples = 20 # nb samples n_noise = 10 # nb of samples (noise) p = ot.unif(n_samples + n_noise) q = ot.unif(n_samples + n_noise) mu_s = np.array([0, 0]) cov_s = np.array([[1, 0], [0, 1]]) mu_t = np.array([0, 0, 0]) cov_t = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=rng) xs = np.concatenate((xs, ((rng.rand(n_noise, 2) + 1) * 4)), axis=0) P = sp.linalg.sqrtm(cov_t) xt = rng.randn(n_samples, 3).dot(P) + mu_t xt = np.concatenate((xt, ((rng.rand(n_noise, 3) + 1) * 10)), axis=0) xt2 = xs[::-1].copy() C1 = ot.dist(xs, xs) C2 = ot.dist(xt, xt) C3 = ot.dist(xt2, xt2) m = 2 / 3 res0, log0 = ot.partial.partial_gromov_wasserstein(C1, C3, p, q, m=m, log=True, verbose=True) np.testing.assert_allclose(res0, 0, atol=1e-1, rtol=1e-1) C1 = sp.spatial.distance.cdist(xs, xs) C2 = sp.spatial.distance.cdist(xt, xt) m = 1 res0, log0 = ot.partial.partial_gromov_wasserstein(C1, C2, p, q, m=m, log=True) G = ot.gromov.gromov_wasserstein(C1, C2, p, q, 'square_loss') np.testing.assert_allclose(G, res0, atol=1e-04) res, log = ot.partial.entropic_partial_gromov_wasserstein(C1, C2, p, q, 10, m=m, log=True) G = ot.gromov.entropic_gromov_wasserstein( C1, C2, p, q, 'square_loss', epsilon=10) np.testing.assert_allclose(G, res, atol=1e-02) w0, log0 = ot.partial.partial_gromov_wasserstein2(C1, C2, p, q, m=m, log=True) w0_val = ot.partial.partial_gromov_wasserstein2(C1, C2, p, q, m=m, log=False) G = log0['T'] np.testing.assert_allclose(w0, w0_val, atol=1e-1, rtol=1e-1) m = 2 / 3 res0, log0 = ot.partial.partial_gromov_wasserstein(C1, C2, p, q, m=m, log=True) res, log = ot.partial.entropic_partial_gromov_wasserstein(C1, C2, p, q, 100, m=m, log=True) # check constraints np.testing.assert_equal( res0.sum(1) <= p, [True] * len(p)) # cf convergence wasserstein np.testing.assert_equal( res0.sum(0) <= q, [True] * len(q)) # cf convergence wasserstein np.testing.assert_allclose( np.sum(res0), m, atol=1e-04) np.testing.assert_equal( res.sum(1) <= p, [True] * len(p)) # cf convergence wasserstein np.testing.assert_equal( res.sum(0) <= q, [True] * len(q)) # cf convergence wasserstein np.testing.assert_allclose( np.sum(res), m, atol=1e-04) python-pot-0.9.3+dfsg/test/test_plot.py000066400000000000000000000026131455713015700201430ustar00rootroot00000000000000"""Tests for module plot for visualization """ # Author: Remi Flamary # # License: MIT License import numpy as np import pytest try: # test if matplotlib is installed import matplotlib matplotlib.use('Agg') nogo = False except ImportError: nogo = True @pytest.mark.skipif(nogo, reason="Matplotlib not installed") def test_plot1D_mat(): import ot import ot.plot n_bins = 100 # nb bins # bin positions x = np.arange(n_bins, dtype=np.float64) # Gaussian distributions a = ot.datasets.make_1D_gauss(n_bins, m=20, s=5) # 
m= mean, s= std b = ot.datasets.make_1D_gauss(n_bins, m=60, s=10) # loss matrix M = ot.dist(x.reshape((n_bins, 1)), x.reshape((n_bins, 1))) M /= M.max() ot.plot.plot1D_mat(a, b, M, 'Cost matrix M') @pytest.mark.skipif(nogo, reason="Matplotlib not installed") def test_plot2D_samples_mat(): import ot import ot.plot n_bins = 50 # nb samples mu_s = np.array([0, 0]) cov_s = np.array([[1, 0], [0, 1]]) mu_t = np.array([4, 4]) cov_t = np.array([[1, -.8], [-.8, 1]]) rng = np.random.RandomState(42) xs = ot.datasets.make_2D_samples_gauss(n_bins, mu_s, cov_s, random_state=rng) xt = ot.datasets.make_2D_samples_gauss(n_bins, mu_t, cov_t, random_state=rng) G = 1.0 * (rng.rand(n_bins, n_bins) < 0.01) ot.plot.plot2D_samples_mat(xs, xt, G, thr=1e-5) python-pot-0.9.3+dfsg/test/test_regpath.py000066400000000000000000000034411455713015700206170ustar00rootroot00000000000000"""Tests for module regularization path""" # Author: Haoran Wu # # License: MIT License import numpy as np import ot def test_fully_relaxed_path(): n_source = 50 # nb source samples (gaussian) n_target = 40 # nb target samples (gaussian) mu = np.array([0, 0]) cov = np.array([[1, 0], [0, 2]]) rng = np.random.RandomState(0) xs = ot.datasets.make_2D_samples_gauss(n_source, mu, cov, random_state=rng) xt = ot.datasets.make_2D_samples_gauss(n_target, mu, cov, random_state=rng) # source and target distributions a = ot.utils.unif(n_source) b = ot.utils.unif(n_target) # loss matrix M = ot.dist(xs, xt) M /= M.max() t, _, _ = ot.regpath.regularization_path(a, b, M, reg=1e-8, semi_relaxed=False) G = t.reshape((n_source, n_target)) np.testing.assert_allclose(a, G.sum(1), atol=1e-05) np.testing.assert_allclose(b, G.sum(0), atol=1e-05) def test_semi_relaxed_path(): n_source = 50 # nb source samples (gaussian) n_target = 40 # nb target samples (gaussian) mu = np.array([0, 0]) cov = np.array([[1, 0], [0, 2]]) rng = np.random.RandomState(0) xs = ot.datasets.make_2D_samples_gauss(n_source, mu, cov, random_state=rng) xt = ot.datasets.make_2D_samples_gauss(n_target, mu, cov, random_state=rng) # source and target distributions a = ot.utils.unif(n_source) b = ot.utils.unif(n_target) # loss matrix M = ot.dist(xs, xt) M /= M.max() t, _, _ = ot.regpath.regularization_path(a, b, M, reg=1e-8, semi_relaxed=True) G = t.reshape((n_source, n_target)) np.testing.assert_allclose(a, G.sum(1), atol=1e-05) np.testing.assert_allclose(b, G.sum(0), atol=1e-10) python-pot-0.9.3+dfsg/test/test_sliced.py000066400000000000000000000317471455713015700204420ustar00rootroot00000000000000"""Tests for module sliced""" # Author: Adrien Corenflos # Nicolas Courty # # License: MIT License import numpy as np import pytest import ot from ot.sliced import get_random_projections from ot.backend import tf, torch def test_get_random_projections(): rng = np.random.RandomState(0) projections = get_random_projections(1000, 50, rng) np.testing.assert_almost_equal(np.sum(projections ** 2, 0), 1.) def test_sliced_same_dist(): n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 2) u = ot.utils.unif(n) res = ot.sliced_wasserstein_distance(x, x, u, u, 10, seed=rng) np.testing.assert_almost_equal(res, 0.) 
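

# Illustrative sketch (an assumed helper, not part of the original test
# suite): the sliced Wasserstein distance exercised by the surrounding tests
# can be reproduced by hand by averaging projected 1D costs from ot.emd2_1d
# over random directions. All names and default values here are local
# assumptions for the example.
def _manual_sliced_wasserstein_sketch(n=50, n_projections=20, seed=0):
    rng = np.random.RandomState(seed)
    xs = rng.randn(n, 2)
    xt = rng.randn(n, 2) + 1.0
    w = ot.utils.unif(n)
    # draw unit-norm projection directions, shape (d, n_projections)
    P = get_random_projections(2, n_projections, rng)
    # average the projected 1D squared Wasserstein costs, then take the root
    vals = [ot.emd2_1d(xs.dot(P[:, k]), xt.dot(P[:, k]), w, w)
            for k in range(n_projections)]
    return np.sqrt(np.mean(vals))
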
def test_sliced_bad_shapes(): n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 2) y = rng.randn(n, 4) u = ot.utils.unif(n) with pytest.raises(ValueError): _ = ot.sliced_wasserstein_distance(x, y, u, u, 10, seed=rng) def test_sliced_log(): n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 4) y = rng.randn(n, 4) u = ot.utils.unif(n) res, log = ot.sliced_wasserstein_distance(x, y, u, u, 10, p=1, seed=rng, log=True) assert len(log) == 2 projections = log["projections"] projected_emds = log["projected_emds"] assert projections.shape[1] == len(projected_emds) == 10 for emd in projected_emds: assert emd > 0 def test_sliced_different_dists(): n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 2) u = ot.utils.unif(n) y = rng.randn(n, 2) res = ot.sliced_wasserstein_distance(x, y, u, u, 10, seed=rng) assert res > 0. def test_1d_sliced_equals_emd(): n = 100 m = 120 rng = np.random.RandomState(0) x = rng.randn(n, 1) a = rng.uniform(0, 1, n) a /= a.sum() y = rng.randn(m, 1) u = ot.utils.unif(m) res = ot.sliced_wasserstein_distance(x, y, a, u, 10, seed=42) expected = ot.emd2_1d(x.squeeze(), y.squeeze(), a, u) np.testing.assert_almost_equal(res ** 2, expected) def test_max_sliced_same_dist(): n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 2) u = ot.utils.unif(n) res = ot.max_sliced_wasserstein_distance(x, x, u, u, 10, seed=rng) np.testing.assert_almost_equal(res, 0.) def test_max_sliced_different_dists(): n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 2) u = ot.utils.unif(n) y = rng.randn(n, 2) res, log = ot.max_sliced_wasserstein_distance(x, y, u, u, 10, seed=rng, log=True) assert res > 0. def test_sliced_same_proj(): n_projections = 10 seed = 12 rng = np.random.RandomState(0) X = rng.randn(8, 2) Y = rng.randn(8, 2) cost1, log1 = ot.sliced_wasserstein_distance(X, Y, seed=seed, n_projections=n_projections, log=True) P = get_random_projections(X.shape[1], n_projections=10, seed=seed) cost2, log2 = ot.sliced_wasserstein_distance(X, Y, projections=P, log=True) assert np.allclose(log1['projections'], log2['projections']) assert np.isclose(cost1, cost2) def test_sliced_backend(nx): n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 2) y = rng.randn(2 * n, 2) P = rng.randn(2, 20) P = P / np.sqrt((P**2).sum(0, keepdims=True)) n_projections = 20 xb, yb, Pb = nx.from_numpy(x, y, P) val0 = ot.sliced_wasserstein_distance(x, y, projections=P) val = ot.sliced_wasserstein_distance(xb, yb, n_projections=n_projections, seed=0) val2 = ot.sliced_wasserstein_distance(xb, yb, n_projections=n_projections, seed=0) assert val > 0 assert val == val2 valb = nx.to_numpy(ot.sliced_wasserstein_distance(xb, yb, projections=Pb)) assert np.allclose(val0, valb) def test_sliced_backend_type_devices(nx): n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 2) y = rng.randn(2 * n, 2) P = rng.randn(2, 20) P = P / np.sqrt((P**2).sum(0, keepdims=True)) for tp in nx.__type_list__: print(nx.dtype_device(tp)) xb, yb, Pb = nx.from_numpy(x, y, P, type_as=tp) valb = ot.sliced_wasserstein_distance(xb, yb, projections=Pb) nx.assert_same_dtype_device(xb, valb) @pytest.mark.skipif(not tf, reason="tf not installed") def test_sliced_backend_device_tf(): nx = ot.backend.TensorflowBackend() n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 2) y = rng.randn(2 * n, 2) P = rng.randn(2, 20) P = P / np.sqrt((P**2).sum(0, keepdims=True)) # Check that everything stays on the CPU with tf.device("/CPU:0"): xb, yb, Pb = nx.from_numpy(x, y, P) valb = ot.sliced_wasserstein_distance(xb, yb, projections=Pb) 
nx.assert_same_dtype_device(xb, valb) if len(tf.config.list_physical_devices('GPU')) > 0: # Check that everything happens on the GPU xb, yb, Pb = nx.from_numpy(x, y, P) valb = ot.sliced_wasserstein_distance(xb, yb, projections=Pb) nx.assert_same_dtype_device(xb, valb) assert nx.dtype_device(valb)[1].startswith("GPU") def test_max_sliced_backend(nx): n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 2) y = rng.randn(2 * n, 2) P = rng.randn(2, 20) P = P / np.sqrt((P**2).sum(0, keepdims=True)) n_projections = 20 xb, yb, Pb = nx.from_numpy(x, y, P) val0 = ot.max_sliced_wasserstein_distance(x, y, projections=P) val = ot.max_sliced_wasserstein_distance(xb, yb, n_projections=n_projections, seed=0) val2 = ot.max_sliced_wasserstein_distance(xb, yb, n_projections=n_projections, seed=0) assert val > 0 assert val == val2 valb = nx.to_numpy(ot.max_sliced_wasserstein_distance(xb, yb, projections=Pb)) assert np.allclose(val0, valb) def test_max_sliced_backend_type_devices(nx): n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 2) y = rng.randn(2 * n, 2) P = rng.randn(2, 20) P = P / np.sqrt((P**2).sum(0, keepdims=True)) for tp in nx.__type_list__: print(nx.dtype_device(tp)) xb, yb, Pb = nx.from_numpy(x, y, P, type_as=tp) valb = ot.max_sliced_wasserstein_distance(xb, yb, projections=Pb) nx.assert_same_dtype_device(xb, valb) @pytest.mark.skipif(not tf, reason="tf not installed") def test_max_sliced_backend_device_tf(): nx = ot.backend.TensorflowBackend() n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 2) y = rng.randn(2 * n, 2) P = rng.randn(2, 20) P = P / np.sqrt((P**2).sum(0, keepdims=True)) # Check that everything stays on the CPU with tf.device("/CPU:0"): xb, yb, Pb = nx.from_numpy(x, y, P) valb = ot.max_sliced_wasserstein_distance(xb, yb, projections=Pb) nx.assert_same_dtype_device(xb, valb) if len(tf.config.list_physical_devices('GPU')) > 0: # Check that everything happens on the GPU xb, yb, Pb = nx.from_numpy(x, y, P) valb = ot.max_sliced_wasserstein_distance(xb, yb, projections=Pb) nx.assert_same_dtype_device(xb, valb) assert nx.dtype_device(valb)[1].startswith("GPU") def test_projections_stiefel(): rng = np.random.RandomState(0) n_projs = 500 x = rng.randn(100, 3) x = x / np.sqrt(np.sum(x**2, -1, keepdims=True)) ssw, log = ot.sliced_wasserstein_sphere(x, x, n_projections=n_projs, seed=rng, log=True) P = log["projections"] P_T = np.transpose(P, [0, 2, 1]) np.testing.assert_almost_equal(np.matmul(P_T, P), np.array([np.eye(2) for k in range(n_projs)])) def test_sliced_sphere_same_dist(): n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 3) x = x / np.sqrt(np.sum(x**2, -1, keepdims=True)) u = ot.utils.unif(n) res = ot.sliced_wasserstein_sphere(x, x, u, u, 10, seed=rng) np.testing.assert_almost_equal(res, 0.) 
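

# Illustrative sketch (an assumed helper, not part of the original test
# suite): the spherical sliced tests below all start by projecting Gaussian
# samples onto the unit sphere, since ot.sliced_wasserstein_sphere expects
# unit-norm rows; this makes that precondition explicit.
def _unit_sphere_samples_sketch(n=100, d=3, seed=0):
    rng = np.random.RandomState(seed)
    x = rng.randn(n, d)
    # row-wise normalisation puts every sample on the unit sphere S^{d-1}
    return x / np.sqrt(np.sum(x ** 2, axis=-1, keepdims=True))
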
def test_sliced_sphere_same_proj():
    n_projections = 10
    n = 100
    rng = np.random.RandomState(0)
    x = rng.randn(n, 3)
    x = x / np.sqrt(np.sum(x**2, -1, keepdims=True))
    y = rng.randn(n, 3)
    y = y / np.sqrt(np.sum(y**2, -1, keepdims=True))
    seed = 42

    cost1, log1 = ot.sliced_wasserstein_sphere(x, y, seed=seed, n_projections=n_projections, log=True)
    cost2, log2 = ot.sliced_wasserstein_sphere(x, y, seed=seed, n_projections=n_projections, log=True)

    assert np.allclose(log1['projections'], log2['projections'])
    assert np.isclose(cost1, cost2)


def test_sliced_sphere_bad_shapes():
    n = 100
    rng = np.random.RandomState(0)
    x = rng.randn(n, 3)
    x = x / np.sqrt(np.sum(x**2, -1, keepdims=True))
    y = rng.randn(n, 4)
    y = y / np.sqrt(np.sum(y**2, -1, keepdims=True))
    u = ot.utils.unif(n)

    with pytest.raises(ValueError):
        _ = ot.sliced_wasserstein_sphere(x, y, u, u, 10, seed=rng)


def test_sliced_sphere_values_on_the_sphere():
    n = 100
    rng = np.random.RandomState(0)
    x = rng.randn(n, 3)
    x = x / np.sqrt(np.sum(x**2, -1, keepdims=True))
    y = rng.randn(n, 4)
    u = ot.utils.unif(n)

    with pytest.raises(ValueError):
        _ = ot.sliced_wasserstein_sphere(x, y, u, u, 10, seed=rng)


def test_sliced_sphere_log():
    n = 100
    rng = np.random.RandomState(0)
    x = rng.randn(n, 4)
    x = x / np.sqrt(np.sum(x**2, -1, keepdims=True))
    y = rng.randn(n, 4)
    y = y / np.sqrt(np.sum(y**2, -1, keepdims=True))
    u = ot.utils.unif(n)

    res, log = ot.sliced_wasserstein_sphere(x, y, u, u, 10, p=1, seed=rng, log=True)
    assert len(log) == 2
    projections = log["projections"]
    projected_emds = log["projected_emds"]

    assert projections.shape[0] == len(projected_emds) == 10
    for emd in projected_emds:
        assert emd > 0


def test_sliced_sphere_different_dists():
    n = 100
    rng = np.random.RandomState(0)
    x = rng.randn(n, 3)
    x = x / np.sqrt(np.sum(x**2, -1, keepdims=True))

    u = ot.utils.unif(n)
    y = rng.randn(n, 3)
    y = y / np.sqrt(np.sum(y**2, -1, keepdims=True))

    res = ot.sliced_wasserstein_sphere(x, y, u, u, 10, seed=rng)
    assert res > 0.
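

# Illustrative sketch (an assumed helper, mirroring the test just below):
# points on the circle S^1 can be mapped to coordinates in [0, 1) with
# arctan2, which lets the spherical sliced distance be cross-checked against
# ot.binary_search_circle computed on those coordinates.
def _circle_coordinates_sketch(x):
    # x is assumed to be an (n, 2) array of unit-norm samples on S^1
    return (np.arctan2(-x[:, 1], -x[:, 0]) + np.pi) / (2 * np.pi)
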
def test_1d_sliced_sphere_equals_emd(): n = 100 m = 120 rng = np.random.RandomState(0) x = rng.randn(n, 2) x = x / np.sqrt(np.sum(x**2, -1, keepdims=True)) x_coords = (np.arctan2(-x[:, 1], -x[:, 0]) + np.pi) / (2 * np.pi) a = rng.uniform(0, 1, n) a /= a.sum() y = rng.randn(m, 2) y = y / np.sqrt(np.sum(y**2, -1, keepdims=True)) y_coords = (np.arctan2(-y[:, 1], -y[:, 0]) + np.pi) / (2 * np.pi) u = ot.utils.unif(m) res = ot.sliced_wasserstein_sphere(x, y, a, u, 10, seed=42, p=2) expected = ot.binary_search_circle(x_coords.T, y_coords.T, a, u, p=2) res1 = ot.sliced_wasserstein_sphere(x, y, a, u, 10, seed=42, p=1) expected1 = ot.binary_search_circle(x_coords.T, y_coords.T, a, u, p=1) np.testing.assert_almost_equal(res ** 2, expected) np.testing.assert_almost_equal(res1, expected1, decimal=3) @pytest.skip_backend("tf") def test_sliced_sphere_backend_type_devices(nx): n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 3) x = x / np.sqrt(np.sum(x**2, -1, keepdims=True)) y = rng.randn(2 * n, 3) y = y / np.sqrt(np.sum(y**2, -1, keepdims=True)) sw_np, log = ot.sliced_wasserstein_sphere(x, y, log=True) P = log["projections"] for tp in nx.__type_list__: print(nx.dtype_device(tp)) xb, yb = nx.from_numpy(x, y, type_as=tp) valb = ot.sliced_wasserstein_sphere(xb, yb, projections=nx.from_numpy(P, type_as=tp)) nx.assert_same_dtype_device(xb, valb) np.testing.assert_almost_equal(sw_np, nx.to_numpy(valb)) def test_sliced_sphere_gradient(): if torch: import torch.nn.functional as F X0 = torch.randn((20, 3)) X0 = F.normalize(X0, p=2, dim=-1) X0.requires_grad_(True) X1 = torch.randn((20, 3)) X1 = F.normalize(X1, p=2, dim=-1) sw = ot.sliced_wasserstein_sphere(X1, X0, n_projections=100, p=2) grad_x0 = torch.autograd.grad(sw, X0)[0] assert not torch.any(torch.isnan(grad_x0)) def test_sliced_sphere_unif_values_on_the_sphere(): n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 3) u = ot.utils.unif(n) with pytest.raises(ValueError): _ = ot.sliced_wasserstein_sphere_unif(x, u, 10, seed=rng) def test_sliced_sphere_unif_log(): n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 4) x = x / np.sqrt(np.sum(x**2, -1, keepdims=True)) u = ot.utils.unif(n) res, log = ot.sliced_wasserstein_sphere_unif(x, u, 10, seed=rng, log=True) assert len(log) == 2 projections = log["projections"] projected_emds = log["projected_emds"] assert projections.shape[0] == len(projected_emds) == 10 for emd in projected_emds: assert emd > 0 def test_sliced_sphere_unif_backend_type_devices(nx): n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 3) x = x / np.sqrt(np.sum(x**2, -1, keepdims=True)) for tp in nx.__type_list__: print(nx.dtype_device(tp)) xb = nx.from_numpy(x, type_as=tp) valb = ot.sliced_wasserstein_sphere_unif(xb) nx.assert_same_dtype_device(xb, valb) python-pot-0.9.3+dfsg/test/test_smooth.py000066400000000000000000000077071455713015700205070ustar00rootroot00000000000000"""Tests for ot.smooth model """ # Author: Remi Flamary # # License: MIT License import numpy as np import ot import pytest from scipy.optimize import check_grad def test_smooth_ot_dual(): # get data n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 2) u = ot.utils.unif(n) M = ot.dist(x, x) with pytest.raises(NotImplementedError): Gl2, log = ot.smooth.smooth_ot_dual(u, u, M, 1, reg_type='none') # squared l2 regularisation Gl2, log = ot.smooth.smooth_ot_dual(u, u, M, 1, reg_type='l2', log=True, stopThr=1e-10) # check constraints np.testing.assert_allclose( u, Gl2.sum(1), atol=1e-05) # cf convergence sinkhorn np.testing.assert_allclose( u, 
Gl2.sum(0), atol=1e-05) # cf convergence sinkhorn # kl regularisation G = ot.smooth.smooth_ot_dual(u, u, M, 1, reg_type='kl', stopThr=1e-10) # check constraints np.testing.assert_allclose( u, G.sum(1), atol=1e-05) # cf convergence sinkhorn np.testing.assert_allclose( u, G.sum(0), atol=1e-05) # cf convergence sinkhorn G2 = ot.sinkhorn(u, u, M, 1, stopThr=1e-10) np.testing.assert_allclose(G, G2, atol=1e-05) # sparsity-constrained regularisation max_nz = 2 Gsc, log = ot.smooth.smooth_ot_dual( u, u, M, 1, max_nz=max_nz, log=True, reg_type='sparsity_constrained', stopThr=1e-10) # check marginal constraints np.testing.assert_allclose(u, Gsc.sum(1), atol=1e-03) np.testing.assert_allclose(u, Gsc.sum(0), atol=1e-03) # check sparsity constraints np.testing.assert_array_less( np.sum(Gsc > 0, axis=0), np.ones(n) * max_nz + 1) def test_smooth_ot_semi_dual(): # get data n = 100 rng = np.random.RandomState(0) x = rng.randn(n, 2) u = ot.utils.unif(n) M = ot.dist(x, x) with pytest.raises(NotImplementedError): Gl2, log = ot.smooth.smooth_ot_semi_dual(u, u, M, 1, reg_type='none') # squared l2 regularisation Gl2, log = ot.smooth.smooth_ot_semi_dual(u, u, M, 1, reg_type='l2', log=True, stopThr=1e-10) # check constraints np.testing.assert_allclose( u, Gl2.sum(1), atol=1e-05) # cf convergence sinkhorn np.testing.assert_allclose( u, Gl2.sum(0), atol=1e-05) # cf convergence sinkhorn # kl regularisation G = ot.smooth.smooth_ot_semi_dual(u, u, M, 1, reg_type='kl', stopThr=1e-10) # check constraints np.testing.assert_allclose( u, G.sum(1), atol=1e-05) # cf convergence sinkhorn np.testing.assert_allclose( u, G.sum(0), atol=1e-05) # cf convergence sinkhorn G2 = ot.sinkhorn(u, u, M, 1, stopThr=1e-10) np.testing.assert_allclose(G, G2, atol=1e-05) # sparsity-constrained regularisation max_nz = 2 Gsc = ot.smooth.smooth_ot_semi_dual( u, u, M, 1, reg_type='sparsity_constrained', max_nz=max_nz, stopThr=1e-10) # check marginal constraints np.testing.assert_allclose(u, Gsc.sum(1), atol=1e-03) np.testing.assert_allclose(u, Gsc.sum(0), atol=1e-03) # check sparsity constraints np.testing.assert_array_less(np.sum(Gsc > 0, axis=0), np.ones(n) * max_nz + 1) def test_sparsity_constrained_gradient(): max_nz = 5 regularizer = ot.smooth.SparsityConstrained(max_nz=max_nz) rng = np.random.RandomState(0) X = rng.randn(10,) b = 0.5 def delta_omega_func(X): return regularizer.delta_Omega(X)[0] def delta_omega_grad(X): return regularizer.delta_Omega(X)[1] dual_grad_err = check_grad(delta_omega_func, delta_omega_grad, X) np.testing.assert_allclose(dual_grad_err, 0.0, atol=1e-07) def max_omega_func(X, b): return regularizer.max_Omega(X, b)[0] def max_omega_grad(X, b): return regularizer.max_Omega(X, b)[1] semi_dual_grad_err = check_grad(max_omega_func, max_omega_grad, X, b) np.testing.assert_allclose(semi_dual_grad_err, 0.0, atol=1e-07) python-pot-0.9.3+dfsg/test/test_solvers.py000066400000000000000000000332071455713015700206650ustar00rootroot00000000000000"""Tests for ot solvers""" # Author: Remi Flamary # # License: MIT License import itertools import numpy as np import pytest import sys import ot from ot.bregman import geomloss lst_reg = [None, 1] lst_reg_type = ['KL', 'entropy', 'L2'] lst_unbalanced = [None, 0.9] lst_unbalanced_type = ['KL', 'L2', 'TV'] lst_reg_type_gromov = ['entropy'] lst_gw_losses = ['L2', 'KL'] lst_unbalanced_type_gromov = ['KL', 'semirelaxed', 'partial'] lst_unbalanced_gromov = [None, 0.9] lst_alpha = [0, 0.4, 0.9, 1] lst_method_params_solve_sample = [ {'method': '1d'}, {'method': '1d', 'metric': 'euclidean'}, 
{'method': 'gaussian'}, {'method': 'gaussian', 'reg': 1}, {'method': 'factored', 'rank': 10}, {'method': 'lowrank', 'rank': 10} ] lst_parameters_solve_sample_NotImplemented = [ {'method': '1d', 'metric': 'any other one'}, # fail 1d on weird metrics {'method': 'gaussian', 'metric': 'euclidean'}, # fail gaussian on metric not euclidean {'method': 'factored', 'metric': 'euclidean'}, # fail factored on metric not euclidean {"method": 'lowrank', 'metric': 'euclidean'}, # fail lowrank on metric not euclidean {'lazy': True}, # fail lazy for non regularized {'lazy': True, 'unbalanced': 1}, # fail lazy for non regularized unbalanced {'lazy': True, 'reg': 1, 'unbalanced': 1}, # fail lazy for unbalanced and regularized ] # set readable ids for each param lst_method_params_solve_sample = [pytest.param(param, id=str(param)) for param in lst_method_params_solve_sample] lst_parameters_solve_sample_NotImplemented = [pytest.param(param, id=str(param)) for param in lst_parameters_solve_sample_NotImplemented] def assert_allclose_sol(sol1, sol2): lst_attr = ['value', 'value_linear', 'plan', 'potential_a', 'potential_b', 'marginal_a', 'marginal_b'] nx1 = sol1._backend if sol1._backend is not None else ot.backend.NumpyBackend() nx2 = sol2._backend if sol2._backend is not None else ot.backend.NumpyBackend() for attr in lst_attr: try: np.allclose(nx1.to_numpy(getattr(sol1, attr)), nx2.to_numpy(getattr(sol2, attr))) except NotImplementedError: pass def test_solve(nx): n_samples_s = 10 n_samples_t = 7 n_features = 2 rng = np.random.RandomState(0) x = rng.randn(n_samples_s, n_features) y = rng.randn(n_samples_t, n_features) a = ot.utils.unif(n_samples_s) b = ot.utils.unif(n_samples_t) M = ot.dist(x, y) # solve unif weights sol0 = ot.solve(M) print(sol0) # solve signe weights sol = ot.solve(M, a, b) # check some attributes sol.potentials sol.sparse_plan sol.marginals sol.status assert_allclose_sol(sol0, sol) # solve in backend ab, bb, Mb = nx.from_numpy(a, b, M) solb = ot.solve(M, a, b) assert_allclose_sol(sol, solb) # test not implemented unbalanced and check raise with pytest.raises(NotImplementedError): sol0 = ot.solve(M, unbalanced=1, unbalanced_type='cryptic divergence') # test not implemented reg_type and check raise with pytest.raises(NotImplementedError): sol0 = ot.solve(M, reg=1, reg_type='cryptic divergence') @pytest.mark.parametrize("reg,reg_type,unbalanced,unbalanced_type", itertools.product(lst_reg, lst_reg_type, lst_unbalanced, lst_unbalanced_type)) def test_solve_grid(nx, reg, reg_type, unbalanced, unbalanced_type): n_samples_s = 10 n_samples_t = 7 n_features = 2 rng = np.random.RandomState(0) x = rng.randn(n_samples_s, n_features) y = rng.randn(n_samples_t, n_features) a = ot.utils.unif(n_samples_s) b = ot.utils.unif(n_samples_t) M = ot.dist(x, y) try: # solve unif weights sol0 = ot.solve(M, reg=reg, reg_type=reg_type, unbalanced=unbalanced, unbalanced_type=unbalanced_type) # solve signe weights sol = ot.solve(M, a, b, reg=reg, reg_type=reg_type, unbalanced=unbalanced, unbalanced_type=unbalanced_type) assert_allclose_sol(sol0, sol) # solve in backend ab, bb, Mb = nx.from_numpy(a, b, M) solb = ot.solve(M, a, b, reg=reg, reg_type=reg_type, unbalanced=unbalanced, unbalanced_type=unbalanced_type) assert_allclose_sol(sol, solb) except NotImplementedError: pytest.skip("Not implemented") def test_solve_not_implemented(nx): n_samples_s = 10 n_samples_t = 7 n_features = 2 rng = np.random.RandomState(0) x = rng.randn(n_samples_s, n_features) y = rng.randn(n_samples_t, n_features) M = ot.dist(x, y) # test not 
implemented and check raise with pytest.raises(NotImplementedError): ot.solve(M, reg=1.0, reg_type='cryptic divergence') with pytest.raises(NotImplementedError): ot.solve(M, unbalanced=1.0, unbalanced_type='cryptic divergence') # pairs of incompatible divergences with pytest.raises(NotImplementedError): ot.solve(M, reg=1.0, reg_type='kl', unbalanced=1.0, unbalanced_type='tv') def test_solve_gromov(nx): np.random.seed(0) n_samples_s = 3 n_samples_t = 5 Ca = np.random.rand(n_samples_s, n_samples_s) Ca = (Ca + Ca.T) / 2 Cb = np.random.rand(n_samples_t, n_samples_t) Cb = (Cb + Cb.T) / 2 a = ot.utils.unif(n_samples_s) b = ot.utils.unif(n_samples_t) M = np.random.rand(n_samples_s, n_samples_t) sol0 = ot.solve_gromov(Ca, Cb) # GW sol = ot.solve_gromov(Ca, Cb, a=a, b=b) # GW sol0_fgw = ot.solve_gromov(Ca, Cb, M) # FGW # check some attributes sol.potentials sol.marginals assert_allclose_sol(sol0, sol) # solve in backend ax, bx, Mx, Cax, Cbx = nx.from_numpy(a, b, M, Ca, Cb) solx = ot.solve_gromov(Cax, Cbx, a=ax, b=bx) # GW solx_fgw = ot.solve_gromov(Cax, Cbx, Mx) # FGW assert_allclose_sol(sol, solx) assert_allclose_sol(sol0_fgw, solx_fgw) @pytest.mark.parametrize("reg,reg_type,unbalanced,unbalanced_type,alpha,loss", itertools.product(lst_reg, lst_reg_type_gromov, lst_unbalanced_gromov, lst_unbalanced_type_gromov, lst_alpha, lst_gw_losses)) def test_solve_gromov_grid(nx, reg, reg_type, unbalanced, unbalanced_type, alpha, loss): np.random.seed(0) n_samples_s = 3 n_samples_t = 5 Ca = np.random.rand(n_samples_s, n_samples_s) Ca = (Ca + Ca.T) / 2 Cb = np.random.rand(n_samples_t, n_samples_t) Cb = (Cb + Cb.T) / 2 a = ot.utils.unif(n_samples_s) b = ot.utils.unif(n_samples_t) M = np.random.rand(n_samples_s, n_samples_t) try: sol0 = ot.solve_gromov(Ca, Cb, reg=reg, reg_type=reg_type, unbalanced=unbalanced, unbalanced_type=unbalanced_type, loss=loss) # GW sol0_fgw = ot.solve_gromov(Ca, Cb, M, reg=reg, reg_type=reg_type, unbalanced=unbalanced, unbalanced_type=unbalanced_type, alpha=alpha, loss=loss) # FGW # solve in backend ax, bx, Mx, Cax, Cbx = nx.from_numpy(a, b, M, Ca, Cb) solx = ot.solve_gromov(Cax, Cbx, reg=reg, reg_type=reg_type, unbalanced=unbalanced, unbalanced_type=unbalanced_type, loss=loss) # GW solx_fgw = ot.solve_gromov(Cax, Cbx, Mx, reg=reg, reg_type=reg_type, unbalanced=unbalanced, unbalanced_type=unbalanced_type, alpha=alpha, loss=loss) # FGW solx.value_quad assert_allclose_sol(sol0, solx) assert_allclose_sol(sol0_fgw, solx_fgw) except NotImplementedError: pytest.skip("Not implemented") def test_solve_gromov_not_implemented(nx): np.random.seed(0) n_samples_s = 3 n_samples_t = 5 Ca = np.random.rand(n_samples_s, n_samples_s) Ca = (Ca + Ca.T) / 2 Cb = np.random.rand(n_samples_t, n_samples_t) Cb = (Cb + Cb.T) / 2 a = ot.utils.unif(n_samples_s) b = ot.utils.unif(n_samples_t) M = np.random.rand(n_samples_s, n_samples_t) Ca, Cb, M, a, b = nx.from_numpy(Ca, Cb, M, a, b) # test not implemented and check raise with pytest.raises(NotImplementedError): ot.solve_gromov(Ca, Cb, loss='weird loss') with pytest.raises(NotImplementedError): ot.solve_gromov(Ca, Cb, unbalanced=1, unbalanced_type='cryptic divergence') with pytest.raises(NotImplementedError): ot.solve_gromov(Ca, Cb, reg=1, reg_type='cryptic divergence') # detect partial not implemented and error detect in value with pytest.raises(ValueError): ot.solve_gromov(Ca, Cb, unbalanced_type='partial', unbalanced=1.5) with pytest.raises(NotImplementedError): ot.solve_gromov(Ca, Cb, unbalanced_type='partial', unbalanced=0.5, symmetric=False) with 
pytest.raises(NotImplementedError): ot.solve_gromov(Ca, Cb, M, unbalanced_type='partial', unbalanced=0.5) with pytest.raises(ValueError): ot.solve_gromov(Ca, Cb, reg=1, unbalanced_type='partial', unbalanced=1.5) with pytest.raises(NotImplementedError): ot.solve_gromov(Ca, Cb, reg=1, unbalanced_type='partial', unbalanced=0.5, symmetric=False) def test_solve_sample(nx): # test solve_sample when is_Lazy = False n = 20 X_s = np.reshape(1.0 * np.arange(n), (n, 1)) X_t = np.reshape(1.0 * np.arange(0, n), (n, 1)) a = ot.utils.unif(X_s.shape[0]) b = ot.utils.unif(X_t.shape[0]) M = ot.dist(X_s, X_t) # solve with ot.solve sol00 = ot.solve(M, a, b) # solve unif weights sol0 = ot.solve_sample(X_s, X_t) # solve signe weights sol = ot.solve_sample(X_s, X_t, a, b) # check some attributes sol.potentials sol.sparse_plan sol.marginals sol.status assert_allclose_sol(sol0, sol) assert_allclose_sol(sol0, sol00) # solve in backend X_sb, X_tb, ab, bb = nx.from_numpy(X_s, X_t, a, b) solb = ot.solve_sample(X_sb, X_tb, ab, bb) assert_allclose_sol(sol, solb) # test not implemented unbalanced and check raise with pytest.raises(NotImplementedError): sol0 = ot.solve_sample(X_s, X_t, unbalanced=1, unbalanced_type='cryptic divergence') # test not implemented reg_type and check raise with pytest.raises(NotImplementedError): sol0 = ot.solve_sample(X_s, X_t, reg=1, reg_type='cryptic divergence') def test_solve_sample_lazy(nx): # test solve_sample when is_Lazy = False n = 20 X_s = np.reshape(1.0 * np.arange(n), (n, 1)) X_t = np.reshape(1.0 * np.arange(0, n), (n, 1)) a = ot.utils.unif(X_s.shape[0]) b = ot.utils.unif(X_t.shape[0]) X_s, X_t, a, b = nx.from_numpy(X_s, X_t, a, b) M = ot.dist(X_s, X_t) # solve with ot.solve sol00 = ot.solve(M, a, b, reg=1) sol0 = ot.solve_sample(X_s, X_t, a, b, reg=1) # solve signe weights sol = ot.solve_sample(X_s, X_t, a, b, reg=1, lazy=True) assert_allclose_sol(sol0, sol00) np.testing.assert_allclose(sol0.plan, sol.lazy_plan[:], rtol=1e-5, atol=1e-5) @pytest.mark.skipif(sys.version_info < (3, 10), reason="requires python3.10 or higher") @pytest.mark.skipif(not geomloss, reason="pytorch not installed") @pytest.skip_backend('tf') @pytest.skip_backend("cupy") @pytest.skip_backend("jax") @pytest.mark.parametrize("metric", ["sqeuclidean", "euclidean"]) def test_solve_sample_geomloss(nx, metric): # test solve_sample when is_Lazy = False n_samples_s = 13 n_samples_t = 7 n_features = 2 rng = np.random.RandomState(0) x = rng.randn(n_samples_s, n_features) y = rng.randn(n_samples_t, n_features) a = ot.utils.unif(n_samples_s) b = ot.utils.unif(n_samples_t) xb, yb, ab, bb = nx.from_numpy(x, y, a, b) sol0 = ot.solve_sample(xb, yb, ab, bb, reg=1) # solve signe weights sol = ot.solve_sample(xb, yb, ab, bb, reg=1, method='geomloss') assert_allclose_sol(sol0, sol) sol1 = ot.solve_sample(xb, yb, ab, bb, reg=1, lazy=False, method='geomloss') assert_allclose_sol(sol0, sol) sol1 = ot.solve_sample(xb, yb, ab, bb, reg=1, lazy=True, method='geomloss_tensorized') np.testing.assert_allclose(nx.to_numpy(sol1.lazy_plan[:]), nx.to_numpy(sol.lazy_plan[:]), rtol=1e-5, atol=1e-5) sol1 = ot.solve_sample(xb, yb, ab, bb, reg=1, lazy=True, method='geomloss_online') np.testing.assert_allclose(nx.to_numpy(sol1.lazy_plan[:]), nx.to_numpy(sol.lazy_plan[:]), rtol=1e-5, atol=1e-5) sol1 = ot.solve_sample(xb, yb, ab, bb, reg=1, lazy=True, method='geomloss_multiscale') np.testing.assert_allclose(nx.to_numpy(sol1.lazy_plan[:]), nx.to_numpy(sol.lazy_plan[:]), rtol=1e-5, atol=1e-5) sol1 = ot.solve_sample(xb, yb, ab, bb, reg=1, lazy=True, 
method='geomloss')
    np.testing.assert_allclose(nx.to_numpy(sol1.lazy_plan[:]), nx.to_numpy(sol.lazy_plan[:]), rtol=1e-5, atol=1e-5)


@pytest.mark.parametrize("method_params", lst_method_params_solve_sample)
def test_solve_sample_methods(nx, method_params):

    n_samples_s = 20
    n_samples_t = 7
    n_features = 2
    rng = np.random.RandomState(0)

    x = rng.randn(n_samples_s, n_features)
    y = rng.randn(n_samples_t, n_features)
    a = ot.utils.unif(n_samples_s)
    b = ot.utils.unif(n_samples_t)

    xb, yb, ab, bb = nx.from_numpy(x, y, a, b)

    sol = ot.solve_sample(x, y, **method_params)
    solb = ot.solve_sample(xb, yb, ab, bb, **method_params)

    # check some attributes
    assert_allclose_sol(sol, solb)

    sol2 = ot.solve_sample(x, x, **method_params)
    if method_params['method'] not in ['factored', 'lowrank']:
        np.testing.assert_allclose(sol2.value, 0)


@pytest.mark.parametrize("method_params", lst_parameters_solve_sample_NotImplemented)
def test_solve_sample_NotImplemented(nx, method_params):

    n_samples_s = 20
    n_samples_t = 7
    n_features = 2
    rng = np.random.RandomState(0)

    x = rng.randn(n_samples_s, n_features)
    y = rng.randn(n_samples_t, n_features)
    a = ot.utils.unif(n_samples_s)
    b = ot.utils.unif(n_samples_t)

    xb, yb, ab, bb = nx.from_numpy(x, y, a, b)

    with pytest.raises(NotImplementedError):
        ot.solve_sample(xb, yb, ab, bb, **method_params)
python-pot-0.9.3+dfsg/test/test_stochastic.py000066400000000000000000000210071455713015700213270ustar00rootroot00000000000000"""
==========================
Stochastic test
==========================

This module tests the stochastic optimization algorithms for discrete and
semi-continuous measures from the POT library.

"""

# Authors: Kilian Fatras
#          Rémi Flamary
#
# License: MIT License

import numpy as np
import ot


#############################################################################
# COMPUTE TEST FOR SEMI-DUAL PROBLEM
#############################################################################

#############################################################################
#
# TEST SAG algorithm
# ---------------------------------------------
# 2 identical discrete measures u defined on the same space with a
# regularization term, a learning rate and a number of iteration


def test_stochastic_sag():
    # test sag
    n = 10
    reg = 1
    numItermax = 30000
    rng = np.random.RandomState(0)

    x = rng.randn(n, 2)
    u = ot.utils.unif(n)

    M = ot.dist(x, x)

    G = ot.stochastic.solve_semi_dual_entropic(u, u, M, reg, "sag",
                                               numItermax=numItermax)

    # check constraints
    np.testing.assert_allclose(
        u, G.sum(1), atol=1e-03)  # cf convergence sag
    np.testing.assert_allclose(
        u, G.sum(0), atol=1e-03)  # cf convergence sag


#############################################################################
#
# TEST ASGD algorithm
# ---------------------------------------------
# 2 identical discrete measures u defined on the same space with a
# regularization term, a learning rate and a number of iteration


def test_stochastic_asgd():
    # test asgd
    n = 10
    reg = 1
    numItermax = 10000
    rng = np.random.RandomState(0)

    x = rng.randn(n, 2)
    u = ot.utils.unif(n)

    M = ot.dist(x, x)

    G, log = ot.stochastic.solve_semi_dual_entropic(u, u, M, reg, "asgd",
                                                    numItermax=numItermax, log=True)

    # check constraints
    np.testing.assert_allclose(
        u, G.sum(1), atol=1e-02)  # cf convergence asgd
    np.testing.assert_allclose(
        u, G.sum(0), atol=1e-02)  # cf convergence asgd


#############################################################################
#
# TEST Convergence SAG and ASGD toward Sinkhorn's solution
# 
-------------------------------------------------------- # 2 identical discrete measures u defined on the same space with a # regularization term, a learning rate and a number of iteration def test_sag_asgd_sinkhorn(): # test all algorithms n = 10 reg = 1 nb_iter = 10000 rng = np.random.RandomState(0) x = rng.randn(n, 2) u = ot.utils.unif(n) M = ot.dist(x, x) G_asgd = ot.stochastic.solve_semi_dual_entropic(u, u, M, reg, "asgd", numItermax=nb_iter) G_sag = ot.stochastic.solve_semi_dual_entropic(u, u, M, reg, "sag", numItermax=nb_iter) G_sinkhorn = ot.sinkhorn(u, u, M, reg) # check constraints np.testing.assert_allclose( G_sag.sum(1), G_sinkhorn.sum(1), atol=1e-02) np.testing.assert_allclose( G_sag.sum(0), G_sinkhorn.sum(0), atol=1e-02) np.testing.assert_allclose( G_asgd.sum(1), G_sinkhorn.sum(1), atol=1e-02) np.testing.assert_allclose( G_asgd.sum(0), G_sinkhorn.sum(0), atol=1e-02) np.testing.assert_allclose( G_sag, G_sinkhorn, atol=1e-02) # cf convergence sag np.testing.assert_allclose( G_asgd, G_sinkhorn, atol=1e-02) # cf convergence asgd ############################################################################# # COMPUTE TEST FOR DUAL PROBLEM ############################################################################# ############################################################################# # # TEST SGD algorithm # --------------------------------------------- # 2 identical discrete measures u defined on the same space with a # regularization term, a batch_size and a number of iteration def test_stochastic_dual_sgd(): # test sgd n = 10 reg = 1 numItermax = 5000 batch_size = 10 rng = np.random.RandomState(0) x = rng.randn(n, 2) u = ot.utils.unif(n) M = ot.dist(x, x) G, log = ot.stochastic.solve_dual_entropic(u, u, M, reg, batch_size, numItermax=numItermax, log=True) # check constraints np.testing.assert_allclose( u, G.sum(1), atol=1e-03) # cf convergence sgd np.testing.assert_allclose( u, G.sum(0), atol=1e-03) # cf convergence sgd ############################################################################# # # TEST Convergence SGD toward Sinkhorn's solution # -------------------------------------------------------- # 2 identical discrete measures u defined on the same space with a # regularization term, a batch_size and a number of iteration def test_dual_sgd_sinkhorn(): # test all dual algorithms n = 10 reg = 1 nb_iter = 5000 batch_size = 10 rng = np.random.RandomState(0) # Test uniform x = rng.randn(n, 2) u = ot.utils.unif(n) M = ot.dist(x, x) G_sgd = ot.stochastic.solve_dual_entropic(u, u, M, reg, batch_size, numItermax=nb_iter) G_sinkhorn = ot.sinkhorn(u, u, M, reg) # check constraints np.testing.assert_allclose( G_sgd.sum(1), G_sinkhorn.sum(1), atol=1e-02) np.testing.assert_allclose( G_sgd.sum(0), G_sinkhorn.sum(0), atol=1e-02) np.testing.assert_allclose( G_sgd, G_sinkhorn, atol=1e-02) # cf convergence sgd # Test gaussian n = 30 reg = 1 batch_size = 30 a = ot.datasets.make_1D_gauss(n, 15, 5) # m= mean, s= std b = ot.datasets.make_1D_gauss(n, 15, 5) X_source = np.arange(n, dtype=np.float64) Y_target = np.arange(n, dtype=np.float64) M = ot.dist(X_source.reshape((n, 1)), Y_target.reshape((n, 1))) M /= M.max() G_sgd = ot.stochastic.solve_dual_entropic(a, b, M, reg, batch_size, numItermax=nb_iter) G_sinkhorn = ot.sinkhorn(a, b, M, reg) # check constraints np.testing.assert_allclose( G_sgd.sum(1), G_sinkhorn.sum(1), atol=1e-03) np.testing.assert_allclose( G_sgd.sum(0), G_sinkhorn.sum(0), atol=1e-03) np.testing.assert_allclose( G_sgd, G_sinkhorn, atol=1e-03) # cf convergence sgd def 
test_loss_dual_entropic(nx): nx.seed(0) xs = nx.randn(50, 2) xt = nx.randn(40, 2) + 2 ws = nx.rand(50) ws = ws / nx.sum(ws) wt = nx.rand(40) wt = wt / nx.sum(wt) u = nx.randn(50) v = nx.randn(40) def metric(x, y): return -nx.dot(x, y.T) ot.stochastic.loss_dual_entropic(u, v, xs, xt) ot.stochastic.loss_dual_entropic(u, v, xs, xt, ws=ws, wt=wt, metric=metric) def test_plan_dual_entropic(nx): nx.seed(0) xs = nx.randn(50, 2) xt = nx.randn(40, 2) + 2 ws = nx.rand(50) ws = ws / nx.sum(ws) wt = nx.rand(40) wt = wt / nx.sum(wt) u = nx.randn(50) v = nx.randn(40) def metric(x, y): return -nx.dot(x, y.T) G1 = ot.stochastic.plan_dual_entropic(u, v, xs, xt) assert np.all(nx.to_numpy(G1) >= 0) assert G1.shape[0] == 50 assert G1.shape[1] == 40 G2 = ot.stochastic.plan_dual_entropic(u, v, xs, xt, ws=ws, wt=wt, metric=metric) assert np.all(nx.to_numpy(G2) >= 0) assert G2.shape[0] == 50 assert G2.shape[1] == 40 def test_loss_dual_quadratic(nx): nx.seed(0) xs = nx.randn(50, 2) xt = nx.randn(40, 2) + 2 ws = nx.rand(50) ws = ws / nx.sum(ws) wt = nx.rand(40) wt = wt / nx.sum(wt) u = nx.randn(50) v = nx.randn(40) def metric(x, y): return -nx.dot(x, y.T) ot.stochastic.loss_dual_quadratic(u, v, xs, xt) ot.stochastic.loss_dual_quadratic(u, v, xs, xt, ws=ws, wt=wt, metric=metric) def test_plan_dual_quadratic(nx): nx.seed(0) xs = nx.randn(50, 2) xt = nx.randn(40, 2) + 2 ws = nx.rand(50) ws = ws / nx.sum(ws) wt = nx.rand(40) wt = wt / nx.sum(wt) u = nx.randn(50) v = nx.randn(40) def metric(x, y): return -nx.dot(x, y.T) G1 = ot.stochastic.plan_dual_quadratic(u, v, xs, xt) assert np.all(nx.to_numpy(G1) >= 0) assert G1.shape[0] == 50 assert G1.shape[1] == 40 G2 = ot.stochastic.plan_dual_quadratic(u, v, xs, xt, ws=ws, wt=wt, metric=metric) assert np.all(nx.to_numpy(G2) >= 0) assert G2.shape[0] == 50 assert G2.shape[1] == 40 python-pot-0.9.3+dfsg/test/test_unbalanced.py000066400000000000000000000537631455713015700212750ustar00rootroot00000000000000"""Tests for module Unbalanced OT with entropy regularization""" # Author: Hicham Janati # Laetitia Chapel # Quang Huy Tran # # License: MIT License import itertools import numpy as np import ot import pytest from ot.unbalanced import barycenter_unbalanced @pytest.mark.parametrize("method,reg_type", itertools.product(["sinkhorn", "sinkhorn_stabilized", "sinkhorn_reg_scaling"], ["kl", "entropy"])) def test_unbalanced_convergence(nx, method, reg_type): # test generalized sinkhorn for unbalanced OT n = 100 rng = np.random.RandomState(42) x = rng.randn(n, 2) a = ot.utils.unif(n) # make dists unbalanced b = ot.utils.unif(n) * 1.5 M = ot.dist(x, x) a, b, M = nx.from_numpy(a, b, M) epsilon = 1. reg_m = 1. 
G, log = ot.unbalanced.sinkhorn_unbalanced( a, b, M, reg=epsilon, reg_m=reg_m, method=method, reg_type=reg_type, log=True, verbose=True ) loss = nx.to_numpy(ot.unbalanced.sinkhorn_unbalanced2( a, b, M, reg=epsilon, reg_m=reg_m, method=method, reg_type=reg_type, verbose=True )) # check fixed point equations # in log-domain fi = reg_m / (reg_m + epsilon) logb = nx.log(b + 1e-16) loga = nx.log(a + 1e-16) if reg_type == "entropy": logKtu = nx.logsumexp(log["logu"][None, :] - M.T / epsilon, axis=1) logKv = nx.logsumexp(log["logv"][None, :] - M / epsilon, axis=1) elif reg_type == "kl": log_ab = loga[:, None] + logb[None, :] logKtu = nx.logsumexp(log["logu"][None, :] - M.T / epsilon + log_ab.T, axis=1) logKv = nx.logsumexp(log["logv"][None, :] - M / epsilon + log_ab, axis=1) v_final = fi * (logb - logKtu) u_final = fi * (loga - logKv) np.testing.assert_allclose( nx.to_numpy(u_final), nx.to_numpy(log["logu"]), atol=1e-05) np.testing.assert_allclose( nx.to_numpy(v_final), nx.to_numpy(log["logv"]), atol=1e-05) # check if sinkhorn_unbalanced2 returns the correct loss np.testing.assert_allclose(nx.to_numpy(nx.sum(G * M)), loss, atol=1e-5) # check in case no histogram is provided M_np = nx.to_numpy(M) a_np, b_np = np.array([]), np.array([]) a, b = nx.from_numpy(a_np, b_np) G = ot.unbalanced.sinkhorn_unbalanced( a, b, M, reg=epsilon, reg_m=reg_m, method=method, reg_type=reg_type, verbose=True ) G_np = ot.unbalanced.sinkhorn_unbalanced( a_np, b_np, M_np, reg=epsilon, reg_m=reg_m, method=method, reg_type=reg_type, verbose=True ) np.testing.assert_allclose(G_np, nx.to_numpy(G)) @pytest.mark.parametrize("method,reg_type", itertools.product(["sinkhorn", "sinkhorn_stabilized", "sinkhorn_reg_scaling"], ["kl", "entropy"])) def test_unbalanced_warmstart(nx, method, reg_type): # test generalized sinkhorn for unbalanced OT n = 100 rng = np.random.RandomState(42) x = rng.randn(n, 2) a = ot.utils.unif(n) b = ot.utils.unif(n) M = ot.dist(x, x) a, b, M = nx.from_numpy(a, b, M) epsilon = 1. reg_m = 1. 
G0, log0 = ot.unbalanced.sinkhorn_unbalanced( a, b, M, reg=epsilon, reg_m=reg_m, method=method, reg_type=reg_type, warmstart=None, log=True, verbose=True ) loss0 = ot.unbalanced.sinkhorn_unbalanced2( a, b, M, reg=epsilon, reg_m=reg_m, method=method, reg_type=reg_type, warmstart=None, verbose=True ) dim_a, dim_b = M.shape warmstart = (nx.zeros(dim_a, type_as=M), nx.zeros(dim_b, type_as=M)) G, log = ot.unbalanced.sinkhorn_unbalanced( a, b, M, reg=epsilon, reg_m=reg_m, method=method, reg_type=reg_type, warmstart=warmstart, log=True, verbose=True ) loss = ot.unbalanced.sinkhorn_unbalanced2( a, b, M, reg=epsilon, reg_m=reg_m, method=method, reg_type=reg_type, warmstart=warmstart, verbose=True ) _, log_emd = ot.lp.emd(a, b, M, log=True) warmstart1 = (log_emd["u"], log_emd["v"]) G1, log1 = ot.unbalanced.sinkhorn_unbalanced( a, b, M, reg=epsilon, reg_m=reg_m, method=method, reg_type=reg_type, warmstart=warmstart1, log=True, verbose=True ) loss1 = ot.unbalanced.sinkhorn_unbalanced2( a, b, M, reg=epsilon, reg_m=reg_m, method=method, reg_type=reg_type, warmstart=warmstart1, verbose=True ) np.testing.assert_allclose( nx.to_numpy(log["logu"]), nx.to_numpy(log0["logu"]), atol=1e-05) np.testing.assert_allclose( nx.to_numpy(log["logv"]), nx.to_numpy(log0["logv"]), atol=1e-05) np.testing.assert_allclose( nx.to_numpy(log0["logu"]), nx.to_numpy(log1["logu"]), atol=1e-05) np.testing.assert_allclose( nx.to_numpy(log0["logv"]), nx.to_numpy(log1["logv"]), atol=1e-05) np.testing.assert_allclose(nx.to_numpy(G), nx.to_numpy(G0), atol=1e-05) np.testing.assert_allclose(nx.to_numpy(G0), nx.to_numpy(G1), atol=1e-05) np.testing.assert_allclose(nx.to_numpy(loss), nx.to_numpy(loss0), atol=1e-5) np.testing.assert_allclose(nx.to_numpy(loss0), nx.to_numpy(loss1), atol=1e-5) @pytest.mark.parametrize("method,reg_type, log", itertools.product(["sinkhorn", "sinkhorn_stabilized", "sinkhorn_reg_scaling"], ["kl", "entropy"], [True, False])) def test_sinkhorn_unbalanced2(nx, method, reg_type, log): n = 100 rng = np.random.RandomState(42) x = rng.randn(n, 2) a = ot.utils.unif(n) # make dists unbalanced b = ot.utils.unif(n) * 1.5 M = ot.dist(x, x) a, b, M = nx.from_numpy(a, b, M) epsilon = 1. reg_m = 1. loss = nx.to_numpy(ot.unbalanced.sinkhorn_unbalanced2( a, b, M, reg=epsilon, reg_m=reg_m, method=method, reg_type=reg_type, log=False, verbose=True )) res = ot.unbalanced.sinkhorn_unbalanced2( a, b, M, reg=epsilon, reg_m=reg_m, method=method, reg_type=reg_type, log=log, verbose=True ) loss0 = res[0] if log else res np.testing.assert_allclose(nx.to_numpy(loss), nx.to_numpy(loss0), atol=1e-5) @pytest.mark.parametrize("method,reg_m", itertools.product(["sinkhorn", "sinkhorn_stabilized", "sinkhorn_reg_scaling"], [1, float("inf")])) def test_unbalanced_relaxation_parameters(nx, method, reg_m): # test generalized sinkhorn for unbalanced OT n = 100 rng = np.random.RandomState(50) x = rng.randn(n, 2) a = ot.utils.unif(n) # make dists unbalanced b = rng.rand(n, 2) M = ot.dist(x, x) epsilon = 1. 
a, b, M = nx.from_numpy(a, b, M) # options for reg_m full_list_reg_m = [reg_m, reg_m] full_tuple_reg_m = (reg_m, reg_m) tuple_reg_m, list_reg_m = (reg_m), [reg_m] nx_reg_m = reg_m * nx.ones(1) list_options = [nx_reg_m, full_tuple_reg_m, tuple_reg_m, full_list_reg_m, list_reg_m] loss, log = ot.unbalanced.sinkhorn_unbalanced( a, b, M, reg=epsilon, reg_m=reg_m, method=method, log=True, verbose=True ) for opt in list_options: loss_opt, log_opt = ot.unbalanced.sinkhorn_unbalanced( a, b, M, reg=epsilon, reg_m=opt, method=method, log=True, verbose=True ) np.testing.assert_allclose( nx.to_numpy(log["logu"]), nx.to_numpy(log_opt["logu"]), atol=1e-05) np.testing.assert_allclose( nx.to_numpy(log["logv"]), nx.to_numpy(log_opt["logv"]), atol=1e-05) np.testing.assert_allclose( nx.to_numpy(loss), nx.to_numpy(loss_opt), atol=1e-05) @pytest.mark.parametrize("method", ["sinkhorn", "sinkhorn_stabilized", "sinkhorn_reg_scaling"]) def test_unbalanced_multiple_inputs(nx, method): # test generalized sinkhorn for unbalanced OT n = 100 rng = np.random.RandomState(42) x = rng.randn(n, 2) a = ot.utils.unif(n) # make dists unbalanced b = rng.rand(n, 2) M = ot.dist(x, x) epsilon = 1. reg_m = 1. a, b, M = nx.from_numpy(a, b, M) G, log = ot.unbalanced.sinkhorn_unbalanced(a, b, M, reg=epsilon, reg_m=reg_m, method=method, log=True, verbose=True) # check fixed point equations # in log-domain fi = reg_m / (reg_m + epsilon) logb = nx.log(b + 1e-16) loga = nx.log(a + 1e-16)[:, None] logKtu = nx.logsumexp( log["logu"][:, None, :] - M[:, :, None] / epsilon, axis=0 ) logKv = nx.logsumexp(log["logv"][None, :] - M[:, :, None] / epsilon, axis=1) v_final = fi * (logb - logKtu) u_final = fi * (loga - logKv) np.testing.assert_allclose( nx.to_numpy(u_final), nx.to_numpy(log["logu"]), atol=1e-05) np.testing.assert_allclose( nx.to_numpy(v_final), nx.to_numpy(log["logv"]), atol=1e-05) def test_stabilized_vs_sinkhorn(nx): # test if stable version matches sinkhorn n = 100 # Gaussian distributions a = ot.datasets.make_1D_gauss(n, m=20, s=5) # m= mean, s= std b1 = ot.datasets.make_1D_gauss(n, m=60, s=8) b2 = ot.datasets.make_1D_gauss(n, m=30, s=4) # creating matrix A containing all distributions b = np.vstack((b1, b2)).T M = ot.utils.dist0(n) M /= np.median(M) epsilon = 0.1 reg_m = 1. ab, bb, Mb = nx.from_numpy(a, b, M) G, _ = ot.unbalanced.sinkhorn_unbalanced2( ab, bb, Mb, epsilon, reg_m, method="sinkhorn_stabilized", log=True ) G2, _ = ot.unbalanced.sinkhorn_unbalanced2( ab, bb, Mb, epsilon, reg_m, method="sinkhorn", log=True ) G2_np, _ = ot.unbalanced.sinkhorn_unbalanced2( a, b, M, epsilon, reg_m, method="sinkhorn", log=True ) G = nx.to_numpy(G) G2 = nx.to_numpy(G2) np.testing.assert_allclose(G, G2, atol=1e-5) np.testing.assert_allclose(G2, G2_np, atol=1e-5) @pytest.mark.parametrize("method", ["sinkhorn", "sinkhorn_stabilized"]) def test_unbalanced_barycenter(nx, method): # test generalized sinkhorn for unbalanced OT barycenter n = 100 rng = np.random.RandomState(42) x = rng.randn(n, 2) A = rng.rand(n, 2) # make dists unbalanced A = A * np.array([1, 2])[None, :] M = ot.dist(x, x) epsilon = 1. reg_m = 1. 
    A, M = nx.from_numpy(A, M)

    q, log = barycenter_unbalanced(
        A, M, reg=epsilon, reg_m=reg_m, method=method, log=True, verbose=True
    )
    # check fixed point equations
    fi = reg_m / (reg_m + epsilon)
    logA = nx.log(A + 1e-16)
    logq = nx.log(q + 1e-16)[:, None]
    logKtu = nx.logsumexp(
        log["logu"][:, None, :] - M[:, :, None] / epsilon, axis=0
    )
    logKv = nx.logsumexp(log["logv"][None, :] - M[:, :, None] / epsilon, axis=1)
    v_final = fi * (logq - logKtu)
    u_final = fi * (logA - logKv)

    np.testing.assert_allclose(
        nx.to_numpy(u_final), nx.to_numpy(log["logu"]), atol=1e-05)
    np.testing.assert_allclose(
        nx.to_numpy(v_final), nx.to_numpy(log["logv"]), atol=1e-05)


def test_barycenter_stabilized_vs_sinkhorn(nx):
    # test generalized sinkhorn for unbalanced OT barycenter
    n = 100
    rng = np.random.RandomState(42)
    x = rng.randn(n, 2)
    A = rng.rand(n, 2)

    # make dists unbalanced
    A = A * np.array([1, 4])[None, :]
    M = ot.dist(x, x)
    epsilon = 0.5
    reg_m = 10

    Ab, Mb = nx.from_numpy(A, M)

    qstable, _ = barycenter_unbalanced(
        Ab, Mb, reg=epsilon, reg_m=reg_m, log=True, tau=100,
        method="sinkhorn_stabilized", verbose=True
    )
    q, _ = barycenter_unbalanced(
        Ab, Mb, reg=epsilon, reg_m=reg_m, method="sinkhorn", log=True
    )
    q_np, _ = barycenter_unbalanced(
        A, M, reg=epsilon, reg_m=reg_m, method="sinkhorn", log=True
    )
    q, qstable = nx.to_numpy(q, qstable)
    np.testing.assert_allclose(q, qstable, atol=1e-05)
    np.testing.assert_allclose(q, q_np, atol=1e-05)


def test_wrong_method(nx):
    n = 10
    rng = np.random.RandomState(42)
    x = rng.randn(n, 2)
    a = ot.utils.unif(n)

    # make dists unbalanced
    b = ot.utils.unif(n) * 1.5
    M = ot.dist(x, x)
    epsilon = 1.
    reg_m = 1.

    a, b, M = nx.from_numpy(a, b, M)

    with pytest.raises(ValueError):
        ot.unbalanced.sinkhorn_unbalanced(
            a, b, M, reg=epsilon, reg_m=reg_m, method='badmethod',
            log=True, verbose=True
        )
    with pytest.raises(ValueError):
        ot.unbalanced.sinkhorn_unbalanced2(
            a, b, M, epsilon, reg_m, method='badmethod', verbose=True
        )


def test_implemented_methods(nx):
    IMPLEMENTED_METHODS = ['sinkhorn', 'sinkhorn_stabilized']
    TO_BE_IMPLEMENTED_METHODS = ['sinkhorn_reg_scaling']
    NOT_VALID_TOKENS = ['foo']
    # test generalized sinkhorn for unbalanced OT barycenter
    n = 3
    rng = np.random.RandomState(42)
    x = rng.randn(n, 2)
    a = ot.utils.unif(n)

    # make dists unbalanced
    b = ot.utils.unif(n) * 1.5
    A = rng.rand(n, 2)
    M = ot.dist(x, x)
    epsilon = 1.
    reg_m = 1.
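    # Note (added for clarity): the three blocks below exercise the method
    # dispatch: implemented solvers must run silently, methods listed as not
    # yet implemented are expected to fall back with a UserWarning, and
    # unknown tokens must raise a ValueError.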
    a, b, M, A = nx.from_numpy(a, b, M, A)

    for method in IMPLEMENTED_METHODS:
        ot.unbalanced.sinkhorn_unbalanced(a, b, M, epsilon, reg_m,
                                          method=method)
        ot.unbalanced.sinkhorn_unbalanced2(a, b, M, epsilon, reg_m,
                                           method=method)
        barycenter_unbalanced(A, M, reg=epsilon, reg_m=reg_m,
                              method=method)
    with pytest.warns(UserWarning, match='not implemented'):
        for method in set(TO_BE_IMPLEMENTED_METHODS):
            ot.unbalanced.sinkhorn_unbalanced(a, b, M, epsilon, reg_m,
                                              method=method)
            ot.unbalanced.sinkhorn_unbalanced2(a, b, M, epsilon, reg_m,
                                               method=method)
            barycenter_unbalanced(A, M, reg=epsilon, reg_m=reg_m,
                                  method=method)
    with pytest.raises(ValueError):
        for method in set(NOT_VALID_TOKENS):
            ot.unbalanced.sinkhorn_unbalanced(a, b, M, epsilon, reg_m,
                                              method=method)
            ot.unbalanced.sinkhorn_unbalanced2(a, b, M, epsilon, reg_m,
                                               method=method)
            barycenter_unbalanced(A, M, reg=epsilon, reg_m=reg_m,
                                  method=method)


@pytest.mark.parametrize("reg_div,regm_div",
                         itertools.product(['kl', 'l2', 'entropy'],
                                           ['kl', 'l2']))
def test_lbfgsb_unbalanced(nx, reg_div, regm_div):
    np.random.seed(42)
    xs = np.random.randn(5, 2)
    xt = np.random.randn(6, 2)
    M = ot.dist(xs, xt)
    a = ot.unif(5)
    b = ot.unif(6)

    G, log = ot.unbalanced.lbfgsb_unbalanced(a, b, M, 1, 10, reg_div=reg_div,
                                             regm_div=regm_div, log=True,
                                             verbose=False)

    ab, bb, Mb = nx.from_numpy(a, b, M)
    Gb, log = ot.unbalanced.lbfgsb_unbalanced(ab, bb, Mb, 1, 10,
                                              reg_div=reg_div,
                                              regm_div=regm_div, log=True,
                                              verbose=False)

    np.testing.assert_allclose(G, nx.to_numpy(Gb))


@pytest.mark.parametrize("reg_div,regm_div",
                         itertools.product(['kl', 'l2', 'entropy'],
                                           ['kl', 'l2']))
def test_lbfgsb_unbalanced_relaxation_parameters(nx, reg_div, regm_div):
    np.random.seed(42)
    xs = np.random.randn(5, 2)
    xt = np.random.randn(6, 2)
    M = ot.dist(xs, xt)
    a = ot.unif(5)
    b = ot.unif(6)
    a, b, M = nx.from_numpy(a, b, M)

    reg_m = 10
    full_list_reg_m = [reg_m, reg_m]
    full_tuple_reg_m = (reg_m, reg_m)
    tuple_reg_m, list_reg_m = (reg_m), [reg_m]
    np1_reg_m = reg_m * np.ones(1)
    np2_reg_m = reg_m * np.ones(2)
    list_options = [np1_reg_m, np2_reg_m, full_tuple_reg_m,
                    tuple_reg_m, full_list_reg_m, list_reg_m]

    G = ot.unbalanced.lbfgsb_unbalanced(a, b, M, 1, reg_m=reg_m,
                                        reg_div=reg_div, regm_div=regm_div,
                                        log=False, verbose=False)

    for opt in list_options:
        G0 = ot.unbalanced.lbfgsb_unbalanced(
            a, b, M, 1, reg_m=opt, reg_div=reg_div,
            regm_div=regm_div, log=False, verbose=False
        )
        np.testing.assert_allclose(nx.to_numpy(G), nx.to_numpy(G0), atol=1e-06)


@pytest.mark.parametrize("reg_div,regm_div",
                         itertools.product(['kl', 'l2', 'entropy'],
                                           ['kl', 'l2']))
def test_lbfgsb_reference_measure(nx, reg_div, regm_div):
    np.random.seed(42)
    xs = np.random.randn(5, 2)
    xt = np.random.randn(6, 2)
    M = ot.dist(xs, xt)
    a = ot.unif(5)
    b = ot.unif(6)
    a, b, M = nx.from_numpy(a, b, M)
    c = a[:, None] * b[None, :]

    G, _ = ot.unbalanced.lbfgsb_unbalanced(a, b, M, reg=1, reg_m=10, c=None,
                                           reg_div=reg_div, regm_div=regm_div,
                                           log=True, verbose=False)
    G0, _ = ot.unbalanced.lbfgsb_unbalanced(a, b, M, reg=1, reg_m=10, c=c,
                                            reg_div=reg_div, regm_div=regm_div,
                                            log=True, verbose=False)
    np.testing.assert_allclose(nx.to_numpy(G), nx.to_numpy(G0), atol=1e-06)


@pytest.mark.parametrize("div", ["kl", "l2"])
def test_mm_convergence(nx, div):
    n = 100
    rng = np.random.RandomState(42)
    x = rng.randn(n, 2)
    rng = np.random.RandomState(75)
    y = rng.randn(n, 2)
    a_np = ot.utils.unif(n)
    b_np = ot.utils.unif(n)

    M = ot.dist(x, y)
    M = M / M.max()
    reg_m = 100
    a, b, M = nx.from_numpy(a_np, b_np, M)

    G, _ = ot.unbalanced.mm_unbalanced(a, b, M, reg_m=reg_m, div=div,
                                       verbose=False, log=True)
    loss = nx.to_numpy(
        ot.unbalanced.mm_unbalanced2(a, b, M, reg_m, div=div, verbose=True)
    )

    # check if the marginals come close to the true ones when large reg
    np.testing.assert_allclose(np.sum(nx.to_numpy(G), 1), a_np, atol=1e-03)
    np.testing.assert_allclose(np.sum(nx.to_numpy(G), 0), b_np, atol=1e-03)

    # check if mm_unbalanced2 returns the correct loss
    np.testing.assert_allclose(nx.to_numpy(nx.sum(G * M)), loss, atol=1e-5)

    # check in case no histogram is provided
    a_np, b_np = np.array([]), np.array([])
    a, b = nx.from_numpy(a_np, b_np)
    G_null = ot.unbalanced.mm_unbalanced(a, b, M, reg_m=reg_m, div=div,
                                         verbose=False)
    np.testing.assert_allclose(nx.to_numpy(G_null), nx.to_numpy(G))

    # test when G0 is given
    G0 = ot.emd(a, b, M)
    G0_np = nx.to_numpy(G0)
    reg_m = 10000
    G = ot.unbalanced.mm_unbalanced(a, b, M, reg_m=reg_m, div=div, G0=G0,
                                    verbose=False)
    np.testing.assert_allclose(G0_np, nx.to_numpy(G), atol=1e-05)


@pytest.mark.parametrize("div", ["kl", "l2"])
def test_mm_relaxation_parameters(nx, div):
    n = 100
    rng = np.random.RandomState(42)
    x = rng.randn(n, 2)
    rng = np.random.RandomState(75)
    y = rng.randn(n, 2)
    a_np = ot.utils.unif(n)
    b_np = ot.utils.unif(n)

    M = ot.dist(x, y)
    M = M / M.max()
    a, b, M = nx.from_numpy(a_np, b_np, M)

    reg = 1e-2
    reg_m = 100
    full_list_reg_m = [reg_m, reg_m]
    full_tuple_reg_m = (reg_m, reg_m)
    tuple_reg_m, list_reg_m = (reg_m), [reg_m]
    nx1_reg_m = reg_m * nx.ones(1)
    nx2_reg_m = reg_m * nx.ones(2)
    list_options = [nx1_reg_m, nx2_reg_m, full_tuple_reg_m,
                    tuple_reg_m, full_list_reg_m, list_reg_m]

    G0, _ = ot.unbalanced.mm_unbalanced(a, b, M, reg_m=reg_m, reg=reg,
                                        div=div, verbose=False, log=True)
    loss_0 = nx.to_numpy(
        ot.unbalanced.mm_unbalanced2(a, b, M, reg_m=reg_m, reg=reg,
                                     div=div, verbose=True)
    )

    for opt in list_options:
        G1, _ = ot.unbalanced.mm_unbalanced(a, b, M, reg_m=opt, reg=reg,
                                            div=div, verbose=False, log=True)
        loss_1 = nx.to_numpy(
            ot.unbalanced.mm_unbalanced2(a, b, M, reg_m=opt, reg=reg,
                                         div=div, verbose=True)
        )

        np.testing.assert_allclose(nx.to_numpy(G0), nx.to_numpy(G1),
                                   atol=1e-05)
        np.testing.assert_allclose(loss_0, loss_1, atol=1e-5)


@pytest.mark.parametrize("div", ["kl", "l2"])
def test_mm_reference_measure(nx, div):
    n = 100
    rng = np.random.RandomState(42)
    x = rng.randn(n, 2)
    rng = np.random.RandomState(75)
    y = rng.randn(n, 2)
    a_np = ot.utils.unif(n)
    b_np = ot.utils.unif(n)

    M = ot.dist(x, y)
    M = M / M.max()
    a, b, M = nx.from_numpy(a_np, b_np, M)
    c = a[:, None] * b[None, :]

    reg = 1e-2
    reg_m = 100

    G0, _ = ot.unbalanced.mm_unbalanced(a, b, M, reg_m=reg_m, c=None,
                                        reg=reg, div=div,
                                        verbose=False, log=True)
    loss_0 = ot.unbalanced.mm_unbalanced2(a, b, M, reg_m=reg_m, c=None,
                                          reg=reg, div=div, verbose=True)
    loss_0 = nx.to_numpy(loss_0)

    G1, _ = ot.unbalanced.mm_unbalanced(a, b, M, reg_m=reg_m, c=c,
                                        reg=reg, div=div,
                                        verbose=False, log=True)
    loss_1 = ot.unbalanced.mm_unbalanced2(a, b, M, reg_m=reg_m, c=c,
                                          reg=reg, div=div, verbose=True)
    loss_1 = nx.to_numpy(loss_1)

    np.testing.assert_allclose(nx.to_numpy(G0), nx.to_numpy(G1), atol=1e-05)
    np.testing.assert_allclose(loss_0, loss_1, atol=1e-5)


def test_mm_wrong_divergence(nx):
    n = 100
    rng = np.random.RandomState(42)
    x = rng.randn(n, 2)
    rng = np.random.RandomState(75)
    y = rng.randn(n, 2)
    a_np = ot.utils.unif(n)
    b_np = ot.utils.unif(n)

    M = ot.dist(x, y)
    M = M / M.max()
    a, b, M = nx.from_numpy(a_np, b_np, M)

    reg = 1e-2
    reg_m = 100

    G0, _ = ot.unbalanced.mm_unbalanced(a, b, M, reg_m=reg_m, reg=reg,
                                        div="kl", verbose=False, log=True)
    loss_0 = nx.to_numpy(
        ot.unbalanced.mm_unbalanced2(a, b, M, reg_m=reg_m, reg=reg,
div="kl", verbose=True) ) G1, _ = ot.unbalanced.mm_unbalanced(a, b, M, reg_m=reg_m, reg=reg, div="wrong_div", verbose=False, log=True) loss_1 = nx.to_numpy( ot.unbalanced.mm_unbalanced2(a, b, M, reg_m=reg_m, reg=reg, div="wrong_div", verbose=True) ) np.testing.assert_allclose(nx.to_numpy(G0), nx.to_numpy(G1), atol=1e-05) np.testing.assert_allclose(loss_0, loss_1, atol=1e-5) python-pot-0.9.3+dfsg/test/test_utils.py000066400000000000000000000351251455713015700203310ustar00rootroot00000000000000"""Tests for module utils for timing and parallel computation """ # Author: Remi Flamary # # License: MIT License import ot import numpy as np import sys import pytest def get_LazyTensor(nx): n1 = 100 n2 = 200 rng = np.random.RandomState(42) a = rng.rand(n1) a /= a.sum() b = rng.rand(n2) b /= b.sum() a, b = nx.from_numpy(a, b) def getitem(i, j, a, b): return a[i, None] * b[None, j] # create a lazy tensor T = ot.utils.LazyTensor((n1, n2), getitem, a=a, b=b) return T, a, b def test_proj_simplex(nx): n = 10 rng = np.random.RandomState(0) # test on matrix when projection is done on axis 0 x = rng.randn(n, 2) x1 = nx.from_numpy(x) # all projections should sum to 1 proj = ot.utils.proj_simplex(x1) l1 = np.sum(nx.to_numpy(proj), axis=0) l2 = np.ones(2) np.testing.assert_allclose(l1, l2, atol=1e-5) # all projections should sum to 3 proj = ot.utils.proj_simplex(x1, 3) l1 = np.sum(nx.to_numpy(proj), axis=0) l2 = 3 * np.ones(2) np.testing.assert_allclose(l1, l2, atol=1e-5) # tets on vector x = rng.randn(n) x1 = nx.from_numpy(x) # all projections should sum to 1 proj = ot.utils.proj_simplex(x1) l1 = np.sum(nx.to_numpy(proj), axis=0) l2 = np.ones(2) np.testing.assert_allclose(l1, l2, atol=1e-5) def test_projection_sparse_simplex(): def double_sort_projection_sparse_simplex(X, max_nz, z=1, axis=None): r"""This is an equivalent but less efficient version of ot.utils.projection_sparse_simplex, as it uses two sorts instead of one. """ if axis == 0: # For each column of X, find top max_nz values and # their corresponding indices. This incurs a sort. max_nz_indices = np.argpartition( X, kth=-max_nz, axis=0)[-max_nz:] max_nz_values = X[max_nz_indices, np.arange(X.shape[1])] # Project the top max_nz values onto the simplex. # This incurs a second sort. G_nz_values = ot.smooth.projection_simplex( max_nz_values, z=z, axis=0) # Put the projection of max_nz_values to their original indices # and set all other values zero. 
            G = np.zeros_like(X)
            G[max_nz_indices, np.arange(X.shape[1])] = G_nz_values
            return G
        elif axis == 1:
            return double_sort_projection_sparse_simplex(
                X.T, max_nz, z, axis=0).T
        else:
            X = X.ravel().reshape(-1, 1)
            return double_sort_projection_sparse_simplex(
                X, max_nz, z, axis=0).ravel()

    m, n = 5, 10
    rng = np.random.RandomState(0)
    X = rng.uniform(size=(m, n))
    max_nz = 3

    for axis in [0, 1, None]:
        slow_sparse_proj = double_sort_projection_sparse_simplex(
            X, max_nz, axis=axis)
        fast_sparse_proj = ot.utils.projection_sparse_simplex(
            X, max_nz, axis=axis)

        # check that two versions produce consistent results
        np.testing.assert_allclose(
            slow_sparse_proj, fast_sparse_proj)


def test_parmap():
    n = 10

    def f(i):
        return 1.0 * i * i

    a = np.arange(n)

    l1 = list(map(f, a))
    l2 = list(ot.utils.parmap(f, a))

    np.testing.assert_allclose(l1, l2)


def test_tic_toc():
    import time

    ot.tic()
    time.sleep(0.1)
    t = ot.toc()
    t2 = ot.toq()

    # test timing
    # np.testing.assert_allclose(0.1, t, rtol=1e-1, atol=1e-1)
    # very slow macos github action equality not possible
    assert t > 0.09

    # test toc vs toq
    np.testing.assert_allclose(t, t2, rtol=1e-1, atol=1e-1)


def test_kernel():
    n = 100
    rng = np.random.RandomState(0)
    x = rng.randn(n, 2)

    K = ot.utils.kernel(x, x)

    # gaussian kernel has ones on the diagonal
    np.testing.assert_allclose(np.diag(K), np.ones(n))


def test_unif():
    n = 100
    u = ot.unif(n)
    np.testing.assert_allclose(1, np.sum(u))


def test_unif_backend(nx):
    n = 100
    for tp in nx.__type_list__:
        print(nx.dtype_device(tp))
        u = ot.unif(n, type_as=tp)
        np.testing.assert_allclose(1, np.sum(nx.to_numpy(u)), atol=1e-6)


def test_dist():
    n = 10
    rng = np.random.RandomState(0)
    x = rng.randn(n, 2)

    D = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            D[i, j] = np.sum(np.square(x[i, :] - x[j, :]))

    D2 = ot.dist(x, x)
    D3 = ot.dist(x)
    D4 = ot.dist(x, x, metric='minkowski', p=2)

    assert D4[0, 1] == D4[1, 0]

    # dist should return squared euclidean
    np.testing.assert_allclose(D, D2, atol=1e-14)
    np.testing.assert_allclose(D, D3, atol=1e-14)

    # tests that every metric runs correctly
    metrics_w = [
        'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation',
        'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'matching',
        'minkowski', 'rogerstanimoto', 'russellrao', 'sokalmichener',
        'sokalsneath', 'sqeuclidean', 'yule'
    ]  # those that support weights
    metrics = ['mahalanobis', 'seuclidean']  # do not support weights depending on scipy's version

    for metric in metrics_w:
        print(metric)
        ot.dist(x, x, metric=metric, p=3, w=rng.random((2, )))
        # check that not having any weight does not cause issues
        ot.dist(x, x, metric=metric, p=3, w=None)
    for metric in metrics:
        print(metric)
        ot.dist(x, x, metric=metric, p=3)

    # weighted minkowski but with no weights
    with pytest.raises(ValueError):
        ot.dist(x, x, metric="wminkowski")


def test_dist_backends(nx):
    n = 100
    rng = np.random.RandomState(0)
    x = rng.randn(n, 2)
    x1 = nx.from_numpy(x)

    lst_metric = ['euclidean', 'sqeuclidean']

    for metric in lst_metric:
        D = ot.dist(x, x, metric=metric)
        D1 = ot.dist(x1, x1, metric=metric)

        # low atol because jax forces float32
        np.testing.assert_allclose(D, nx.to_numpy(D1), atol=1e-5)


def test_dist0():
    n = 100
    M = ot.utils.dist0(n, method='lin_square')

    # dist0 default to linear sampling with quadratic loss
    np.testing.assert_allclose(M[0, -1], (n - 1) * (n - 1))


def test_dots():
    n1, n2, n3, n4 = 100, 50, 200, 100

    rng = np.random.RandomState(0)
    A = rng.randn(n1, n2)
    B = rng.randn(n2, n3)
    C = rng.randn(n3, n4)

    X1 = ot.utils.dots(A, B, C)
    X2 = A.dot(B.dot(C))

    np.testing.assert_allclose(X1, X2)
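
# --- Illustrative sketch (added; not part of the original test suite) ---
# test_clean_zeros below relies on ot.utils.clean_zeros dropping the
# zero-mass entries of both histograms together with the corresponding rows
# and columns of the cost matrix. The NumPy-only helper below is a minimal
# sketch of that behavior, written for documentation purposes; the actual
# library function may differ in details.
def _clean_zeros_sketch(a, b, M):
    keep_a = a > 0  # source samples with positive mass
    keep_b = b > 0  # target samples with positive mass
    return a[keep_a], b[keep_b], M[np.ix_(keep_a, keep_b)]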


def test_clean_zeros():
    n = 100
    nz = 50
    nz2 = 20
    u1 = ot.unif(n)
    u1[:nz] = 0
    u1 = u1 / u1.sum()
    u2 = ot.unif(n)
    u2[:nz2] = 0
    u2 = u2 / u2.sum()

    M = ot.utils.dist0(n)

    a, b, M2 = ot.utils.clean_zeros(u1, u2, M)

    assert len(a) == n - nz
    assert len(b) == n - nz2


def test_cost_normalization(nx):
    rng = np.random.RandomState(0)
    C = rng.rand(10, 10)
    C1 = nx.from_numpy(C)

    # does nothing
    M0 = ot.utils.cost_normalization(C1)
    M1 = nx.to_numpy(M0)
    np.testing.assert_allclose(C, M1)

    M = ot.utils.cost_normalization(C1, 'median')
    M1 = nx.to_numpy(M)
    np.testing.assert_allclose(np.median(M1), 1)

    M = ot.utils.cost_normalization(C1, 'max')
    M1 = nx.to_numpy(M)
    np.testing.assert_allclose(M1.max(), 1)

    M = ot.utils.cost_normalization(C1, 'log')
    M1 = nx.to_numpy(M)
    np.testing.assert_allclose(M1.max(), np.log(1 + C).max())

    M = ot.utils.cost_normalization(C1, 'loglog')
    M1 = nx.to_numpy(M)
    np.testing.assert_allclose(M1.max(), np.log(1 + np.log(1 + C)).max())

    with pytest.raises(ValueError):
        ot.utils.cost_normalization(C1, 'error')


def test_check_params():
    res1 = ot.utils.check_params(first='OK', second=20)
    assert res1 is True

    res0 = ot.utils.check_params(first='OK', second=None)
    assert res0 is False


def test_check_random_state_error():
    with pytest.raises(ValueError):
        ot.utils.check_random_state('error')


def test_get_parameter_pair_error():
    with pytest.raises(ValueError):
        ot.utils.get_parameter_pair((1, 2, 3))  # not pair ;)


def test_deprecated_func():

    @ot.utils.deprecated('deprecated text for fun')
    def fun():
        pass

    def fun2():
        pass

    @ot.utils.deprecated('deprecated text for class')
    class Class():
        pass

    with pytest.warns(DeprecationWarning):
        fun()

    with pytest.warns(DeprecationWarning):
        cl = Class()
        print(cl)

    if sys.version_info < (3, 5):
        print('Not tested')
    else:
        assert ot.utils._is_deprecated(fun) is True
        assert ot.utils._is_deprecated(fun2) is False


def test_BaseEstimator():

    class Class(ot.utils.BaseEstimator):

        def __init__(self, first='spam', second='eggs'):
            self.first = first
            self.second = second

    cl = Class()

    names = cl._get_param_names()
    assert 'first' in names
    assert 'second' in names

    params = cl.get_params()
    assert 'first' in params
    assert 'second' in params

    params['first'] = 'spam again'
    cl.set_params(**params)

    with pytest.raises(ValueError):
        cl.set_params(bibi=10)

    assert cl.first == 'spam again'


def test_OTResult():

    res = ot.utils.OTResult()

    # test print
    print(res)

    # test get citation
    print(res.citation)

    lst_attributes = ['a_to_b',
                      'b_to_a',
                      'lazy_plan',
                      'marginal_a',
                      'marginal_b',
                      'marginals',
                      'plan',
                      'potential_a',
                      'potential_b',
                      'potentials',
                      'sparse_plan',
                      'status',
                      'value',
                      'value_linear',
                      'value_quad',
                      'log']
    for at in lst_attributes:
        print(at)
        with pytest.raises(NotImplementedError):
            getattr(res, at)


def test_get_coordinate_circle():
    rng = np.random.RandomState(42)
    u = rng.rand(1, 100)
    x1, y1 = np.cos(u * (2 * np.pi)), np.sin(u * (2 * np.pi))
    x = np.concatenate([x1, y1]).T
    x_p = ot.utils.get_coordinate_circle(x)

    np.testing.assert_allclose(u[0], x_p)


def test_LazyTensor(nx):
    n1 = 100
    n2 = 200
    shape = (n1, n2)

    rng = np.random.RandomState(42)
    x1 = rng.randn(n1, 2)
    x2 = rng.randn(n2, 2)

    x1, x2 = nx.from_numpy(x1, x2)

    # i,j can be integers or slices, x1,x2 have to be passed as keyword arguments
    def getitem(i, j, x1, x2):
        return nx.dot(x1[i], x2[j].T)

    # create a lazy tensor
    T = ot.utils.LazyTensor((n1, n2), getitem, x1=x1, x2=x2)

    assert T.shape == (n1, n2)
    assert str(T) == "LazyTensor(shape=(100, 200),attributes=(x1,x2))"

    assert T.x1 is x1
    assert T.x2 is x2

    # get the full tensor (not lazy)
    assert T[:].shape == shape
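    # Note (added for clarity): every indexing operation below is routed
    # through `getitem`, so only the requested block is ever computed; T[:]
    # above is the only call that materializes the full (n1, n2) array.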
    # get one component
    assert T[1, 1] == nx.dot(x1[1], x2[1].T)

    # get one row
    assert T[1].shape == (n2,)

    # get one column with slices
    assert T[::10, 5].shape == (10,)

    with pytest.raises(NotImplementedError):
        T["error"]


def test_OTResult_LazyTensor(nx):

    T, a, b = get_LazyTensor(nx)

    res = ot.utils.OTResult(lazy_plan=T, batch_size=9, backend=nx)

    np.testing.assert_allclose(nx.to_numpy(a), nx.to_numpy(res.marginal_a))
    np.testing.assert_allclose(nx.to_numpy(b), nx.to_numpy(res.marginal_b))


def test_LazyTensor_reduce(nx):

    T, a, b = get_LazyTensor(nx)

    T0 = T[:]
    s0 = nx.sum(T0)

    # total sum
    s = ot.utils.reduce_lazytensor(T, nx.sum, nx=nx)

    np.testing.assert_allclose(nx.to_numpy(s), 1)
    np.testing.assert_allclose(nx.to_numpy(s), nx.to_numpy(s0))

    s2 = ot.utils.reduce_lazytensor(T, nx.sum)
    np.testing.assert_allclose(nx.to_numpy(s), nx.to_numpy(s2))

    s2 = ot.utils.reduce_lazytensor(T, nx.sum, batch_size=500)
    np.testing.assert_allclose(nx.to_numpy(s), nx.to_numpy(s2))

    s2 = ot.utils.reduce_lazytensor(T, nx.sum, batch_size=11)
    np.testing.assert_allclose(nx.to_numpy(s), nx.to_numpy(s2))

    # sum over axis 0
    s = ot.utils.reduce_lazytensor(T, nx.sum, axis=0, nx=nx)
    np.testing.assert_allclose(nx.to_numpy(s), nx.to_numpy(b))

    # sum over axis 1
    s = ot.utils.reduce_lazytensor(T, nx.sum, axis=1, nx=nx)
    np.testing.assert_allclose(nx.to_numpy(s), nx.to_numpy(a))

    # test other reduction function
    s = ot.utils.reduce_lazytensor(T, nx.logsumexp, axis=1, nx=nx)
    s2 = nx.logsumexp(T[:], axis=1)
    np.testing.assert_allclose(nx.to_numpy(s), nx.to_numpy(s2))

    # test 3D tensors
    def getitem(i, j, k, a, b, c):
        return a[i, None, None] * b[None, j, None] * c[None, None, k]

    # create a lazy tensor
    n = a.shape[0]
    T = ot.utils.LazyTensor((n, n, n), getitem, a=a, b=a, c=a)

    # total sum
    s1 = ot.utils.reduce_lazytensor(T, nx.sum, axis=0, nx=nx)
    s2 = ot.utils.reduce_lazytensor(T, nx.sum, axis=1, nx=nx)

    np.testing.assert_allclose(nx.to_numpy(s1), nx.to_numpy(s2))

    with pytest.raises(NotImplementedError):
        ot.utils.reduce_lazytensor(T, nx.sum, axis=2, nx=nx, batch_size=10)


def test_lowrank_LazyTensor(nx):
    p = 5
    n1 = 100
    n2 = 200
    shape = (n1, n2)

    rng = np.random.RandomState(42)
    X1 = rng.randn(n1, p)
    X2 = rng.randn(n2, p)
    diag_d = rng.rand(p)

    X1, X2, diag_d = nx.from_numpy(X1, X2, diag_d)

    T0 = nx.dot(X1, X2.T)

    T = ot.utils.get_lowrank_lazytensor(X1, X2)

    np.testing.assert_allclose(nx.to_numpy(T[:]), nx.to_numpy(T0))

    assert T.Q is X1
    assert T.R is X2

    # get the full tensor (not lazy)
    assert T[:].shape == shape

    # get one component
    assert T[1, 1] == nx.dot(X1[1], X2[1].T)

    # get one row
    assert T[1].shape == (n2,)

    # get one column with slices
    assert T[::10, 5].shape == (10,)

    T0 = nx.dot(X1 * diag_d[None, :], X2.T)

    T = ot.utils.get_lowrank_lazytensor(X1, X2, diag_d, nx=nx)

    np.testing.assert_allclose(nx.to_numpy(T[:]), nx.to_numpy(T0))


def test_labels_to_mask_helper(nx):
    y = np.array([1, 0, 2, 2, 1])
    out = np.array([
        [0, 1, 0],
        [1, 0, 0],
        [0, 0, 1],
        [0, 0, 1],
        [0, 1, 0],
    ])
    y = nx.from_numpy(y)
    masks = ot.utils.labels_to_masks(y)
    np.testing.assert_array_equal(out, masks)


def test_label_normalization(nx):
    y = nx.from_numpy(np.arange(5) + 1)
    out = np.arange(5)

    # labels are shifted
    y_normalized = ot.utils.label_normalization(y)
    np.testing.assert_array_equal(out, y_normalized)

    # labels are shifted but the shift is expected
    y_normalized_start = ot.utils.label_normalization(y, start=1)
    np.testing.assert_array_equal(y, y_normalized_start)
python-pot-0.9.3+dfsg/test/test_weak.py000066400000000000000000000022721455713015700201150ustar00rootroot00000000000000
"""Tests for main module ot.weak """

# Author: Remi Flamary
#
# License: MIT License

import ot
import numpy as np


def test_weak_ot():
    # test weak ot solver and identity stationary point
    n = 50
    rng = np.random.RandomState(0)

    xs = rng.randn(n, 2)
    xt = rng.randn(n, 2)

    u = ot.utils.unif(n)

    G, log = ot.weak_optimal_transport(xs, xt, u, u, log=True)

    # check constraints
    np.testing.assert_allclose(u, G.sum(1))
    np.testing.assert_allclose(u, G.sum(0))

    # check that identity is recovered
    G = ot.weak_optimal_transport(xs, xs, G0=np.eye(n) / n)

    # check G is identity
    np.testing.assert_allclose(G, np.eye(n) / n)

    # check constraints
    np.testing.assert_allclose(u, G.sum(1))
    np.testing.assert_allclose(u, G.sum(0))


def test_weak_ot_backends(nx):
    # test weak ot solver for different backends
    n = 50
    rng = np.random.RandomState(0)

    xs = rng.randn(n, 2)
    xt = rng.randn(n, 2)

    u = ot.utils.unif(n)

    G = ot.weak_optimal_transport(xs, xt, u, u)

    xs2, xt2, u2 = nx.from_numpy(xs, xt, u)

    G2 = ot.weak_optimal_transport(xs2, xt2, u2, u2)

    np.testing.assert_allclose(nx.to_numpy(G2), G)
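
# --- Illustrative sketch (added; not part of the original test suite) ---
# Weak OT with the quadratic barycentric cost, as solved by
# ot.weak_optimal_transport, minimizes over plans G with marginals (a, b)
#     sum_i a_i * || x_i - (1 / a_i) * sum_j G_ij y_j ||^2.
# The helper below evaluates this objective for a given plan; the name
# _weak_ot_objective is introduced here for illustration only.
def _weak_ot_objective(G, xs, xt, a):
    # barycentric projection of the target points under the plan G
    proj = G.dot(xt) / a[:, None]
    # weighted squared distance between each source point and its projection
    return np.sum(a * np.sum((xs - proj) ** 2, axis=1))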