sparse-0.12.0 (commit 32976286cb1eb6638533cffd9bc1646d24f541ad)

sparse-0.12.0/.codecov.yml

comment:
  layout: "header, diff, changes, uncovered"

coverage:
  status:
    project:
      default:
        # Total project must not drop by more than 3%
        target: auto
        threshold: "3%"
    patch:
      default:
        # Patch coverage must be 92%
        target: auto
        threshold: "92%"
  precision: 2
  round: down
  range: 70...98

ignore:
  - sparse/tests/
  - sparse/_version.py
  - ^(?!sparse\/)

codecov:
  token: 8de77158-fc37-47be-930c-f98739f9e5bf

sparse-0.12.0/.coveragerc

[run]
source=
    sparse/
omit=
    sparse/_version.py
    sparse/tests/*

[report]
exclude_lines =
    pragma: no cover
    return NotImplemented
    raise NotImplementedError

sparse-0.12.0/.gitattributes

sparse/_version.py export-subst

sparse-0.12.0/.github/CODE_OF_CONDUCT.md

# Code of Conduct

Please see [`docs/conduct.rst`](../docs/conduct.rst)

sparse-0.12.0/.github/FUNDING.yml

# These are supported funding model platforms

github: [Quansight, Quansight-Labs]
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']

sparse-0.12.0/.github/ISSUE_TEMPLATE/bug_report.md

---
name: Bug report
about: Create a report to help us improve
title: ''
labels: type:bug
assignees: ''

---

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior.

**Expected behavior**
A clear and concise description of what you expected to happen.

**System**
- OS and version: [e.g. Windows 10]
- `sparse` version (`sparse.__version__`)
- NumPy version (`np.__version__`)
- Numba version (`numba.__version__`)

**Additional context**
Add any other context about the problem here.

sparse-0.12.0/.github/ISSUE_TEMPLATE/feature_request.md

---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: type:enhancement
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is.

**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered** A clear and concise description of any alternative solutions or features you've considered. **Additional context** Add any other context or screenshots about the feature request here. sparse-0.12.0/.github/ISSUE_TEMPLATE/question-support.md000066400000000000000000000005011402510130100224740ustar00rootroot00000000000000--- name: Question/Support about: A question about how to use this library. title: '' labels: type:support assignees: '' --- **Description** Provide a description of what you'd like to do. **Example Code** Syntactically valid Python code that shows what you want to do, possibly with placeholder functions or methods. sparse-0.12.0/.gitignore000066400000000000000000000015641402510130100150500ustar00rootroot00000000000000#####=== Python ===##### # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so # Distribution / packaging .Python env/ build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ *.egg-info/ .installed.cfg *.egg # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *,cover .pytest_cache/ test_results/ junit/ .hypothesis/ # Airspeed velocity .asv/ # Translations *.mo *.pot # Django stuff: *.log # Sphinx documentation docs/_build/ _build/ # PyBuilder target/ # IDE .idea/ .vscode/ default.profraw # Sandbox sandbox.py sparse-0.12.0/LICENSE000066400000000000000000000027551402510130100140700ustar00rootroot00000000000000BSD 3-Clause License Copyright (c) 2018, Sparse developers All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
sparse-0.12.0/MANIFEST.in000066400000000000000000000005041402510130100146070ustar00rootroot00000000000000recursive-include sparse *.py recursive-include sparse *.html recursive-include docs *.py recursive-include docs *.rst recursive-include docs *.png include setup.py include README.rst include LICENSE include MANIFEST.in include requirements.txt recursive-include requirements *.txt prune docs/_build include versioneer.py sparse-0.12.0/README.rst000066400000000000000000000017131402510130100145430ustar00rootroot00000000000000Sparse Multidimensional Arrays ============================== |Build Status| |Docs Status| |Coverage| This library provides multi-dimensional sparse arrays. * `Documentation `_ * `Contributing `_ * `Bug Reports/Feature Requests `_ .. |Build Status| image:: https://dev.azure.com/einsteinedison/PyData%20Sparse/_apis/build/status/Tests?branchName=master :target: https://dev.azure.com/einsteinedison/PyData%20Sparse/_build/latest?definitionId=1&branchName=master :alt: Build status .. |Docs Status| image:: https://readthedocs.org/projects/sparse-nd/badge/?version=latest :target: http://sparse.pydata.org/en/latest/?badge=latest :alt: Documentation Status .. |Coverage| image:: https://codecov.io/gh/pydata/sparse/branch/master/graph/badge.svg :target: https://codecov.io/gh/pydata/sparse :alt: Coverage Report sparse-0.12.0/asv.conf.json000066400000000000000000000003771402510130100154710ustar00rootroot00000000000000{ "version": 1, "project": "sparse", "project_url": "https://sparse.pydata.org/", "repo": ".", "dvcs": "git", "environment_type": "conda", "env_dir": ".asv/env", "results_dir": ".asv/results", "html_dir": ".asv/html" } sparse-0.12.0/azure-pipelines.yml000066400000000000000000000013731402510130100167150ustar00rootroot00000000000000jobs: - job: Linux variables: python_version: '3.6' pool: vmImage: 'ubuntu-16.04' steps: - template: ci/azure-steps.yml strategy: matrix: Python38: python_version: '3.8' Python37: python_version: '3.7' Python36: python_version: '3.6' - job: MacOS variables: python_version: '3.6' pool: vmImage: 'macos-10.14' steps: - template: ci/azure-steps.yml - job: Windows variables: python_version: '3.6' pool: vmImage: 'vs2017-win2016' steps: - template: ci/azure-steps.yml - job: Docs pool: vmImage: 'ubuntu-16.04' steps: - template: ci/azure-docs.yml trigger: branches: include: - master tags: include: - '*' pr: - master sparse-0.12.0/benchmarks/000077500000000000000000000000001402510130100151675ustar00rootroot00000000000000sparse-0.12.0/benchmarks/__init__.py000066400000000000000000000000001402510130100172660ustar00rootroot00000000000000sparse-0.12.0/benchmarks/benchmark_coo.py000066400000000000000000000027441402510130100203420ustar00rootroot00000000000000import numpy as np import sparse class MatrixMultiplySuite: def setup(self): np.random.seed(0) self.x = sparse.random((100, 100), density=0.01) self.y = sparse.random((100, 100), density=0.01) self.x @ self.y # Numba compilation def time_matmul(self): self.x @ self.y class ElemwiseSuite: def setup(self): np.random.seed(0) self.x = sparse.random((100, 100, 100), density=0.01) self.y = sparse.random((100, 100, 100), density=0.01) self.x + self.y # Numba compilation def time_add(self): self.x + self.y def time_mul(self): self.x * self.y class ElemwiseBroadcastingSuite: def setup(self): np.random.seed(0) self.x = sparse.random((100, 1, 100), density=0.01) self.y = sparse.random((100, 100), density=0.01) def time_add(self): self.x + self.y def time_mul(self): self.x * self.y class IndexingSuite: def 
setup(self): np.random.seed(0) self.index = np.random.randint(0, 100, 50) self.x = sparse.random((100, 100, 100), density=0.01) # Numba compilation self.x[5] self.x[self.index] def time_index_scalar(self): self.x[5, 5, 5] def time_index_slice(self): self.x[:50] def time_index_slice2(self): self.x[:50, :50] def time_index_slice3(self): self.x[:50, :50, :50] def time_index_fancy(self): self.x[self.index] sparse-0.12.0/benchmarks/benchmark_gcxs.py000066400000000000000000000031151402510130100205170ustar00rootroot00000000000000import numpy as np import sparse class MatrixMultiplySuite: def setup(self): np.random.seed(0) self.x = sparse.random((100, 100), density=0.01, format="gcxs") self.y = sparse.random((100, 100), density=0.01, format="gcxs") self.x @ self.y # Numba compilation def time_matmul(self): self.x @ self.y class ElemwiseSuite: def setup(self): np.random.seed(0) self.x = sparse.random((100, 100, 100), density=0.01, format="gcxs") self.y = sparse.random((100, 100, 100), density=0.01, format="gcxs") self.x + self.y # Numba compilation def time_add(self): self.x + self.y def time_mul(self): self.x * self.y class ElemwiseBroadcastingSuite: def setup(self): np.random.seed(0) self.x = sparse.random((100, 1, 100), density=0.01, format="gcxs") self.y = sparse.random((100, 100), density=0.01, format="gcxs") def time_add(self): self.x + self.y def time_mul(self): self.x * self.y class IndexingSuite: def setup(self): np.random.seed(0) self.index = np.random.randint(0, 100, 50) self.x = sparse.random((100, 100, 100), density=0.01, format="gcxs") # Numba compilation self.x[5] self.x[self.index] def time_index_scalar(self): self.x[5, 5, 5] def time_index_slice(self): self.x[:50] def time_index_slice2(self): self.x[:50, :50] def time_index_slice3(self): self.x[:50, :50, :50] def time_index_fancy(self): self.x[self.index] sparse-0.12.0/benchmarks/benchmark_tensordot.py000066400000000000000000000031401402510130100215720ustar00rootroot00000000000000import numpy as np import sparse class TensordotSuiteDenseSparse: """ Performance comparison for returntype=COO vs returntype=np.ndarray. tensordot(np.ndarray, COO) """ def setup(self): np.random.seed(0) self.n = np.random.random((100, 100)) self.s = sparse.random((100, 100, 100, 100), density=0.01) def time_dense(self): sparse.tensordot(self.n, self.s, axes=([0, 1], [0, 2])) def time_sparse(self): sparse.tensordot(self.n, self.s, axes=([0, 1], [0, 2]), return_type=sparse.COO) class TensordotSuiteSparseSparse: """ Performance comparison for returntype=COO vs returntype=np.ndarray. tensordot(COO, COO) """ def setup(self): np.random.seed(0) self.s1 = sparse.random((100, 100), density=0.01) self.s2 = sparse.random((100, 100, 100, 100), density=0.01) def time_dense(self): sparse.tensordot( self.s1, self.s2, axes=([0, 1], [0, 2]), return_type=np.ndarray ) def time_sparse(self): sparse.tensordot(self.s1, self.s2, axes=([0, 1], [0, 2])) class TensordotSuiteSparseDense: """ Performance comparison for returntype=COO vs returntype=np.ndarray. 
tensordot(COO, np.ndarray) """ def setup(self): np.random.seed(0) self.s = sparse.random((100, 100, 100, 100), density=0.01) self.n = np.random.random((100, 100)) def time_dense(self): sparse.tensordot(self.s, self.n, axes=([0, 1], [0, 1])) def time_sparse(self): sparse.tensordot(self.s, self.n, axes=([0, 1], [0, 1]), return_type=sparse.COO) sparse-0.12.0/ci/000077500000000000000000000000001402510130100134455ustar00rootroot00000000000000sparse-0.12.0/ci/01-install.sh000077500000000000000000000002101402510130100156610ustar00rootroot00000000000000#!/usr/bin/env bash if [[ $NUMPY_VERSION ]]; then pip install numpy$NUMPY_VERSION; fi pip install -e .[tests] pip install codecov sparse-0.12.0/ci/azure-docs.yml000066400000000000000000000006111402510130100162420ustar00rootroot00000000000000steps: - task: UsePythonVersion@0 inputs: versionSpec: "3.7" architecture: "x64" - script: | pip install -e .[docs] displayName: Install package - script: sphinx-build -W -b html docs/ _build/html displayName: Build documentation - task: PublishPipelineArtifact@1 inputs: artifactName: 'Documentation' targetPath: '$(System.DefaultWorkingDirectory)/_build/html' sparse-0.12.0/ci/azure-steps.yml000066400000000000000000000013341402510130100164530ustar00rootroot00000000000000steps: - task: UsePythonVersion@0 inputs: versionSpec: $(python_version) architecture: "x64" - script: | pip install numpy$NUMPY_VERSION; pip install -e .[tests] pip install codecov displayName: Install package - script: pytest --pyargs sparse displayName: Run tests - script: codecov displayName: Upload coverage to CodeCov - task: PublishTestResults@2 condition: always() inputs: testResultsFiles: "$(System.DefaultWorkingDirectory)/**/test-*.xml" testRunTitle: "Publish test results" - task: PublishCodeCoverageResults@1 inputs: codeCoverageTool: Cobertura summaryFileLocation: "$(System.DefaultWorkingDirectory)/**/coverage.xml" sparse-0.12.0/ci/environment-3.6.yml000066400000000000000000000003121402510130100170340ustar00rootroot00000000000000name: py36-sparse-test channels: - conda-forge dependencies: - python=3.6 - pip - pytest - numpy - scipy - pytest-cov - nomkl - numba - black - codecov - pip: - pytest-black sparse-0.12.0/ci/environment-3.7.yml000066400000000000000000000003121402510130100170350ustar00rootroot00000000000000name: py37-sparse-test channels: - conda-forge dependencies: - python=3.7 - pip - pytest - numpy - scipy - pytest-cov - nomkl - numba - black - codecov - pip: - pytest-black sparse-0.12.0/ci/environment-3.8.yml000066400000000000000000000003121402510130100170360ustar00rootroot00000000000000name: py38-sparse-test channels: - conda-forge dependencies: - python=3.8 - pip - pytest - numpy - scipy - pytest-cov - nomkl - numba - black - codecov - pip: - pytest-black sparse-0.12.0/docs/000077500000000000000000000000001402510130100140025ustar00rootroot00000000000000sparse-0.12.0/docs/_templates/000077500000000000000000000000001402510130100161375ustar00rootroot00000000000000sparse-0.12.0/docs/_templates/autosummary/000077500000000000000000000000001402510130100205255ustar00rootroot00000000000000sparse-0.12.0/docs/_templates/autosummary/base.rst000066400000000000000000000001511402510130100221660ustar00rootroot00000000000000{{ objname | escape | underline}} .. currentmodule:: {{ module }} .. auto{{ objtype }}:: {{ objname }} sparse-0.12.0/docs/_templates/autosummary/class.rst000066400000000000000000000010171402510130100223630ustar00rootroot00000000000000{{ objname | escape | underline}} .. currentmodule:: {{ module }} .. 
autoclass:: {{ objname }} {% block attributes %} {% if attributes %} .. rubric:: Attributes .. autosummary:: :toctree: {% for item in attributes %} {{ name }}.{{ item }} {% endfor %} {% endif %} {% endblock %} {% block methods %} {% if methods %} .. rubric:: Methods .. autosummary:: :toctree: {% for item in methods %} {{ name }}.{{ item }} {% endfor %} {% endif %} {% endblock %} sparse-0.12.0/docs/_templates/autosummary/module.rst000066400000000000000000000006541402510130100225510ustar00rootroot00000000000000{{ fullname | escape | underline }} .. rubric:: Description .. automodule:: {{ fullname }} .. currentmodule:: {{ fullname }} {% if classes %} .. rubric:: Classes .. autosummary:: :toctree: {% for class in classes %} {{ class }} {% endfor %} {% endif %} {% if functions %} .. rubric:: Functions .. autosummary:: :toctree: {% for function in functions %} {{ function }} {% endfor %} {% endif %} sparse-0.12.0/docs/changelog.rst000066400000000000000000000265301402510130100164710ustar00rootroot00000000000000Changelog ========= .. currentmodule:: sparse 0.12.0 / 2021-03-19 ------------------- There are a number of large changes in this release. For example, we have implemented the :obj:`GCXS` type, and its specializations :obj:`CSR` and :obj:`CSC`. We plan on gradually improving the performance of these. * A number of :obj:`GCXS` fixes and additions (:pr:`409`, :pr:`407`, :pr:`414`, :pr:`417`, :pr:`419` thanks :ghuser:`daletovar`) * Ability to change the index dtype for better storage characteristics. (:pr:`441`, thanks :ghuser:`daletovar`) * Some work on :obj:`DOK` arrays to bring them closer to the other formats (:pr:`435`, :pr:`437`, :pr:`439`, :pr:`440`, thanks :ghuser:`DragaDoncila`) * :obj:`CSR` and :obj:`CSC` specializations of :obj:`GCXS` (:pr:`442`, thanks :ghuser:`ivirshup`) For now, this is experimental undocumented API, and subject to change. * Fix a number of bugs (:pr:`407`, :issue:`406`) * Add ``nnz`` parameter to :obj:`sparse.random` (:pr:`410`, thanks :ghuser:`emilmelnikov`) 0.11.2 / 2020-09-04 ------------------- * Fix :obj:`TypingError` on :obj:`sparse.dot` with complex dtypes. (:issue:`403`, :pr:`404`) 0.11.1 / 2020-08-31 ------------------- * Fix :obj:`ValueError` on :obj:`sparse.dot` with extremely small values. (:issue:`398`, :pr:`399`) 0.11.0 / 2020-08-18 ------------------- * Improve the performance of :obj:`sparse.dot`. (:issue:`331`, :pr:`389`, thanks :ghuser:`daletovar`) * Added the :obj:`COO.swapaxes` method. (:pr:`344`, thanks :ghuser:`lueckem`) * Added multi-axis 1-D indexing support. (:pr:`343`, thanks :ghuser:`mikeymezher`) * Fix :obj:`outer` for arrays that weren't one-dimensional. (:issue:`346`, :pr:`347`) * Add ``casting`` kwarg to :obj:`COO.astype`. (:issue:`391`, :pr:`392`) * Fix for :obj:`COO` constructor accepting invalid inputs. (:issue:`385`, :pr:`386`) 0.10.0 / 2020-05-13 ------------------- * Fixed a bug where converting an empty DOK array to COO leads to an incorrect dtype. (:issue:`314`, :pr:`315`) * Change code formatter to black. (:pr:`284`) * Add :obj:`COO.flatten` and :obj:`sparse.outer`. (:issue:`316`, :pr:`317`). * Remove broadcasting restriction between sparse arrays and dense arrays. (:issue:`306`, :pr:`318`) * Implement deterministic dask tokenization. (:issue:`300`, :pr:`320`, thanks :ghuser:`danielballan`) * Improve testing around densification (:pr:`321`, thanks :ghuser:`danielballan`) * Simplify Numba extension. (:pr:`324`, thanks :ghuser:`eric-wieser`). * Respect ``copy=False`` in ``astype`` (:pr:`328`, thanks :ghuser:`eric-wieser`). 
* Replace linear_loc with ravel_multi_index, which is 3x faster. (:pr:`330`, thanks :ghuser:`eric-wieser`). * Add error msg to tensordot operation when ``ndim==0`` (:issue:`332`, :pr:`333`, thanks :ghuser:`guilhermeleobas`). * Maintainence fixes for Sphinx 3.0 and Numba 0.49, and dropping support for Python 3.5. (:pr:`337`). * Fixed signature for :obj:`numpy.clip`. 0.9.1 / 2020-01-23 ------------------ * Fixed a bug where indexing with an empty list could lead to issues. (:issue:`281`, :pr:`282`) * Change code formatter to black. (:pr:`284`) * Add the :obj:`diagonal` and :obj:`diagonalize` functions. (:issue:`288`, :pr:`289`, thanks :ghuser:`pettni`) * Add HTML repr for notebooks. (:pr:`283`, thanks :ghuser:`daletovar`) * Avoid making copy of ``coords`` when making a new :obj:`COO` array. * Add stack and concatenate for GCXS. (:issue:`301`, :pr:`303`, thanks :ghuser:`daletovar`). * Fix issue where functions dispatching to an attribute access wouldn't work with ``__array_function__``. (:issue:`308`, :pr:`309`). * Add partial support for constructing and mirroring :obj:`COO` objects to Numba. 0.8.0 / 2019-08-26 ------------------ This release switches to Numba's new typed lists, a lot of back-end work with the CI infrastructure, so Linux, macOS and Windows are officially tested. It also includes bug fixes. It also adds in-progress, not yet public support for the GCXS format, which is a generalisation of CSR/CSC. (huge thanks to :ghuser:`daletovar`) * Fixed a bug where an array with size == 1 and nnz == 0 could not be broadcast. (:issue:`242`, :pr:`243`) * Add ``std`` and ``var``. (:pr:`244`) * Move to Azure Pipelines with CI for Windows, macOS and Linux. (:pr:`245`, :pr:`246`, :pr:`247`, :pr:`248`) * Add ``resize``, and change ``reshape`` so it raises a ``ValueError`` on shapes that don't correspond to the same size. (:issue:`241`, :issue:`250`, :pr:`256` thanks, :ghuser:`daletovar`) * Add ``isposinf`` and ``isneginf``. (:issue:`252`, :pr:`253`) * Fix ``tensordot`` when nnz = 0. (:issue:`255`, :pr:`256`) * Modifications to ``__array_function__`` to allow for sparse XArrays. (:pr:`261`, thanks :ghuser:`nvictus`) * Add not-yet-public support for GCXS. (:pr:`258`, thanks :ghuser:`daletovar`) * Improvements to ``__array_function__``. (:pr:`267`, :pr:`272`, thanks :ghuser:`crusaderky`) * Convert all Numba lists to typed lists. (:pr:`264`) * Why write code when it exists elsewhere? (:pr:`277`) * Fix some element-wise operations with scalars. (:pr:`278`) * Private modules should be private, and tests should be in the package. (:pr:`280`) 0.7.0 / 2019-03-14 ------------------ This is a release that adds compatibility with NumPy's new ``__array_function__`` protocol, for details refer to `NEP-18 `_. The other big change is that we dropped compatibility with Python 2. Users on Python 2 should use version 0.6.0. There are also some bug-fixes relating to fill-values. This was mainly a contributor-driven release. The full list of changes can be found below: * Fixed a bug where going between :obj:`sparse.DOK` and :obj:`sparse.COO` caused fill-values to be lost. (:issue:`225`, :pr:`226`). * Fixed warning for a matrix that was incorrectly considered too dense. (:issue:`228`, :pr:`229`) * Fixed some warnings in Python 3.7, the fix was needed. in preparation for Python 3.8. (:pr:`233`, thanks :ghuser:`nils-werner`) * Drop support for Python 2.7 (:issue:`234`, :pr:`235`, thanks :ghuser:`hugovk`) * Clearer error messages (:issue:`230`, :issue:`231`, :pr:`232`) * Restructure requirements.txt files. 
(:pr:`236`) * Support fill-value in reductions in specific cases. (:issue:`237`, :pr:`238`) * Add ``__array_function__`` support. (:pr:`239`, thanks, :ghuser:`pentschev`) * Cleaner code! (:pr:`240`) 0.6.0 / 2018-12-19 ------------------ This release breaks backward-compatibility. Previously, if arrays were fed into NumPy functions, an attempt would be made to densify the array and apply the NumPy function. This was unintended behaviour in most cases, with the array filling up memory before raising a ``MemoryError`` if the array was too large. We have now changed this behaviour so that a ``RuntimeError`` is now raised if an attempt is made to automatically densify an array. To densify, use the explicit ``.todense()`` method. * Fixed a bug where ``np.matrix`` could sometimes fail to convert to a ``COO``. (:issue:`199`, :pr:`200`). * Make sure that ``sparse @ sparse`` returns a sparse array. (:issue:`201`, :pr:`203`) * Bring ``operator.matmul`` behaviour in line with NumPy for ``ndim > 2``. (:issue:`202`, :pr:`204`, :pr:`217`) * Make sure ``dtype`` is preserved with the ``out`` kwarg. (:issue:`205`, :pr:`206`) * Fix integer overflow in ``reduce`` on Windows. (:issue:`207`, :pr:`208`) * Disallow auto-densification. (:issue:`218`, :pr:`220`) * Add auto-densification configuration, and a configurable warning for checking if the array is too dense. (:pr:`210`, :pr:`213`) * Add pruning of fill-values to COO constructor. (:pr:`221`) 0.5.0 / 2018-10-12 ------------------ * Added :code:`COO.real`, :code:`COO.imag`, and :code:`COO.conj` (:pr:`196`). * Added :code:`sparse.kron` function (:pr:`194`, :pr:`195`). * Added :code:`order` parameter to :code:`COO.reshape` to make it work with :code:`np.reshape` (:pr:`193`). * Added :code:`COO.mean` and :code:`sparse.nanmean` (:pr:`190`). * Added :code:`sparse.full` and :code:`sparse.full_like` (:pr:`189`). * Added :code:`COO.clip` method (:pr:`185`). * Added :code:`COO.copy` method, and changed pickle of :code:`COO` to not include its cache (:pr:`184`). * Added :code:`sparse.eye`, :code:`sparse.zeros`, :code:`sparse.zeros_like`, :code:`sparse.ones`, and :code:`sparse.ones_like` (:pr:`183`). 0.4.1 / 2018-09-12 ------------------ * Allow mixed :code:`ndarray`-:code:`COO` operations if the result is sparse (:issue:`124`, via :pr:`182`). * Allow specifying a fill-value when converting from NumPy arrays (:issue:`179`, via :pr:`180`). * Added :code:`COO.any` and :code:`COO.all` methods (:pr:`175`). * Indexing for :code:`COO` now accepts a single one-dimensional array index (:pr:`172`). * The fill-value can now be something other than zero or :code:`False` (:pr:`165`). * Added a :code:`sparse.roll` function (:pr:`160`). * Numba code now releases the GIL. This leads to better multi-threaded performance in Dask (:pr:`159`). * A number of bugs occurred, so to resolve them, :code:`COO.coords.dtype` is always :code:`np.int64`. :code:`COO`, therefore, uses more memory than before (:pr:`158`). * Add support for saving and loading :code:`COO` files from disk (:issue:`153`, via :pr:`154`). * Support :code:`COO.nonzero` and :code:`np.argwhere` (:issue:`145`, via :pr:`148`). * Allow faux in-place operations (:issue:`80`, via :pr:`146`). * :code:`COO` is now always canonical (:pr:`141`). * Improve indexing performance (:pr:`128`). * Improve element-wise performance (:pr:`127`). * Reductions now support a negative axis (:issue:`117`, via :pr:`118`). * Match behaviour of :code:`ufunc.reduce` from NumPy (:issue:`107`, via :pr:`108`). 
0.3.1 / 2018-04-12 ------------------ * Fix packaging error (:pr:`138`). 0.3.0 / 2018-02-22 ------------------ * Add NaN-skipping aggregations (:pr:`102`). * Add equivalent to :code:`np.where` (:pr:`102`). * N-input universal functions now work (:pr:`98`). * Make :code:`dot` more consistent with NumPy (:pr:`96`). * Create a base class :code:`SparseArray` (:pr:`92`). * Minimum NumPy version is now 1.13 (:pr:`90`). * Fix a bug where setting a :code:`DOK` element to zero did nothing (:issue:`93`, via :pr:`94`). 0.2.0 / 2018-01-25 ------------------ * Support faster :code:`np.array(COO)` (:pr:`87`). * Add :code:`DOK` type (:pr:`85`). * Fix sum for large arrays (:issue:`82`, via :pr:`83`). * Support :code:`.size` and :code:`.density` (:pr:`69`). * Documentation added for the package (:pr:`43`). * Minimum required SciPy version is now 0.19 (:pr:`70`). * :code:`len(COO)` now works (:pr:`68`). * :code:`scalar op COO` now works for all operators (:pr:`67`). * Validate axes for :code:`.transpose()` (:pr:`61`). * Extend indexing support (:pr:`57`). * Add :code:`random` function for generating random sparse arrays (:pr:`41`). * :code:`COO(COO)` now copies the original object (:pr:`55`). * NumPy universal functions and reductions now work on :code:`COO` arrays (:pr:`49`). * Fix concatenate and stack for large arrays (:issue:`32`, via :pr:`51`). * Fix :code:`nnz` for scalars (:issue:`47`, via :pr:`48`). * Support more operators and remove all special cases (:pr:`46`). * Add support for :code:`triu` and :code:`tril` (:pr:`40`). * Add support for Ellipsis (:code:`...`) and :code:`None` when indexing (:pr:`37`). * Add support for bitwise bindary operations like :code:`&` and :code:`|` (:pr:`38`). * Support broadcasting in element-wise operations (:pr:`35`). sparse-0.12.0/docs/conduct.rst000066400000000000000000000124601402510130100161760ustar00rootroot00000000000000Contributor Covenant Code of Conduct ==================================== Our Pledge ---------- We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. 
Our Standards ------------- Examples of behavior that contributes to a positive environment for our community include: - Demonstrating empathy and kindness toward other people - Being respectful of differing opinions, viewpoints, and experiences - Giving and gracefully accepting constructive feedback - Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience - Focusing on what is best not just for us as individuals, but for the overall community Examples of unacceptable behavior include: - The use of sexualized language or imagery, and sexual attention or advances of any kind - Trolling, insulting or derogatory comments, and personal or political attacks - Public or private harassment - Publishing others' private information, such as a physical or email address, without their explicit permission - Other conduct which could reasonably be considered inappropriate in a professional setting Enforcement Responsibilities ---------------------------- Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. Scope ----- This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Enforcement ----------- Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at `habbasi@quansight.com `_. All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the reporter of any incident. Enforcement Guidelines ---------------------- Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: 1. Correction ~~~~~~~~~~~~~ **Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. **Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. 2. Warning ~~~~~~~~~~ **Community Impact**: A violation through a single incident or series of actions. **Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. 3. Temporary Ban ~~~~~~~~~~~~~~~~ **Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. 
**Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. 4. Permanent Ban ~~~~~~~~~~~~~~~~ **Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. **Consequence**: A permanent ban from any sort of public interaction within the community. Attribution ----------- This Code of Conduct is adapted from the `Contributor Covenant `__, version 2.0, available at https://www.contributor-covenant.org/version/2/0/code\_of\_conduct.html. Community Impact Guidelines were inspired by `Mozilla's code of conduct enforcement ladder `__. For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations. sparse-0.12.0/docs/conf.py000066400000000000000000000140731402510130100153060ustar00rootroot00000000000000#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # sparse documentation build configuration file, created by # sphinx-quickstart on Fri Dec 29 20:58:03 2017. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys sys.path.insert(0, os.path.abspath("..")) from sparse import __version__ # flake8: noqa E402 # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.doctest", "sphinx.ext.intersphinx", "sphinx.ext.coverage", "sphinx.ext.mathjax", "sphinx.ext.napoleon", "sphinx.ext.viewcode", "sphinx.ext.autosummary", "sphinx.ext.inheritance_diagram", "sphinx.ext.extlinks", ] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] mathjax_path = ( "https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML" ) # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = ".rst" # The master toctree document. master_doc = "index" # General information about the project. project = "sparse" copyright = "2018, Sparse developers" author = "Sparse Developers" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = __version__ # The full version, including alpha/beta/rc tags. release = __version__ # The language for content autogenerated by Sphinx. 
Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ["_build", "**tests**", "**setup**", "**extern**", "**data**"] # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False autosummary_generate = True autosummary_generate_overwrite = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = "sphinx_rtd_theme" html_logo = "logo.png" html_favicon = "logo.png" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # This is required for the alabaster theme # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars # html_sidebars = { # '**': [ # 'relations.html', # needs 'show_related': True theme option to display # 'searchbox.html', # ] # } # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = "sparsedoc" # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, "sparse.tex", "sparse Documentation", "Sparse Developers", "manual") ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [(master_doc, "sparse", "sparse Documentation", [author], 1)] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( master_doc, "sparse", "sparse Documentation", author, "sparse", "One line description of project.", "Miscellaneous", ) ] # Example configuration for intersphinx: refer to the Python standard library. 
intersphinx_mapping = {
    "python": ("https://docs.python.org/3/", None),
    "numpy": ("https://docs.scipy.org/doc/numpy/", None),
    "scipy": ("https://docs.scipy.org/doc/scipy/reference/", None),
}

extlinks = {
    "issue": ("https://github.com/pydata/sparse/issues/%s", "Issue #"),
    "pr": ("https://github.com/pydata/sparse/pull/%s", "PR #"),
    "ghuser": ("https://github.com/%s", "@"),
}

sparse-0.12.0/docs/construct.rst

.. currentmodule:: sparse

Construct Sparse Arrays
=======================

From coordinates and data
-------------------------

You can construct :obj:`COO` arrays from coordinates and value data.

The :code:`coords` parameter contains the indices where the data is nonzero, and the :code:`data` parameter contains the data corresponding to those indices. For example, the following code will generate a :math:`5 \times 5` diagonal matrix:

.. code-block:: python

   >>> import sparse

   >>> coords = [[0, 1, 2, 3, 4],
   ...           [0, 1, 2, 3, 4]]
   >>> data = [10, 20, 30, 40, 50]
   >>> s = sparse.COO(coords, data, shape=(5, 5))

   >>> s.todense()
   array([[10,  0,  0,  0,  0],
          [ 0, 20,  0,  0,  0],
          [ 0,  0, 30,  0,  0],
          [ 0,  0,  0, 40,  0],
          [ 0,  0,  0,  0, 50]])

In general :code:`coords` should be a :code:`(ndim, nnz)` shaped array. Each row of :code:`coords` contains one dimension of the desired sparse array, and each column contains the index corresponding to that nonzero element. :code:`data` contains the nonzero elements of the array corresponding to the indices in :code:`coords`. Its shape should be :code:`(nnz,)`.

If ``data`` is the same across all the coordinates, it can be passed in as a scalar. For example, the following produces the :math:`4 \times 4` identity matrix:

.. code-block:: python

   >>> import sparse

   >>> coords = [[0, 1, 2, 3],
   ...           [0, 1, 2, 3]]
   >>> data = 1
   >>> s = sparse.COO(coords, data, shape=(4, 4))

You can, and should, pass in :obj:`numpy.ndarray` objects for :code:`coords` and :code:`data`. In this case, the shape of the resulting array is determined from the maximum index in each dimension. If the array extends beyond the maximum index in :code:`coords`, you should supply a shape explicitly. For example, if we did the following without the :code:`shape` keyword argument, it would result in a :math:`4 \times 5` matrix, but maybe we wanted one that was actually :math:`5 \times 5`.

.. code-block:: python

   coords = [[0, 3, 2, 1], [4, 1, 2, 0]]
   data = [1, 4, 2, 1]
   s = COO(coords, data, shape=(5, 5))

:obj:`COO` arrays support arbitrary fill values. Fill values are the "default" value, or the value not to store. This can be given a value other than zero. For example, the following builds a (bad) representation of a :math:`2 \times 2` identity matrix. Note that not all operations are supported for arrays with nonzero fill values.

.. code-block:: python

   coords = [[0, 1], [1, 0]]
   data = [0, 0]
   s = COO(coords, data, fill_value=1)

From :doc:`Scipy sparse matrices `
-------------------------------------------------------------------

To construct :obj:`COO` arrays from :obj:`spmatrix ` objects, you can use the :obj:`COO.from_scipy_sparse` method. As an example, if :code:`x` is a :obj:`scipy.sparse.spmatrix`, you can do the following to get an equivalent :obj:`COO` array:

.. code-block:: python

   s = COO.from_scipy_sparse(x)

From :doc:`Numpy arrays `
------------------------------------------------------------

To construct :obj:`COO` arrays from :obj:`numpy.ndarray` objects, you can use the :obj:`COO.from_numpy` method.
As an example, if :code:`x` is a :obj:`numpy.ndarray`, you can do the following to get an equivalent :obj:`COO` array:

.. code-block:: python

   s = COO.from_numpy(x)

Generating random :obj:`COO` objects
------------------------------------

The :obj:`sparse.random` method can be used to create random :obj:`COO` arrays. For example, the following will generate a :math:`10 \times 10` matrix with :math:`10` nonzero entries, each in the interval :math:`[0, 1)`.

.. code-block:: python

   s = sparse.random((10, 10), density=0.1)

Building :obj:`COO` Arrays from :obj:`DOK` Arrays
-------------------------------------------------

It's possible to build :obj:`COO` arrays from :obj:`DOK` arrays when it is not easy to construct the :code:`coords` and :code:`data` directly. :obj:`DOK` arrays provide a simple builder interface to build :obj:`COO` arrays, but at this time, they can do little else.

You can get started by defining the shape (and optionally, datatype) of the :obj:`DOK` array. If you do not specify a dtype, it is inferred from the value dictionary or is set to :code:`dtype('float64')` if that is not present.

.. code-block:: python

   s = DOK((6, 5, 2))
   s2 = DOK((2, 3, 4), dtype=np.uint8)

After this, you can build the array by assigning arrays or scalars to elements or slices of the original array. Broadcasting rules are followed.

.. code-block:: python

   s[1:3, 3:1:-1] = [[6, 5]]

DOK arrays also support fancy indexing assignment if and only if all dimensions are indexed.

.. code-block:: python

   s[[0, 2], [2, 1], [0, 1]] = 5
   s[[0, 3], [0, 4], [0, 1]] = [1, 5]

At the end, you can convert the :obj:`DOK` array to a :obj:`COO` array, and perform arithmetic or other operations on it.

.. code-block:: python

   s3 = COO(s)

In addition, it is possible to access single elements and slices of the :obj:`DOK` array using normal Numpy indexing, as well as fancy indexing if and only if all dimensions are indexed. Slicing and fancy indexing will always return a new DOK array.

.. code-block:: python

   s[1, 2, 1]  # 5
   s[5, 1, 1]  # 0
   s[[0, 3], [0, 4], [0, 1]]  #

.. _converting:

Converting :obj:`COO` objects to other Formats
----------------------------------------------

:obj:`COO` arrays can be converted to :doc:`Numpy arrays `, or to some :obj:`spmatrix ` subclasses via the following methods:

* :obj:`COO.todense`: Converts to a :obj:`numpy.ndarray` unconditionally.
* :obj:`COO.maybe_densify`: Converts to a :obj:`numpy.ndarray` based on certain constraints.
* :obj:`COO.to_scipy_sparse`: Converts to a :obj:`scipy.sparse.coo_matrix` if the array is two dimensional.
* :obj:`COO.tocsr`: Converts to a :obj:`scipy.sparse.csr_matrix` if the array is two dimensional.
* :obj:`COO.tocsc`: Converts to a :obj:`scipy.sparse.csc_matrix` if the array is two dimensional.

sparse-0.12.0/docs/contributing.rst

Contributing
============

General Guidelines
------------------

sparse is a community-driven project on GitHub. You can find our `repository on GitHub `_. Feel free to open issues for new features or bugs, or open a pull request to fix a bug or add a new feature.

If you haven't contributed to open-source before, we recommend you read `this excellent guide by GitHub on how to contribute to open source `_. The guide is long, so you can gloss over things you're familiar with.

If you're not already familiar with it, we follow the `fork and pull model `_ on GitHub.
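For readers who haven't used the fork and pull model before, the snippet below sketches one typical sequence of shell commands. It is an illustrative example rather than an official project workflow; ``your-username`` and the branch name ``fix-some-bug`` are placeholders you should replace with your own values.

.. code-block:: bash

   # Fork pydata/sparse on GitHub first, then clone your fork.
   git clone https://github.com/your-username/sparse.git
   cd sparse

   # Keep a reference to the upstream repository so you can stay in sync with master.
   git remote add upstream https://github.com/pydata/sparse.git

   # Create a feature branch for your change.
   git checkout -b fix-some-bug

   # ...edit, commit, then push the branch to your fork...
   git push -u origin fix-some-bug

   # Finally, open a pull request against pydata/sparse on GitHub.
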
Filing Issues
-------------

If you find a bug or would like a new feature, you might want to `consider filing a new issue on GitHub `_. Before you open a new issue, please make sure of the following:

* This should go without saying, but make sure what you are requesting is within the scope of this project.
* The bug/feature is still present/missing on the ``master`` branch on GitHub.
* A similar issue or pull request isn't already open. If one already is, it's better to contribute to the discussion there.

Contributing Code
-----------------

This project has a number of requirements for all code contributed.

* We use ``flake8`` to automatically lint the code and maintain code style.
* We use Numpy-style docstrings.
* It's ideal if user-facing API changes or new features have documentation added.
* 100% code coverage is recommended for all new code in any submitted PR. Doctests count toward coverage.
* Performance optimizations should have benchmarks added in ``benchmarks``.

Setting up Your Development Environment
---------------------------------------

The following bash script is all you need to set up your development environment, after forking and cloning the repository:

.. code-block:: bash

   pip install -e .[all]

Running/Adding Unit Tests
-------------------------

It is best if all new functionality and/or bug fixes have unit tests added with each use-case.

We use `pytest `_ as our unit testing framework, with the ``pytest-cov`` extension to check code coverage and ``pytest-flake8`` to check code style. You don't need to configure these extensions yourself. Once you've configured your environment, you can just ``cd`` to the root of your repository and run

.. code-block:: bash

   pytest --pyargs sparse

This automatically checks code style and functionality, and prints code coverage, even though it doesn't fail on low coverage. Unit tests are automatically run on Travis CI for pull requests.

Coverage
--------

The ``pytest`` script automatically reports coverage, both on the terminal for missing line numbers, and in annotated HTML form in ``htmlcov/index.html``. Coverage is automatically checked on CodeCov for pull requests.

Adding/Building the Documentation
---------------------------------

If a feature is stable and relatively finalized, it is time to add it to the documentation. If you are adding any private/public functions, it is best to add docstrings, to aid in reviewing code and also for the API reference.

We use `Numpy style docstrings `_ and `Sphinx `_ to document this library. Sphinx, in turn, uses `reStructuredText `_ as its markup language for adding code.

We use the `Sphinx Autosummary extension `_ to generate API references. In particular, you may want to look at the :code:`docs/generated` directory to see how these files look and where to add new functions, classes or modules. For example, if you add a new function to the :code:`sparse.COO` class, you would open up :code:`docs/generated/sparse.COO.rst`, and add in the name of the function where appropriate.

To build the documentation, you can :code:`cd` into the :code:`docs` directory and run

.. code-block:: bash

   sphinx-build -W -b html . _build/html

After this, you can find an HTML version of the documentation in :code:`docs/_build/html/index.html`. Documentation for pull requests is automatically built on CircleCI and can be found in the build artifacts.

Adding and Running Benchmarks
-----------------------------

We use `Airspeed Velocity `_ to run benchmarks.
We have it set up to use ``conda``, but you can edit the configuration locally if you so wish. sparse-0.12.0/docs/generated/000077500000000000000000000000001402510130100157405ustar00rootroot00000000000000sparse-0.12.0/docs/generated/sparse.COO.T.rst000066400000000000000000000001021402510130100206010ustar00rootroot00000000000000COO\.T ====== .. currentmodule:: sparse .. autoattribute:: COO.Tsparse-0.12.0/docs/generated/sparse.COO.all.rst000066400000000000000000000001031402510130100211470ustar00rootroot00000000000000COO.all ======= .. currentmodule:: sparse .. automethod:: COO.allsparse-0.12.0/docs/generated/sparse.COO.any.rst000066400000000000000000000001031402510130100211660ustar00rootroot00000000000000COO.any ======= .. currentmodule:: sparse .. automethod:: COO.anysparse-0.12.0/docs/generated/sparse.COO.asformat.rst000066400000000000000000000001221402510130100222140ustar00rootroot00000000000000COO.asformat ============ .. currentmodule:: sparse .. automethod:: COO.asformatsparse-0.12.0/docs/generated/sparse.COO.astype.rst000066400000000000000000000001161402510130100217100ustar00rootroot00000000000000COO\.astype =========== .. currentmodule:: sparse .. automethod:: COO.astypesparse-0.12.0/docs/generated/sparse.COO.broadcast_to.rst000066400000000000000000000001421402510130100230460ustar00rootroot00000000000000COO\.broadcast\_to ================== .. currentmodule:: sparse .. automethod:: COO.broadcast_tosparse-0.12.0/docs/generated/sparse.COO.clip.rst000066400000000000000000000001111402510130100213250ustar00rootroot00000000000000COO\.clip ========= .. currentmodule:: sparse .. automethod:: COO.clip sparse-0.12.0/docs/generated/sparse.COO.conj.rst000066400000000000000000000001111402510130100213270ustar00rootroot00000000000000COO\.conj ========= .. currentmodule:: sparse .. automethod:: COO.conj sparse-0.12.0/docs/generated/sparse.COO.copy.rst000066400000000000000000000001111402510130100213500ustar00rootroot00000000000000COO\.copy ========= .. currentmodule:: sparse .. automethod:: COO.copy sparse-0.12.0/docs/generated/sparse.COO.density.rst000066400000000000000000000001251402510130100220620ustar00rootroot00000000000000COO\.density ============ .. currentmodule:: sparse .. autoattribute:: COO.density sparse-0.12.0/docs/generated/sparse.COO.dot.rst000066400000000000000000000001051402510130100211670ustar00rootroot00000000000000COO\.dot ======== .. currentmodule:: sparse .. automethod:: COO.dotsparse-0.12.0/docs/generated/sparse.COO.dtype.rst000066400000000000000000000001161402510130100215300ustar00rootroot00000000000000COO\.dtype ========== .. currentmodule:: sparse .. autoattribute:: COO.dtypesparse-0.12.0/docs/generated/sparse.COO.enable_caching.rst000066400000000000000000000001501402510130100233030ustar00rootroot00000000000000COO\.enable\_caching ==================== .. currentmodule:: sparse .. automethod:: COO.enable_cachingsparse-0.12.0/docs/generated/sparse.COO.flatten.rst000066400000000000000000000001171402510130100220410ustar00rootroot00000000000000COO.flatten =========== .. currentmodule:: sparse .. automethod:: COO.flattensparse-0.12.0/docs/generated/sparse.COO.from_iter.rst000066400000000000000000000001271402510130100223730ustar00rootroot00000000000000COO.from\_iter ============== .. currentmodule:: sparse .. automethod:: COO.from_itersparse-0.12.0/docs/generated/sparse.COO.from_numpy.rst000066400000000000000000000001341402510130100225760ustar00rootroot00000000000000COO\.from\_numpy ================ .. currentmodule:: sparse .. 
automethod:: COO.from_numpysparse-0.12.0/docs/generated/sparse.COO.from_scipy_sparse.rst000066400000000000000000000001631402510130100241340ustar00rootroot00000000000000COO\.from\_scipy\_sparse ======================== .. currentmodule:: sparse .. automethod:: COO.from_scipy_sparsesparse-0.12.0/docs/generated/sparse.COO.imag.rst000066400000000000000000000001141402510130100213160ustar00rootroot00000000000000COO\.imag ========= .. currentmodule:: sparse .. autoattribute:: COO.imag sparse-0.12.0/docs/generated/sparse.COO.linear_loc.rst000066400000000000000000000001341402510130100225120ustar00rootroot00000000000000COO\.linear\_loc ================ .. currentmodule:: sparse .. automethod:: COO.linear_locsparse-0.12.0/docs/generated/sparse.COO.max.rst000066400000000000000000000001051402510130100211660ustar00rootroot00000000000000COO\.max ======== .. currentmodule:: sparse .. automethod:: COO.maxsparse-0.12.0/docs/generated/sparse.COO.maybe_densify.rst000066400000000000000000000001451402510130100232230ustar00rootroot00000000000000COO\.maybe\_densify =================== .. currentmodule:: sparse .. automethod:: COO.maybe_densifysparse-0.12.0/docs/generated/sparse.COO.mean.rst000066400000000000000000000001111402510130100213160ustar00rootroot00000000000000COO\.mean ========= .. currentmodule:: sparse .. automethod:: COO.mean sparse-0.12.0/docs/generated/sparse.COO.min.rst000066400000000000000000000001051402510130100211640ustar00rootroot00000000000000COO\.min ======== .. currentmodule:: sparse .. automethod:: COO.minsparse-0.12.0/docs/generated/sparse.COO.nbytes.rst000066400000000000000000000001211402510130100217030ustar00rootroot00000000000000COO\.nbytes =========== .. currentmodule:: sparse .. autoattribute:: COO.nbytessparse-0.12.0/docs/generated/sparse.COO.ndim.rst000066400000000000000000000001131402510130100213270ustar00rootroot00000000000000COO\.ndim ========= .. currentmodule:: sparse .. autoattribute:: COO.ndimsparse-0.12.0/docs/generated/sparse.COO.nnz.rst000066400000000000000000000001101402510130100212020ustar00rootroot00000000000000COO\.nnz ======== .. currentmodule:: sparse .. autoattribute:: COO.nnzsparse-0.12.0/docs/generated/sparse.COO.nonzero.rst000066400000000000000000000001171402510130100220760ustar00rootroot00000000000000COO.nonzero =========== .. currentmodule:: sparse .. automethod:: COO.nonzerosparse-0.12.0/docs/generated/sparse.COO.prod.rst000066400000000000000000000001101402510130100213410ustar00rootroot00000000000000COO\.prod ========= .. currentmodule:: sparse .. automethod:: COO.prodsparse-0.12.0/docs/generated/sparse.COO.real.rst000066400000000000000000000001141402510130100213240ustar00rootroot00000000000000COO\.real ========= .. currentmodule:: sparse .. autoattribute:: COO.real sparse-0.12.0/docs/generated/sparse.COO.reduce.rst000066400000000000000000000001161402510130100216520ustar00rootroot00000000000000COO\.reduce =========== .. currentmodule:: sparse .. automethod:: COO.reducesparse-0.12.0/docs/generated/sparse.COO.reshape.rst000066400000000000000000000001211402510130100220260ustar00rootroot00000000000000COO\.reshape ============ .. currentmodule:: sparse .. automethod:: COO.reshapesparse-0.12.0/docs/generated/sparse.COO.resize.rst000066400000000000000000000001141402510130100217020ustar00rootroot00000000000000COO.resize ========== .. currentmodule:: sparse .. automethod:: COO.resizesparse-0.12.0/docs/generated/sparse.COO.round.rst000066400000000000000000000001131402510130100215270ustar00rootroot00000000000000COO\.round ========== .. currentmodule:: sparse .. 
automethod:: COO.roundsparse-0.12.0/docs/generated/sparse.COO.rst000066400000000000000000000030341402510130100204060ustar00rootroot00000000000000COO === .. currentmodule:: sparse .. autoclass:: COO .. note:: :obj:`COO` objects also support :ref:`operators ` and :ref:`indexing ` .. rubric:: Attributes .. autosummary:: :toctree: COO.T COO.dtype COO.nbytes COO.ndim COO.nnz COO.size COO.density COO.imag COO.real .. rubric:: :doc:`Constructing COO objects <../construct>` .. autosummary:: :toctree: COO.from_iter COO.from_numpy COO.from_scipy_sparse .. rubric:: :ref:`Element-wise operations ` .. autosummary:: :toctree: COO.astype COO.conj COO.clip COO.round .. rubric:: :ref:`Reductions ` .. autosummary:: :toctree: COO.reduce COO.sum COO.prod COO.min COO.max COO.any COO.all COO.mean COO.std COO.var .. rubric:: :ref:`Converting to other formats ` .. autosummary:: :toctree: COO.asformat COO.todense COO.maybe_densify COO.to_scipy_sparse COO.tocsc COO.tocsr .. rubric:: :ref:`Other operations ` .. autosummary:: :toctree: COO.copy COO.dot COO.flatten COO.reshape COO.resize COO.transpose COO.swapaxes COO.nonzero .. rubric:: Utility functions .. autosummary:: :toctree: COO.broadcast_to COO.enable_caching COO.linear_loc sparse-0.12.0/docs/generated/sparse.COO.size.rst000066400000000000000000000001141402510130100213530ustar00rootroot00000000000000COO\.size ========= .. currentmodule:: sparse .. autoattribute:: COO.size sparse-0.12.0/docs/generated/sparse.COO.std.rst000066400000000000000000000001031402510130100211710ustar00rootroot00000000000000COO.std ======= .. currentmodule:: sparse .. automethod:: COO.stdsparse-0.12.0/docs/generated/sparse.COO.sum.rst000066400000000000000000000001051402510130100212050ustar00rootroot00000000000000COO\.sum ======== .. currentmodule:: sparse .. automethod:: COO.sumsparse-0.12.0/docs/generated/sparse.COO.swapaxes.rst000066400000000000000000000001221402510130100222330ustar00rootroot00000000000000COO.swapaxes ============ .. currentmodule:: sparse .. automethod:: COO.swapaxessparse-0.12.0/docs/generated/sparse.COO.to_scipy_sparse.rst000066400000000000000000000001551402510130100236140ustar00rootroot00000000000000COO\.to\_scipy\_sparse ====================== .. currentmodule:: sparse .. automethod:: COO.to_scipy_sparsesparse-0.12.0/docs/generated/sparse.COO.tocsc.rst000066400000000000000000000001131402510130100215130ustar00rootroot00000000000000COO\.tocsc ========== .. currentmodule:: sparse .. automethod:: COO.tocscsparse-0.12.0/docs/generated/sparse.COO.tocsr.rst000066400000000000000000000001131402510130100215320ustar00rootroot00000000000000COO\.tocsr ========== .. currentmodule:: sparse .. automethod:: COO.tocsrsparse-0.12.0/docs/generated/sparse.COO.todense.rst000066400000000000000000000001211402510130100220400ustar00rootroot00000000000000COO\.todense ============ .. currentmodule:: sparse .. automethod:: COO.todensesparse-0.12.0/docs/generated/sparse.COO.transpose.rst000066400000000000000000000001271402510130100224230ustar00rootroot00000000000000COO\.transpose ============== .. currentmodule:: sparse .. automethod:: COO.transposesparse-0.12.0/docs/generated/sparse.COO.var.rst000066400000000000000000000001031402510130100211670ustar00rootroot00000000000000COO.var ======= .. currentmodule:: sparse .. automethod:: COO.varsparse-0.12.0/docs/generated/sparse.DOK.asformat.rst000066400000000000000000000001221402510130100222110ustar00rootroot00000000000000DOK.asformat ============ .. currentmodule:: sparse .. 
automethod:: DOK.asformatsparse-0.12.0/docs/generated/sparse.DOK.density.rst000066400000000000000000000001241402510130100220560ustar00rootroot00000000000000DOK\.density ============ .. currentmodule:: sparse .. autoattribute:: DOK.densitysparse-0.12.0/docs/generated/sparse.DOK.from_coo.rst000066400000000000000000000001261402510130100222040ustar00rootroot00000000000000DOK\.from\_coo ============== .. currentmodule:: sparse .. automethod:: DOK.from_coosparse-0.12.0/docs/generated/sparse.DOK.from_numpy.rst000066400000000000000000000001341402510130100225730ustar00rootroot00000000000000DOK\.from\_numpy ================ .. currentmodule:: sparse .. automethod:: DOK.from_numpysparse-0.12.0/docs/generated/sparse.DOK.ndim.rst000066400000000000000000000001131402510130100213240ustar00rootroot00000000000000DOK\.ndim ========= .. currentmodule:: sparse .. autoattribute:: DOK.ndimsparse-0.12.0/docs/generated/sparse.DOK.nnz.rst000066400000000000000000000001101402510130100211770ustar00rootroot00000000000000DOK\.nnz ======== .. currentmodule:: sparse .. autoattribute:: DOK.nnzsparse-0.12.0/docs/generated/sparse.DOK.rst000066400000000000000000000006151402510130100204050ustar00rootroot00000000000000DOK === .. currentmodule:: sparse .. autoclass:: DOK .. rubric:: Attributes .. autosummary:: :toctree: DOK.density DOK.ndim DOK.nnz DOK.size .. rubric:: Methods .. autosummary:: :toctree: DOK.asformat DOK.from_coo DOK.from_numpy DOK.to_coo DOK.todense sparse-0.12.0/docs/generated/sparse.DOK.size.rst000066400000000000000000000001131402510130100213470ustar00rootroot00000000000000DOK\.size ========= .. currentmodule:: sparse .. autoattribute:: DOK.sizesparse-0.12.0/docs/generated/sparse.DOK.to_coo.rst000066400000000000000000000001201402510130100216550ustar00rootroot00000000000000DOK\.to\_coo ============ .. currentmodule:: sparse .. automethod:: DOK.to_coosparse-0.12.0/docs/generated/sparse.DOK.todense.rst000066400000000000000000000001211402510130100220350ustar00rootroot00000000000000DOK\.todense ============ .. currentmodule:: sparse .. automethod:: DOK.todensesparse-0.12.0/docs/generated/sparse.GCXS.T.rst000066400000000000000000000001021402510130100207250ustar00rootroot00000000000000GCXS.T ====== .. currentmodule:: sparse .. autoproperty:: GCXS.Tsparse-0.12.0/docs/generated/sparse.GCXS.__init__.rst000066400000000000000000000001351402510130100222670ustar00rootroot00000000000000GCXS.\_\_init\_\_ ================= .. currentmodule:: sparse .. automethod:: GCXS.__init__sparse-0.12.0/docs/generated/sparse.GCXS.all.rst000066400000000000000000000001061402510130100212760ustar00rootroot00000000000000GCXS.all ======== .. currentmodule:: sparse .. automethod:: GCXS.allsparse-0.12.0/docs/generated/sparse.GCXS.amax.rst000066400000000000000000000001111402510130100214500ustar00rootroot00000000000000GCXS.amax ========= .. currentmodule:: sparse .. automethod:: GCXS.amaxsparse-0.12.0/docs/generated/sparse.GCXS.amin.rst000066400000000000000000000001111402510130100214460ustar00rootroot00000000000000GCXS.amin ========= .. currentmodule:: sparse .. automethod:: GCXS.aminsparse-0.12.0/docs/generated/sparse.GCXS.any.rst000066400000000000000000000001061402510130100213150ustar00rootroot00000000000000GCXS.any ======== .. currentmodule:: sparse .. automethod:: GCXS.anysparse-0.12.0/docs/generated/sparse.GCXS.asformat.rst000066400000000000000000000001251402510130100223430ustar00rootroot00000000000000GCXS.asformat ============= .. currentmodule:: sparse .. 
automethod:: GCXS.asformatsparse-0.12.0/docs/generated/sparse.GCXS.astype.rst000066400000000000000000000001171402510130100220350ustar00rootroot00000000000000GCXS.astype =========== .. currentmodule:: sparse .. automethod:: GCXS.astypesparse-0.12.0/docs/generated/sparse.GCXS.change_compressed_axes.rst000066400000000000000000000002031402510130100252150ustar00rootroot00000000000000GCXS.change\_compressed\_axes ============================= .. currentmodule:: sparse .. automethod:: GCXS.change_compressed_axessparse-0.12.0/docs/generated/sparse.GCXS.clip.rst000066400000000000000000000001111402510130100214510ustar00rootroot00000000000000GCXS.clip ========= .. currentmodule:: sparse .. automethod:: GCXS.clipsparse-0.12.0/docs/generated/sparse.GCXS.compressed_axes.rst000066400000000000000000000001561402510130100237170ustar00rootroot00000000000000GCXS.compressed\_axes ===================== .. currentmodule:: sparse .. autoproperty:: GCXS.compressed_axessparse-0.12.0/docs/generated/sparse.GCXS.conj.rst000066400000000000000000000001111402510130100214530ustar00rootroot00000000000000GCXS.conj ========= .. currentmodule:: sparse .. automethod:: GCXS.conjsparse-0.12.0/docs/generated/sparse.GCXS.copy.rst000066400000000000000000000001111402510130100214740ustar00rootroot00000000000000GCXS.copy ========= .. currentmodule:: sparse .. automethod:: GCXS.copysparse-0.12.0/docs/generated/sparse.GCXS.density.rst000066400000000000000000000001241402510130100222050ustar00rootroot00000000000000GCXS.density ============ .. currentmodule:: sparse .. autoproperty:: GCXS.densitysparse-0.12.0/docs/generated/sparse.GCXS.dot.rst000066400000000000000000000001061402510130100213140ustar00rootroot00000000000000GCXS.dot ======== .. currentmodule:: sparse .. automethod:: GCXS.dotsparse-0.12.0/docs/generated/sparse.GCXS.dtype.rst000066400000000000000000000001161402510130100216540ustar00rootroot00000000000000GCXS.dtype ========== .. currentmodule:: sparse .. autoproperty:: GCXS.dtypesparse-0.12.0/docs/generated/sparse.GCXS.flatten.rst000066400000000000000000000001221402510130100221610ustar00rootroot00000000000000GCXS.flatten ============ .. currentmodule:: sparse .. automethod:: GCXS.flattensparse-0.12.0/docs/generated/sparse.GCXS.from_coo.rst000066400000000000000000000001271402510130100223340ustar00rootroot00000000000000GCXS.from\_coo ============== .. currentmodule:: sparse .. automethod:: GCXS.from_coosparse-0.12.0/docs/generated/sparse.GCXS.from_iter.rst000066400000000000000000000001321402510130100225130ustar00rootroot00000000000000GCXS.from\_iter =============== .. currentmodule:: sparse .. automethod:: GCXS.from_itersparse-0.12.0/docs/generated/sparse.GCXS.from_numpy.rst000066400000000000000000000001351402510130100227230ustar00rootroot00000000000000GCXS.from\_numpy ================ .. currentmodule:: sparse .. automethod:: GCXS.from_numpysparse-0.12.0/docs/generated/sparse.GCXS.from_scipy_sparse.rst000066400000000000000000000001641402510130100242610ustar00rootroot00000000000000GCXS.from\_scipy\_sparse ======================== .. currentmodule:: sparse .. automethod:: GCXS.from_scipy_sparsesparse-0.12.0/docs/generated/sparse.GCXS.imag.rst000066400000000000000000000001131402510130100214410ustar00rootroot00000000000000GCXS.imag ========= .. currentmodule:: sparse .. autoproperty:: GCXS.imagsparse-0.12.0/docs/generated/sparse.GCXS.max.rst000066400000000000000000000001061402510130100213130ustar00rootroot00000000000000GCXS.max ======== .. currentmodule:: sparse .. 
automethod:: GCXS.maxsparse-0.12.0/docs/generated/sparse.GCXS.maybe_densify.rst000066400000000000000000000001461402510130100233500ustar00rootroot00000000000000GCXS.maybe\_densify =================== .. currentmodule:: sparse .. automethod:: GCXS.maybe_densifysparse-0.12.0/docs/generated/sparse.GCXS.mean.rst000066400000000000000000000001111402510130100214420ustar00rootroot00000000000000GCXS.mean ========= .. currentmodule:: sparse .. automethod:: GCXS.meansparse-0.12.0/docs/generated/sparse.GCXS.min.rst000066400000000000000000000001061402510130100213110ustar00rootroot00000000000000GCXS.min ======== .. currentmodule:: sparse .. automethod:: GCXS.minsparse-0.12.0/docs/generated/sparse.GCXS.nbytes.rst000066400000000000000000000001211402510130100220270ustar00rootroot00000000000000GCXS.nbytes =========== .. currentmodule:: sparse .. autoproperty:: GCXS.nbytessparse-0.12.0/docs/generated/sparse.GCXS.ndim.rst000066400000000000000000000001131402510130100214530ustar00rootroot00000000000000GCXS.ndim ========= .. currentmodule:: sparse .. autoproperty:: GCXS.ndimsparse-0.12.0/docs/generated/sparse.GCXS.nnz.rst000066400000000000000000000001101402510130100213260ustar00rootroot00000000000000GCXS.nnz ======== .. currentmodule:: sparse .. autoproperty:: GCXS.nnzsparse-0.12.0/docs/generated/sparse.GCXS.prod.rst000066400000000000000000000001111402510130100214660ustar00rootroot00000000000000GCXS.prod ========= .. currentmodule:: sparse .. automethod:: GCXS.prodsparse-0.12.0/docs/generated/sparse.GCXS.real.rst000066400000000000000000000001131402510130100214470ustar00rootroot00000000000000GCXS.real ========= .. currentmodule:: sparse .. autoproperty:: GCXS.realsparse-0.12.0/docs/generated/sparse.GCXS.reduce.rst000066400000000000000000000001171402510130100217770ustar00rootroot00000000000000GCXS.reduce =========== .. currentmodule:: sparse .. automethod:: GCXS.reducesparse-0.12.0/docs/generated/sparse.GCXS.reshape.rst000066400000000000000000000001221402510130100221530ustar00rootroot00000000000000GCXS.reshape ============ .. currentmodule:: sparse .. automethod:: GCXS.reshapesparse-0.12.0/docs/generated/sparse.GCXS.round.rst000066400000000000000000000001141402510130100216540ustar00rootroot00000000000000GCXS.round ========== .. currentmodule:: sparse .. automethod:: GCXS.roundsparse-0.12.0/docs/generated/sparse.GCXS.round_.rst000066400000000000000000000001211402510130100220110ustar00rootroot00000000000000GCXS.round\_ ============ .. currentmodule:: sparse .. automethod:: GCXS.round_sparse-0.12.0/docs/generated/sparse.GCXS.rst000066400000000000000000000022621402510130100205340ustar00rootroot00000000000000GCXS ==== .. currentmodule:: sparse .. autoclass:: GCXS .. rubric:: Attributes .. autosummary:: :toctree: GCXS.T GCXS.compressed_axes GCXS.density GCXS.dtype GCXS.imag GCXS.nbytes GCXS.ndim GCXS.nnz GCXS.real GCXS.size .. rubric:: Methods .. autosummary:: :toctree: GCXS.__init__ GCXS.all GCXS.amax GCXS.amin GCXS.any GCXS.asformat GCXS.astype GCXS.change_compressed_axes GCXS.clip GCXS.conj GCXS.copy GCXS.dot GCXS.flatten GCXS.from_coo GCXS.from_iter GCXS.from_numpy GCXS.from_scipy_sparse GCXS.max GCXS.maybe_densify GCXS.mean GCXS.min GCXS.prod GCXS.reduce GCXS.reshape GCXS.round GCXS.round_ GCXS.std GCXS.sum GCXS.to_scipy_sparse GCXS.tocoo GCXS.todense GCXS.todok GCXS.transpose GCXS.var sparse-0.12.0/docs/generated/sparse.GCXS.size.rst000066400000000000000000000001131402510130100214760ustar00rootroot00000000000000GCXS.size ========= .. currentmodule:: sparse .. 
autoproperty:: GCXS.sizesparse-0.12.0/docs/generated/sparse.GCXS.std.rst000066400000000000000000000001061402510130100213200ustar00rootroot00000000000000GCXS.std ======== .. currentmodule:: sparse .. automethod:: GCXS.stdsparse-0.12.0/docs/generated/sparse.GCXS.sum.rst000066400000000000000000000001061402510130100213320ustar00rootroot00000000000000GCXS.sum ======== .. currentmodule:: sparse .. automethod:: GCXS.sumsparse-0.12.0/docs/generated/sparse.GCXS.to_scipy_sparse.rst000066400000000000000000000001561402510130100237410ustar00rootroot00000000000000GCXS.to\_scipy\_sparse ====================== .. currentmodule:: sparse .. automethod:: GCXS.to_scipy_sparsesparse-0.12.0/docs/generated/sparse.GCXS.tocoo.rst000066400000000000000000000001141402510130100216500ustar00rootroot00000000000000GCXS.tocoo ========== .. currentmodule:: sparse .. automethod:: GCXS.tocoosparse-0.12.0/docs/generated/sparse.GCXS.todense.rst000066400000000000000000000001221402510130100221650ustar00rootroot00000000000000GCXS.todense ============ .. currentmodule:: sparse .. automethod:: GCXS.todensesparse-0.12.0/docs/generated/sparse.GCXS.todok.rst000066400000000000000000000001141402510130100216450ustar00rootroot00000000000000GCXS.todok ========== .. currentmodule:: sparse .. automethod:: GCXS.todoksparse-0.12.0/docs/generated/sparse.GCXS.transpose.rst000066400000000000000000000001301402510130100225410ustar00rootroot00000000000000GCXS.transpose ============== .. currentmodule:: sparse .. automethod:: GCXS.transposesparse-0.12.0/docs/generated/sparse.GCXS.var.rst000066400000000000000000000001061402510130100213160ustar00rootroot00000000000000GCXS.var ======== .. currentmodule:: sparse .. automethod:: GCXS.varsparse-0.12.0/docs/generated/sparse.SparseArray.asformat.rst000066400000000000000000000001521402510130100240330ustar00rootroot00000000000000SparseArray.asformat ==================== .. currentmodule:: sparse .. automethod:: SparseArray.asformatsparse-0.12.0/docs/generated/sparse.SparseArray.density.rst000066400000000000000000000001541402510130100237000ustar00rootroot00000000000000SparseArray\.density ==================== .. currentmodule:: sparse .. autoattribute:: SparseArray.densitysparse-0.12.0/docs/generated/sparse.SparseArray.ndim.rst000066400000000000000000000001431402510130100231460ustar00rootroot00000000000000SparseArray\.ndim ================= .. currentmodule:: sparse .. autoattribute:: SparseArray.ndimsparse-0.12.0/docs/generated/sparse.SparseArray.nnz.rst000066400000000000000000000001401402510130100230210ustar00rootroot00000000000000SparseArray\.nnz ================ .. currentmodule:: sparse .. autoattribute:: SparseArray.nnzsparse-0.12.0/docs/generated/sparse.SparseArray.rst000066400000000000000000000005401402510130100222210ustar00rootroot00000000000000SparseArray =========== .. currentmodule:: sparse .. autoclass:: SparseArray .. rubric:: Attributes .. autosummary:: :toctree: SparseArray.density SparseArray.ndim SparseArray.nnz SparseArray.size .. rubric:: Methods .. autosummary:: :toctree: SparseArray.asformat SparseArray.todense sparse-0.12.0/docs/generated/sparse.SparseArray.size.rst000066400000000000000000000001431402510130100231710ustar00rootroot00000000000000SparseArray\.size ================= .. currentmodule:: sparse .. autoattribute:: SparseArray.sizesparse-0.12.0/docs/generated/sparse.SparseArray.todense.rst000066400000000000000000000001471402510130100236640ustar00rootroot00000000000000SparseArray.todense =================== .. currentmodule:: sparse .. 
automethod:: SparseArray.todensesparse-0.12.0/docs/generated/sparse.argwhere.rst000066400000000000000000000001101402510130100215620ustar00rootroot00000000000000argwhere ======== .. currentmodule:: sparse .. autofunction:: argwheresparse-0.12.0/docs/generated/sparse.as_coo.rst000066400000000000000000000001041402510130100212240ustar00rootroot00000000000000as\_coo ======= .. currentmodule:: sparse .. autofunction:: as_coosparse-0.12.0/docs/generated/sparse.clip.rst000066400000000000000000000000741402510130100207160ustar00rootroot00000000000000clip ==== .. currentmodule:: sparse .. autofunction:: clipsparse-0.12.0/docs/generated/sparse.concatenate.rst000066400000000000000000000001211402510130100222440ustar00rootroot00000000000000concatenate =========== .. currentmodule:: sparse .. autofunction:: concatenatesparse-0.12.0/docs/generated/sparse.diagonal.rst000066400000000000000000000001151402510130100215410ustar00rootroot00000000000000diagonal ============= .. currentmodule:: sparse .. autofunction:: diagonalsparse-0.12.0/docs/generated/sparse.diagonalize.rst000066400000000000000000000001211402510130100222460ustar00rootroot00000000000000diagonalize =========== .. currentmodule:: sparse .. autofunction:: diagonalizesparse-0.12.0/docs/generated/sparse.dot.rst000066400000000000000000000000711402510130100205520ustar00rootroot00000000000000dot === .. currentmodule:: sparse .. autofunction:: dotsparse-0.12.0/docs/generated/sparse.elemwise.rst000066400000000000000000000001101402510130100215700ustar00rootroot00000000000000elemwise ======== .. currentmodule:: sparse .. autofunction:: elemwisesparse-0.12.0/docs/generated/sparse.eye.rst000066400000000000000000000000721402510130100205470ustar00rootroot00000000000000eye === .. currentmodule:: sparse .. autofunction:: eye sparse-0.12.0/docs/generated/sparse.full.rst000066400000000000000000000000751402510130100207320ustar00rootroot00000000000000full ==== .. currentmodule:: sparse .. autofunction:: full sparse-0.12.0/docs/generated/sparse.full_like.rst000066400000000000000000000001141402510130100217300ustar00rootroot00000000000000full_like ========= .. currentmodule:: sparse .. autofunction:: full_like sparse-0.12.0/docs/generated/sparse.isneginf.rst000066400000000000000000000001101402510130100215600ustar00rootroot00000000000000isneginf ======== .. currentmodule:: sparse .. autofunction:: isneginfsparse-0.12.0/docs/generated/sparse.isposinf.rst000066400000000000000000000001101402510130100216100ustar00rootroot00000000000000isposinf ======== .. currentmodule:: sparse .. autofunction:: isposinfsparse-0.12.0/docs/generated/sparse.kron.rst000066400000000000000000000000751402510130100207410ustar00rootroot00000000000000kron ==== .. currentmodule:: sparse .. autofunction:: kron sparse-0.12.0/docs/generated/sparse.load_npz.rst000066400000000000000000000001121402510130100215660ustar00rootroot00000000000000load\_npz ========= .. currentmodule:: sparse .. autofunction:: load_npzsparse-0.12.0/docs/generated/sparse.matmul.rst000066400000000000000000000001021402510130100212560ustar00rootroot00000000000000matmul ====== .. currentmodule:: sparse .. autofunction:: matmulsparse-0.12.0/docs/generated/sparse.moveaxis.rst000066400000000000000000000001101402510130100216110ustar00rootroot00000000000000moveaxis ======== .. currentmodule:: sparse .. autofunction:: moveaxissparse-0.12.0/docs/generated/sparse.nanmax.rst000066400000000000000000000001021402510130100212410ustar00rootroot00000000000000nanmax ====== .. currentmodule:: sparse .. 
autofunction:: nanmaxsparse-0.12.0/docs/generated/sparse.nanmean.rst000066400000000000000000000001061402510130100214000ustar00rootroot00000000000000nanmean ======= .. currentmodule:: sparse .. autofunction:: nanmean sparse-0.12.0/docs/generated/sparse.nanmin.rst000066400000000000000000000001021402510130100212370ustar00rootroot00000000000000nanmin ====== .. currentmodule:: sparse .. autofunction:: nanminsparse-0.12.0/docs/generated/sparse.nanprod.rst000066400000000000000000000001051402510130100214230ustar00rootroot00000000000000nanprod ======= .. currentmodule:: sparse .. autofunction:: nanprodsparse-0.12.0/docs/generated/sparse.nanreduce.rst000066400000000000000000000001131402510130100217250ustar00rootroot00000000000000nanreduce ========= .. currentmodule:: sparse .. autofunction:: nanreducesparse-0.12.0/docs/generated/sparse.nansum.rst000066400000000000000000000001021402510130100212600ustar00rootroot00000000000000nansum ====== .. currentmodule:: sparse .. autofunction:: nansumsparse-0.12.0/docs/generated/sparse.ones.rst000066400000000000000000000000751402510130100207340ustar00rootroot00000000000000ones ==== .. currentmodule:: sparse .. autofunction:: ones sparse-0.12.0/docs/generated/sparse.ones_like.rst000066400000000000000000000001141402510130100217320ustar00rootroot00000000000000ones_like ========= .. currentmodule:: sparse .. autofunction:: ones_like sparse-0.12.0/docs/generated/sparse.outer.rst000066400000000000000000000000771402510130100211300ustar00rootroot00000000000000outer ===== .. currentmodule:: sparse .. autofunction:: outersparse-0.12.0/docs/generated/sparse.random.rst000066400000000000000000000001021402510130100212370ustar00rootroot00000000000000random ====== .. currentmodule:: sparse .. autofunction:: randomsparse-0.12.0/docs/generated/sparse.result_type.rst000066400000000000000000000001211402510130100223370ustar00rootroot00000000000000result_type =========== .. currentmodule:: sparse .. autofunction:: result_typesparse-0.12.0/docs/generated/sparse.roll.rst000066400000000000000000000000741402510130100207370ustar00rootroot00000000000000roll ==== .. currentmodule:: sparse .. autofunction:: rollsparse-0.12.0/docs/generated/sparse.rst000066400000000000000000000013061402510130100177670ustar00rootroot00000000000000API === .. rubric:: Description .. automodule:: sparse .. currentmodule:: sparse .. rubric:: Classes .. autosummary:: :toctree: COO DOK GCXS SparseArray .. rubric:: Functions .. autosummary:: :toctree: argwhere as_coo concatenate clip diagonal diagonalize dot elemwise eye full full_like isposinf isneginf kron load_npz matmul moveaxis nanmax nanmean nanmin nanprod nanreduce nansum ones ones_like outer random result_type roll save_npz stack tensordot tril triu where zeros zeros_like sparse-0.12.0/docs/generated/sparse.save_npz.rst000066400000000000000000000001121402510130100216050ustar00rootroot00000000000000save\_npz ========= .. currentmodule:: sparse .. autofunction:: save_npzsparse-0.12.0/docs/generated/sparse.stack.rst000066400000000000000000000000771402510130100210770ustar00rootroot00000000000000stack ===== .. currentmodule:: sparse .. autofunction:: stacksparse-0.12.0/docs/generated/sparse.tensordot.rst000066400000000000000000000001131402510130100220020ustar00rootroot00000000000000tensordot ========= .. currentmodule:: sparse .. autofunction:: tensordotsparse-0.12.0/docs/generated/sparse.tril.rst000066400000000000000000000000741402510130100207410ustar00rootroot00000000000000tril ==== .. currentmodule:: sparse .. 
autofunction:: trilsparse-0.12.0/docs/generated/sparse.triu.rst000066400000000000000000000000741402510130100207520ustar00rootroot00000000000000triu ==== .. currentmodule:: sparse .. autofunction:: triusparse-0.12.0/docs/generated/sparse.where.rst000066400000000000000000000000771402510130100211040ustar00rootroot00000000000000where ===== .. currentmodule:: sparse .. autofunction:: wheresparse-0.12.0/docs/generated/sparse.zeros.rst000066400000000000000000000001001402510130100211170ustar00rootroot00000000000000zeros ===== .. currentmodule:: sparse .. autofunction:: zeros sparse-0.12.0/docs/generated/sparse.zeros_like.rst000066400000000000000000000001171402510130100221330ustar00rootroot00000000000000zeros_like ========== .. currentmodule:: sparse .. autofunction:: zeros_like sparse-0.12.0/docs/index.rst000066400000000000000000000056151402510130100156520ustar00rootroot00000000000000Sparse ====== .. image:: logo.png :alt: Logo :align: center :width: 20em This implements sparse arrays of arbitrary dimension on top of :obj:`numpy` and :obj:`scipy.sparse`. It generalizes the :obj:`scipy.sparse.coo_matrix` and :obj:`scipy.sparse.dok_matrix` layouts, but extends beyond just rows and columns to an arbitrary number of dimensions. Additionally, this project maintains compatibility with the :obj:`numpy.ndarray` interface rather than the :obj:`numpy.matrix` interface used in :obj:`scipy.sparse` These differences make this project useful in certain situations where scipy.sparse matrices are not well suited, but it should not be considered a full replacement. It lacks layouts that are not easily generalized like CSR/CSC and depends on scipy.sparse for some computations. Motivation ---------- Sparse arrays, or arrays that are mostly empty or filled with zeros, are common in many scientific applications. To save space we often avoid storing these arrays in traditional dense formats, and instead choose different data structures. Our choice of data structure can significantly affect our storage and computational costs when working with these arrays. Design ------ The main data structure in this library follows the `Coordinate List (COO) `_ layout for sparse matrices, but extends it to multiple dimensions. The COO layout, which stores the row index, column index, and value of every element: === === ==== row col data === === ==== 0 0 10 0 2 13 1 3 9 3 8 21 === === ==== It is straightforward to extend the COO layout to an arbitrary number of dimensions: ==== ==== ==== === ==== dim1 dim2 dim3 ... data ==== ==== ==== === ==== 0 0 0 . 10 0 0 3 . 13 0 2 2 . 9 3 1 4 . 21 ==== ==== ==== === ==== This makes it easy to *store* a multidimensional sparse array, but we still need to reimplement all of the array operations like transpose, reshape, slicing, tensordot, reductions, etc., which can be challenging in general. Fortunately in many cases we can leverage the existing :obj:`scipy.sparse` algorithms if we can intelligently transpose and reshape our multi-dimensional array into an appropriate 2-d sparse matrix, perform a modified sparse matrix operation, and then reshape and transpose back. These reshape and transpose operations can all be done at numpy speeds by modifying the arrays of coordinates. After scipy.sparse runs its operations (often written in C) then we can convert back to using the same path of reshapings and transpositions in reverse. LICENSE ------- This library is licensed under BSD-3 .. 
toctree:: :maxdepth: 3 :hidden: install quickstart construct operations generated/sparse roadmap contributing changelog conduct .. _scipy.sparse: https://docs.scipy.org/doc/scipy/reference/sparse.html sparse-0.12.0/docs/install.rst000066400000000000000000000007631402510130100162100ustar00rootroot00000000000000.. currentmodule:: sparse Install ======= You can install this library with ``pip``: .. code-block:: bash pip install sparse You can also install from source from GitHub, either by pip installing directly:: pip install git+https://github.com/pydata/sparse Or by cloning the repository and installing locally:: git clone https://github.com/pydata/sparse.git cd sparse/ pip install . Note that this library is under active development and so some API churn should be expected.
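As a quick check that the installation above works, and to make the coordinate (COO) layout described in ``docs/index.rst`` concrete, the following minimal sketch builds a 3-dimensional :obj:`COO` array from the same coordinates and values shown in the Design table there; the ``shape`` passed below is an illustrative assumption chosen only to cover those indices.

.. code-block:: python

    import numpy as np
    import sparse

    # One row of coordinates per dimension, one column per stored value,
    # matching the dim1/dim2/dim3/data table in the Design section.
    coords = np.array([[0, 0, 0, 3],   # dim1
                       [0, 0, 2, 1],   # dim2
                       [0, 3, 2, 4]])  # dim3
    data = np.array([10, 13, 9, 21])

    x = sparse.COO(coords, data, shape=(4, 3, 5))  # shape is an arbitrary choice covering the indices
    print(x.nnz)        # 4 stored values
    print(x.todense())  # materialize as a dense numpy.ndarray

Only the four listed values are stored; everything else is treated as the fill value (zero by default), which keeps the memory cost proportional to the number of stored values rather than to the full shape.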
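The Design section of ``docs/index.rst`` above notes that many operations are carried out by reshaping and transposing the N-dimensional array into a 2-d matrix, handing it to :obj:`scipy.sparse`, and then converting back. A rough sketch of that idea using only the public API (the shapes and density below are arbitrary illustrative values):

.. code-block:: python

    import sparse

    x = sparse.random((10, 10, 10), density=0.05)  # random 3-d COO array

    # Collapse the two trailing axes so the array becomes a 2-d sparse matrix
    # that scipy.sparse understands.
    m = x.reshape((10, 100)).to_scipy_sparse()

    # Any scipy.sparse computation can now run on it (often implemented in C)...
    result = m @ m.T

    # ...and the result can be converted back into a pydata/sparse array.
    y = sparse.COO.from_scipy_sparse(result)
    print(y.shape)  # (10, 10)

This only mirrors, at user level, the strategy the documentation describes; internally the reshape and transpose bookkeeping is done directly on the coordinate arrays at numpy speed.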
sparse-0.12.0/docs/logo.png [binary PNG image data for the project logo — not reproduced in this text dump]
Yw+ñ×ü *šÄs„ý‡©Ç"¥¢iœì*ÖÊc=û/Xš|‡pâö¾XI€ã‚íh©Ò¨ Sù!À,h˜’ %„ó$H !„Kí‚WBÔ9Ÿy1¼u·àµmÂ]~¹Å‘ÈÚ§xM^0Ï|"Ÿ)ôî§²ÿ?©ìÿž¬Ä ¹=Y¼å›?çµ_Ý¿ùg»—úû”xË'ç Sz|覻‰]ÿS6H† û€S‡A*Ö€“Y…Š¥Ð¥QÂÞý(ǽXÍF©xKÙ0¥” S¥qôxŸ T•"8îcs‘ %„ó|bKB!–ÊîÖmÆD”u¬ÑîòÍx+¯Â[q%n“ QxQ0Æ>­+?ʱ#;"qÎz"_1Gpz•=ßÀ?þ¬¬ÖW,Òx+¶Øm²| ns*Ñô9ÜÈvåEsKû}[ Smë;&ž Çú‰^q±kߊ&Ñźïpý)*‘ÁɬDES˜âAßÁA ¼8*Çi\aŸÌ ti 3ÚcoÈ Ê`.îÜo¤„bžOj RB!D½_ÐnÝfL´‡{dm¼üªŠ%q[6Øø±òJ¼ÖÕQQÌÄœ3c}} zöÙ[¤‚ñ›ÞKdã]¨X’™a*@çñ=CùG_#ì?$«ù|¶HĆA·m#ÞÊ«q[×ã$›À‹M„Áýÿ³÷ÞAv^çæsÎî½oçt#gÌ”DRàHV²dÓ´ÆvÙ«‘Öµ®™Ù0ÆlÕ®]µ;³PÕxwõÏj¼®õÌìhe{g'Ø–-K'ˆ”(RL`‘¹sº}ÓÎÙ?ÎE£@“ ˆÐá}J*ªÔ$îíïÜû}ç<|ßß;{ùP:üî¡Ìý_\Õbjæ_}áÐùÁ®²õ fv„p×çÈìù*ÈÞE‘s›±UׂnꩵN‘Žž\Px_Ú0)*Ⱥ÷ÐØuux5˜Ê ¶Z„¸Š­!%‚°Ä>@„” ‚ ¬LDD}€ O¦¯u=^çVüž]x[Ñu-(?ƒÅBTÂÌ‘Œž =A:y¢¢#QÂ:²þ2~ÿ^T˜ãª˜rùLa„xè*/ý{LiòÊ+3Œ.\Åóñ;¶âµoÀïÛ‹×¾ÝÐ>½çÅàØI’Ëo“ŸI&OÒ–Cù‡§WïwzžæîßÖAã©6fö>C¸ó35!5E2vzÅEHYcÐ íèÆnT˜Ã–&Ýïq+½lšÔ¡ê[Ñ(/¼ò1Õ9lyâ 6*Þv1%BJa‰ý™)AAXY¤“g÷W_ýÓƒÑéç>nË3rAÞk£“iDç{ñ;6ã÷ÝWQ­dP(l\ÆÌ“ŽŸ"=N:zªvpÍÞø`kAY2{ŸÆëÜz˜²iŒ™¹L|â0•Wÿ•dâx­xù>‚ÁGñ:6¡: ¬s·¶¦0Š©PJ ›FF¤S—Êéä雞{埭v1e0ê?ñ?wüL³ò3îšÌ\ºãíe·k º±ÝÐáZçÆI'†îªºú^R°Ö;IVßž凘j[š¤â*§0·åuEH ‚ ,±O!%‚ +†ýÀAàã6HÇNQyñI.¿­ÎÉÕY¸žî ² IDATÁÉ4 ;ñ:·¬»¯}“«À ²5ñQÁ”&I'†HÇN’ u­=aîæ^@ûèÆN2;>n]¿8cÊZla&ÏR=ú·T_ÿ3÷šk/@×·ãµo x¿{;º±®N'ULq S‡š4°•¥[¨La¢¿û×ÿ¢ñüáï¬æË½ó×ù`ëþƒÊÏþf:u[ž†4ZQ¿ƒ5)º©ÝÐŽ r˜¹±šÒ÷ð=°ÆI²\kvùÀåÃU ØÒ4$ÕšTþpbJ„” Âû5R‚ ‚°¼)ü‡<˜ýȯò{wÿüµáÚ6©’\|ƒÊ ÿéØil³–[ÄTX‡®oÅëÚN°þ!¼ŽÍW[Á”‚¸Š)Mb&Ï‘Œ ¹ô6¤*Sk/ègÐͽdvý,º±J½@L%ÒÑ“DGÿšêÛµöÄóÑÙftË:‚uà¯ÝÔ… ëPÊæ‘ Š&˜ò”Q&y©&ħžIÎ<÷ÛùÏ~c•_ÉÁäò;ÿ›­ÌþŠòƒõÆmš ›ûjB*‹™ÃL Ý• ©÷}oƉ&ÝÐá2¦² ôü÷×”§œS·vo!%‚°Ä¾M„” ‚ ,O¦í´ÖDû_VÙ2;>M¸ë³nê›°HLU爇^¤òâ’Î\rU&kiCãgP¹f¼®m„?vUDYך—D˜Òfú<éè âóG ­¢²þP›DT%Üõ9t®<ïêúXƒŠ$—ß¡úö&>ùÜê_í£Â:¼–~üu¬ÝÜ‹Ê6¢´‡5©ËìIc¬I0…Qleö¦DÔu×?®ŸùéšS3ÿúƒ­O}=èß÷Ô-KÔ»ŒM¼|?ª¡ùÖÃɳËBHÍ¿GcQJ¡ÚÑõ­¨°…a]¦<-Maã Äe>¨˜!%‚°ÄþM„” ‚ ,/®Š(ïË×ó›zÈÜ÷ÂMO »®æŸ€û7ú¥IâS?¦òÒ¿ÃÌ­þ‹å¨°¿k;áÖ§æ3‰T­Ì&¶´_¸ëólxd÷rS6‰Ýg`^HNž[–Ó­µ(ª.ïZùê[Y˜gÊ3ØÒ´«èKªÜ¬˜!%‚°Ä¶A„” ‚ ,›Cæ’"êZ¼Î-döüÁ†Çn >LaŒèØ÷¨¾ñ-Lqbî`4*Èâwï Üñ©ùŒ¨y•&ØÊ,fv„äò[$ç^Á&T¦áÎj+PŠpû§·<‰Ê6]m—QsEù&fúÂjXк¡pÓãø9 ‘Ë»j>k]€|a SwíOw¨½ôª˜úá¯äüèðê¾g<¾?Üõôï.g1e“¯e=ª±¥La”tjy ©ù÷l†RÙ&7•¯®uAŔŔ¦°¥™Z«iô¾ŸcR‚ KìDH ‚ ½>TÞ¼ˆº¿Ù}Ïà÷ヲͤ6ñmê<Õ·ÿšèØ÷\Êjؼø¼ždv|ºÖš×áBÅ•®Uà13—H.¾A|îךÞíúÂ&U@‘Ùý9‚Gœ [ЄI0³ÃDǾOåõ?c%OLT¹f­O¬¯u=ª®ågjB¢êðqªELa„»‘sfã ɹ#oÄ'ÿî7ELÝ; )S#:O¦ìÝÚ‡[£Â'¦UL)N`ËWÄÔÒ‚U„” ·XR‚ ‚p¯‘n´»²þ4ÍæÏ 6~„ì¾/âõìDyáb1WHGŽQ}û¯ˆ‡^pó¯Â-Ot.1eÓ¯e=º¡”ÂÌ»ª@¥VЕU51Uïr¦êZýÔ”¦°Å lTš˜k3R‚ KÜ]EH ‚ ÂÝ>4Þ>u-™]Ÿ#Üóüöµ|©bªZ$¹p„ÊkJ:zÜU¬\9l-c1¥2õø=» uQM=µpl—IDµ„™#¹üñ¹W°i‚ÒÞmÉ%º­Zí£³M„;?…×±æX,¦’ñÓDoþÅòŸÈç„Û>AÐw^ûFtC'„¹«“óÊ3.¨<*º5J¢ZÐþ½ûœ­-1õäW‚mŸþÚrS6M® )lMH]­Và•u­©*l@5´¹vé˜ÒvnÜUL-¸ÿˆAXâ®*BJAîÖ!ñΉ¨keAvï/îú,^¾¯–/µ`â[e–èôO¨¾öǤ“çÀ𫇭e$¦TX‡ß»Ýxíñò½¨l3Ê °Ö@Tv"jä]¢Ó?FéZÀû-Lj»«è/ßK¸ý“è–õó­mµÂÆUÒÑT_ÿ3¢‡—ׯ1Èâ÷ï«ÉÁÍèÆ®Z•šçªÔʳ˜Ò¤›˜g“ži²¬>WWÅÔ~>àðÐê¾çÔÄÔŽOvª…îæõNS¼Öõè†v¬5NTί° ©ë¾ n’d¶UׂÎ5s]+_e[ší1ûÇÿ­)A„ÝMEH ‚ Â>Þ%u º®•̃¿D¸e?º¾íº‰o¦<íÚÄŽ|sYŸ+?ƒß¿¿o/^û&¼|*׌ò3Xk°QÉ#$>ù¶:¡“"+k¦ñ;·lû$^s7èÅm4Gré-ªo~‡øÌOîí[ ë𺶠>âÖ¤¥ßÉÁ+“ó*H*˜jÑÄ«W2u̲½ü6®\~çoã£óW»˜*üÅoÿ^v×~C…Ùà®_gcðZÐõ­Xc0…Lateû¨«wYð|T¦•kªµò-¨J-Ïb*³¿ûµ¯6ýýÿó < A®Ù_ˆA„;Çä¡'Þmu-^çV|>ðk1™ŸøæÚÄLqœêëߢúö_a«…{·)ñ3ø}÷áuïÀïÜŠnY‡®…c_QvnŒdô8ɹW0ÅI@¡ÂìŠþŒX“nüÁ¦'œ8\8‘ÏLy†äÂkTßüÉ…×¹›ÕF*S×¾¿~ç¼¶ Nz¡QQ37æÚñ¢2¦:禎Y³r®ÿSÓ‡öç3~é÷ýÎíÏÜM1eÅk@×µ¸A ³#Øâ¨Utq•ç*¦2 ¨útváíÞbf‡¿ª›zÊQášÛ§)AA¸‡¿'¿b­ÍÀryOÁà#dî{¿g*S¿HLÙ$ÂÌSyùß?|Wƒµ•âõîÆïچߵݺÞM³ò3nþú•Ö¼±“$_ÇF{M«ÛÊÇV‹„;?C¸ù T¦ñš‰|)¦4E|æ'TßúéèIRA¯c~Ï.¼Î­.'êÚ5)ŒL¡³MØ4¤²¢DÔu×_ÄÔÁXtÛ RI3;Œ-M­.!uõf^ˆÊÔ»V¾lÓ•Ÿ|8(OFA„kn›"¤Aávö–Ÿˆº–pçgÈìþYWí䮊k±q™trˆòþ5ÉÅ7îì&Äϸö¯îíN|´ot-/AÖI¸Œ™';Irùwµ)Ê VõgÈZKfûÏlx̉Å'÷4ÁÌø!Õ7¿™á¶Š)/Àkßèä`÷—UßAÎý<*cæF1…ï…Ø¨ÄrnÍûÀ×?®Ÿ{ù?$'ŸûÇù‡§Wï½jÿ`¸ï™¯ýûžº“ÁçÖ‚×¶kÆÆeÌìe—1¶ªOXNLél#dйüWA”'¤ Â5·KR‚ ‚p;wË_D]Kæþ_$³ësèæ^”r5øÜb+³$ÃïP~þß’Nœq§ÊÛµùrx­ëñ:¶à÷íY 
=ˆ¨âéøi’áw1Óç±fõ‹¨E‡ø$‚¤Löá/á÷íEeê¸v"_:}èø¨¾úÇØ¤úá^PûnMÚ7á÷îv-“]ÖDT\Á'0³Ã® Ê °å–ótÆ‹)LTâ ¯üßé¹Wþ™ˆ©[þ¶àµo@e›]‹çÌå{Ú|×¾ÃÆ¸I{:€´úÕ`ÃGÊ“Ráš§„)AAø0‡¹•'¢mÂ:²ý*áÖ§ÜXvoáD>‹)OŸyò›T>”˜RAïÃëØLÐw^çVtC»Q0/=Òñ3¤£ÇI'Î`M²¦DÔu‡Ú¸‚ ëÉ>øËx›QaŽE¡ÉI„™õ£üã•kÂkêÁ_w?ÁºÑù>T¶éªˆŠJ®0±¥ILiL,—¾†)LTâwÿú_4þƒ?üÕ~¯ûðbJÒ®B*Óà„Ôô€¿†!%‚°ÄSB„” ‚ ÜÜál5‹¨k 6?Afçgñ{w£ÂúÅßÒ„tâ4å—þñÉç®ß\øT®Ùµæ <‚ßµm>h^DUfHg.“ŽŸ"9ÿ6*¢Â:ù ÝôÎ#x`ãã.Þó¹*¦ ¶Z$¾ðÕ×þ„äÒ[ND…uè¦üþûÃkY‡Ê6¢´5)6.A9)UœÀ”¦\›žp6MˆO=?’œyî·óžýÆj¿÷ݺ˜R <¼ÎM¨ [™&º°æ>W"¤A–xJˆA„÷>Œ­%µx— ÉÜ÷s„[žÂëܲ¸M ‹+¤ÃG)ÿôÿ%¹ð:x>*¬w<?Š×µ ÝØ‰ 뜈ªµ™éK$c'I.¾Ž-M¡²òA»Uü ÁÀÃ>‚Î5»vÊ+˜Sž&9÷2ѱ ;6=Ž×²U—w-‘Öb£’“O liSšu“ظB|æ§kDL=ù•`Û§¿ny¢ó‰)/tRA®Vy~ÍU܉AXb«)BJAntøZÃ"êÚÍ‚Ÿ!ûð¯lx ¯uàšjœÚD¾ËoŸþ1^÷NüÎ-è†NW¥=W}SÃÌ\&=N|îelyFDÔmÂ&˜”Ì®Ïløˆ»®×´ZÚ4†$rÒ°–Íeã*¦2㪢ªÌܤҚwKk bêÆ×ÅZ”Ÿqü~SžÆLžuÓçÖ"¤A–ØcŠA„…‡­ýƒÖêoˆˆºÁ¦!—'÷è—A7u_|ŽI±6E)´v:Õ9la”døâ3/¸Ö¼Lƒ\ÌÛ·*€¶2 aŽì}Oã÷﻾Õ·ç»Ò’gM Õ"éÌEQ· Wˆ‡^9•œþþ•?ð£Ã«û^YS;>Ù©<ÿÆ“1à‡x]ÛQ~ˆ-ÍNœÌšú\ˆAXb#BJA®ˆ({í}Y®Æ{£›{É=öe‚‡Q¹f®¶ñX÷Ÿ¤Š™½LrñM¢Ó?†4FùÙ5w½‹[:Àb£ º¡ÌîÏ9 äj˳`¬Å'HGa«%·fÈ~ðvbã ɹ#oÄ'ÿî7W³˜š>´?ïm|ìw²»¾ð*Ì^7ÓšÔåõíÏÖgHÇO­¹Ïƒ)A„%v/"¤A„µŒˆ¨¸qð3x›¶ìÇï݃×6X›ô‹¤‡I1³ÃTßøÉÔ9W5µÆÚtî&¶ZD7uâ÷ìÁëÜŠnêBePÚ¯UI©kÄ”uáôã§Ig/CR]°†²7¼m벆ÄTæÑ/ý¾ß¹ý™…bʦ)˜”`ðÐ[ž%;¹Øa¯DH ‚ ,±¯!%‚ ¬EDD}À CÅëÜJ0ø(^ç¼æ>T®å…W—J/Î.lT";EõÍ?ÇÆPaR%uû°•*ۈ߿ËîÊ÷¡²ÍnržM±•Z6T\F5t¢;jqA8}‘ŽÀÌŽ€I!uÖi­‰©Ž-¿¬2õ.»,M6~”r-{ã'¯i%]ýˆAXb)BJAXKˆˆú€… ‹×µ ^ÇtK?º®åg°ˆ+˜â¤kÿŠJøë@7u£üÌÂã8¶Z$~‡ê«âr¤$ÐüCa£"ÊÏâ÷îÆë؊׺•Ë»œk\ˆüìÉå·H.4 xˆ`ãÇÐ]µpú+ ›¶— ¿ƒ-Mͯ›p›×-®\|ãùøØ÷~-àðÐj¾Ï†ûžùz0ðÈSfv˜ÌÞŸw¿iŠdìJë5µî"¤A–ØgŠAÖÓ‡öç æ€Rþÿ"Wã&6~¯{~Ï.WÕ:€®k… &šâ fnœtüñéŸNÇÌ\kÈì}šÌI±2Ö`˳Ä^£úÚ?· ÝO¸lTåá÷ìÀëÜŠ×6ˆ®oEùY¬5•I #¤Ãï_8‚)Œ¢³M‹+R”G°ñcƒ»5]No0åi’Ko¹ê+/@ÄÔXǸB|ò¹?9˜yà—†Vëï9óû?7˜}ôË3÷ÿâ—Áb‹“¤CÒ²'‚ ¸-‰)Aa53/¢¬M³\‘÷Áóñ{vãwn]$<rîçqÅbŸ&>ÿ*frˆtbWý1º¡Ì¿D¸ùI'=Vã˜Sœ >÷ щg×Z÷Î-a£ `ñ:·àwnqÕj í.´°q37Frù’‹¯»ö;/xoá§4á–§ðBçò‹¥•I1Åqâó¯BšÖþÙ3~x4ìì^Çf¼ö Ôý؃ÊÏ ­â_|ì¡túÂÏÛ™ÐÒ²'‚ ˆAV)3¿÷Ù|—(뉈º©s²‡×¾ ¿{~ÏN¼öèú¶ëEÔÄ’Ko‘ŽŸ"?JïùÇzíÉ>ü«øý÷»Jùj‹McÌÌe’¡‰†~Ê’£ã×06*ƒMñZð:·áwms•g™:ÊM3,NŽž ¹ø:éÔPê¦+ÏlAšyà‹ý÷£² ,*_IÒÙaâ¡ÐÙfd"ß­¿T¶ РÀoß ™:×*Yš$¹|”äÂkŽ;Øø«ÿzhµ^†øÌóû£c‡ÿpóã÷«LýšY~R‚ 7F„” ‚°ºðÕBÞÌ ˆ‡^:\<ÒŸ{Ùm[ Ò^ê ìµ¬ÇïÞáZôº¶¡ÚˆR â*¦4I:q†ôò;$£'œˆª>ÐËë"óà/ãwmuÁæjq¨¶™¢zôoHÇO£‚¬|ŽÓ⺩¯k~÷ts*¬Gi›DØòéÄÉÅ7H§/b“J­Åî^¯R@eêÉ<ðËøÝ;PaŽÅb*&¹„™¹ŒM*Dˆ˜º™ï—Ê4å…è¶A®ˆ[)` £Ø¸Œ2kÌܦ0ü«/þÑ?Ê8<½Z/Ëô¡Ç÷‡»žþÝ`Ã#»×‚˜!%‚pcDH ‚ «SÏëºÖX{À¦Q3&Áƈϼ@rá5âs¯ˆ˜Zˆà5÷ºª¨Þ]ø½{ÐõíæPJ»Ê›Ò$fâ,Éè1’áwIÇN`+…õ²á¶¿GfßñÚjâi˜ŠÊ¤#Ǩ¼ò°Æ ÿõpÓG7¬f1%BJa‰])Aa¥SüËþ¯sÇA¿{û€nD×åÁ2_ÉaJSDÇ@rñÉÅ7×´˜R~ÝØ…n]Oп¿w7º¡ó5™:G:v’äÒ›$£Ç?´ˆZð@Aöþ¿O¸ãSè|_­½L-"³$—ß¡úö_AZ]3kc¥^ÇüÞ=x-ý¨l³ËßJlµ€™¾H2r 3už´0rËQïû^â ~ÏN­O¡[Ö_³FW±ÅqÒÙa7•OD¯Cû.×˦àgðZPu-îªE%lµI[žÁ¦ÉûJ×µ#¦žüJ°íÓ_ ·<ѹÅ”)A„%v…"¤A„•|ˆ±Ö?ˆ§ts~ç6üþ½xmƒxùu¨º¼;Ô%Ulu3;B|æ'$ç_!>ÆZj9R~Uߊ×6H°î¼î]è¦.TXçDTcË3¤ÓçIGO’\|døÝÜšwóoH£Âz²ý Á¦ÇñšºOä|~þU¢7ÿ®Þ6>›¦`·6൮GÕµ ¼kÒÚg÷2éè Òɳ˜ÂÈÝüäà÷î"ܲÕØY`51e 6®ŽŸr!ê6]»7#í»I‡iŒ ²xíP¹Úý'.CTr÷¡Ò6>°H\sbjÇ';WS¦œ)A„%v"¤A„•xh±Ö?ˆf`ñ¡ÐC×·ãwo'Øð¼ŽMè†Î«­2IÕŸ:Orá5’ GHÆOC¯Þ½¢rÍxmðÂïÙnêtYDJcMâ²l¦/Œ"9ûÉð;ïV~ûÞ ÆkYGæþ_$xØ©ë+Q (Ìì0ñ™ç‰ÞýÞüZ®‚•q¿aeݲžpÓãèÖAt] ʱÖ`£"¦0†™8C:v‚tføžH›DTw|š`ãÇÜD>Ïc‘˜ªIFŽbçÆׯ´^Æ &kñ:6:UË_³q •±¥ '«>dE›*q|ò_­ûôÿü;«ù²ÿòŸ}-Üþ™ªÂl°~R‚ Kì†DH ‚ +…%EÔ ‰º¾ ¿wÁ¦Çñ;·º žL;D§1éì0éèq’Ëo“\z3}WVÏÅò|TØàªn6<æZó»P™Š}¥lf˜trˆèÄa’KoÝ;9§4~ï²÷ÿ"~ÏnT®ÉµVºc8XK:yŽøÄaâ³/צÁ­È­Êó±T˜%Øø8^Û º¾ åg°XˆÊµ‰†C¤£ÇœˆZ­‹¶R?Cfçgqk ˆ)“bÊÓµÏQ´ºÛø”/@g›PÙfT®•mD)ωï¤âZôæÆn©"ê=×!MˆŽ~w4>ö7¿•?ðì7Vïý~>óè—~ßïÜþÌJS"¤A–xœŠA–ÿÁ䉧­ ½¯ˆº/pbªáæ'ð:·Ö„ŒïÒiB:u–äò;¤#ïºlž™Ë®Åf¥¢=”Ÿq"jÓND5u_QWZÀ £¤ã§‰O>K|öe–Oû¢"Øô1²{¯sóõùÒ˜täñ‰’Œž¨Mƒ[[®ZvYŠ ë]_ç–yêjüäYâKoaf/C²ü2´l¥€ªk!³ó3ø}{ÝÔ¸yyˆSÅqÌì¦4¹,‡ñsß±lº.jhG‡î÷·ILŒ©°…‘ÛRõžëP-xNÄÔ @„” »#R‚ Âò=ˆ<¾ßÚð š ‚,*×L0ð0Áæ'ñÛ7AÆe'¡<¬‰IÇÏ^zË…xŸÆÇWXÅ”­ñÚ6nÿ~÷NtsÏUe D%ÌÜéØ)¢“ÏŸ} 
ÌòÍýÉìþYÂ]?‹×6xý´·¨Lráѱïaæ&–yƔ¦1:S?ø~×6tC‡“iÊæ¶<ƒ™¾H|á5â³/¡‚\-H|ùb+ts/áÎOãwípSåÔÕpzLJ:uŽtüô*˜Æ§œˆÊ4¢ëòè¦7%òJÛkmrž-Mº°r­ïÞ:¬%1õà¯ü‘ß³óó+-ø\„” ÂOWR‚ Âò;xÜuƒ‡^¶ k"Øð‚ÁGÝXû°Áy Ö¦n²Üå·I.¼N:1ä˜Ë¾ÂCáµ®'ÜõYü¨Æù\—1s£$#ljO=Grþ5lÁŠØ(²|‘p×çðš{Ÿ[‹­Ì~žøøa÷ÿÝEpS² ª 2õãuïÀkìrEÚs"£ZÄÌ^&9ÿñéçôëVĺ\©ª³•^çVÂmŸÀëØäDÍ‚|©tê<6*aæÆ!.­¼’òP9ך§›ºœ,Ôþ|¨»)Œb«³Ø4­‰¨{ó½²Õ"Ñ©çÏÄG¿óëù?:¼zŸûÃ}Ï|=èß÷ÔJS"¤A–xÄŠA–ÏAãΈ¨ë~uyt};á¦á÷Þ‡n@g›kççÄUvŒŸ"=N|æÒ‰!Ìrm¯}'¢¶ÿŒ«ºiêvYY~檈*Ž“Žž >óñÅ#Øê¤+­ZE¡2õdø%ŸªÔOòœ IDATŸ/ζ37NtòY’‹oºjœ{<ñÍV‹¨l~ß^ü®íèæ^·6ÚÇÚÔ šÂ(éð;Dǃ ¼0+nmæÅTôÞG°ñ#è–õµVDÀ¤Ø¸ˆ­–°ÕéÔ0+`€ö\>TÖUE©°ÑUêYëÚ+ #ØJkSÔ‚ê½{­‰Ïüô­èí?ÿïDL-DH ‚ ,±‹!%‚ ÜûƒÅÝQ×7ëÛÐù>|Þ¾ï¯ÉíÚõ’ˆtrˆtbˆèø÷]ÅT¥pïÞ^ˆn Üø1¼®íx-ý¨\ÞµxYëª6Фc'ˆ‡^$¹|[žÂÆU–OVÔ­ Ë“}ì+Nzäò×d%¤3—ˆO>G|ñÍEÝcwMT àeðûïÃïÞ‰—ï«­M‹j S!=N|æyW¥¦¼{.Ðnï:ùøÝ;ð×=ˆ—ïEuî;eô]žu-Š…áå|~EDeê]‹^. snòa\Ƨ°•ÙZâ!·ü¾WkJLíúü·ƒ ì^®bJ„” Â{ZR‚ Â=tC»Ë‡‰JØêéÔyÌôE¢cß%8ƒ­ïþõ¼¶ ƒâwnA·¬C×·¢|—Ÿd“ ¦8éÂÊOÿ˜tìfnÌ…´¯¦ç½Òxmƒäû ~ß}¨Lã¢àsÒ„tò,Ñ»G2r¬ÖBv‡ÿ•(…ß»ÇI¶Áùµq“ó\~W2rœøÜK`¬“P+>[i‰ë‘DظL¸ñ£ø,SI-3kæ¦<½¬BÏU¦‚:—G×µ¢2õX,¶:çDZuŒqŸ³2Ep툩Ç÷‡»žþÝå(¦DH ‚ ,ñÜ!%‚ ÜýƒÃþAkíA´÷ååô¾tC;^ûFŸuÕSMÝ躗÷•0Å ÌÔ9Ìì0Õw¿‹™<{w‚Ï=¯}Aÿýx[ðZ\%W˜C]™ÎVœ "z‘tü4fæ"¶ZbEWDÝÁà#dúU¼ŽÍµ‰{ ‚Ïã*éØI¢·þ’tæâÉfº"^üîø][ñÚ7¡: È:IW0sã¤c'‰Ïþ3;ì¤ÇÁV  }­ûñû÷¡:ç×É&Ulq‚tò¬k%½—ŸUkÁZts7:¿•krÕ†Õ9÷ßÊ,6*¡<å®EµH|áÈ¢#ßüõüÃC«÷ù²üÄ”)A„#BJA¸‹…å)¢®E7uãun!³ãSîÝÐŽÎ4:1—k“êNb £DǾ™¾pg‚Ï=¿}^÷Nü®mxíœì¸"V’*¦4M:q†äìˤgÜá>*®®Š¨› ³ûódö=ãr´Nä³Í‘\>Jõ­ï`K“·IiÐÝЉ׾¯s+º±Ö;•T]µÚØ)’ G\n’Ö+Zh|(R) êZ6~ ¿wO­zÌeL¹<¦QÒ±S ¤w1_ÊÏbJSxͽxíPÙ&÷žH(•…Òju¬ÅSÁ¶ÏþÇpË÷ZL‰A¸1"¤A„»p0X"êjŒû«×Ò驪mý{®Zª±ÃµÇiïê!zä]Ò‰³D'覈ݎôöðZÖã÷ìÄëÞß¾ ÕØé*|®ÈŽÒ4fò,ñùלˆ?…­Üã*“{½z^Hæþ_$³ç ¨úV7epÁ´7Sš"¹ø:Õ#æBÄý…¡Ü7¹8 ”« kéÇïÙ…nìBePÚÃ&¦4‰™TÚUÇY ~ˆ×:à*¢”r•P•Y÷Ý­Îa­A)µ6Öb­‰©Ÿì¼Û•Š"¤A–x4‹Anÿƾ&¢~sÕ<0³xù~¼öøãµmÀkê©y ‹5‰ ­¾ð:É¥7‰Ž®UÌÜÄaZi¼|^û&üÞ]x];ÐM]‹D”-O“Nž'=F:rŒdä¶<-¶÷ÀkÛ@ö¡_!x¤V³ J)MH§/ûÉÅ×ß#_ª&¹ü :ßßwŸ¹f”öÝäµÊ,fú"Éå·I'Ïa«…Zˤì±ÞW†$~÷v‚ÁGÑ-ë]k¬çAšb¢DeW 8séÖ'*Ï­¯IAûx›QÙ&”Ò˜øŠˆªb+¬IWmEÔû®ET‰“Ñw¿Y}ñþQþÀáU{s)üÅoÿ^v×~C…Ùàn½¦)A„%Ñ"¤A„ÛÅô¡ýyƒ9 ¬Mójüu}ºu½›v×·¯cº©ÛèÒØUx”¦ˆÏ½B|îeâS?r•é ¦©iÝè&üù}÷á÷î¾Úþ¥jÓÈ*3¤Sç]5ÔÈQÒáw1¥)ù°}üþ}düeüž]W3žÜŠAšŒžpÁç“CN\]ù©1(@·®'è߇×6ˆÊµ üЉ¨êfv˜tø]Ò©s®eÓZ@ª¢>° ±¿w7ÁúñòýW'ò™ÄU,•¦°•¦0ÊM‹>í£‚6‰Pžç‚ïëZkaóUlµ€+ØÊ 6M¤­òÊZ¬15}h>óè—~ßïÜþÌÝS"¤AnŒ)Aá¶lîW»ˆºî¬ÛàD’×µ¿k‡ oêrº$‚4Á–§‰‡^tbjè§W,x>º®ÕåDõïÅ_wÿåaM­ýkêéØI’Ko‘ ¿-IEÔ-ãù„[?Af×ç\…ŒŸ]$¦l‘^~›Ê‘o¢¼Sœt­yë¨MÎk?S I/a £$c'1SçH§ÎKEÔí!II•`ËÇ Ö=€nêAÕ¦Ú$–¦Ig.¸É}ïÕ«}”ŸqžçãwlAÕ/Qqɉ¨ÒÔ¼¬n°"¦n"¤AnŒ)AáCmæ×šˆºæ1Š®o¯pòºw൮G7´ÊUKE.'>ûÉÅ×I§Î£ëÛ ÖíÃ_÷ › —mtí}i‚­03—IÇOŸ•äÒ›ØòŒ|Øn׊…udîûyÂ-ûÑ­ë›ãdSR%?-M¢›{]^XàDqSœ ?M2~ 3}éöå óØJÖlù¸«lè@9Àbã ¦8N:1Qqq¦òÀ]+eãwlB5t8É›V±Q—±¥Il\®…Þ ï»Q%®¼ý퓞~áZÍb*Ü÷Ì7ƒþ}O݉àsR‚ KìËDH ‚ ·²y_Û"êÔl~×6‚Gðzw£;Ñu-µ›ðfK“ب‚ÊÔ¡²ÍµÉl¾kKŠŠ˜Âéø’³/ŸÍe wÝÐAfß/nzÝØéÚ*YÐÊgݺa›¨XœÀL'>Jráø™šÌî¶R@7vl~¿k‡›Èçg‹Ê˜ÙË$c'°¥its P™tSº¡íªˆŠ«—1Å l\u+k‘&DG¿;û›ßÊxö«øÙ6î{æë·[L‰AXbÿ,BJAø`ö'¿b­HDÔ «¨L~ï}?Jпw^<¡”«²Y8uÍššˆ%8C|æâ¡±qE.åÝ@{øÛÈ>òkøýû\{ ­-ƒ™½LtêÇDïþÊÏ,Ê™î<¶:‡×¾‰`ð¼Ž-.PÞ Ü÷'.cæ&°IkBÕµÖDT&qû £ØD*¢nÏZ‰N<'bê"BJa‰³)Aáæ6èO~ÅZÿ š¹ïƒöQaáöŸ!³ë3è–u(Ô\‡›Úv%‹ÈÌ\&:q˜êÛÿYZóîÞöå‡è|™=_Àë܆—ïCež/SiB:uŽêßÂÌ-–ŠÂ]ÃF¼Î-51µ irùæÿÀ¤˜ê,¶ZÄ'jaåJ.Þí^‹µ#¦ö…»>ÿGÁ†Gv1%BJa‰™)Aá½7ä"¢>(^K?á¶Oâ÷݇nîEÕå)d_[7Aoø(Éå·IÇO“N_tcè…;´óÑxù^Â]ŸÃï½ÝÜ ë]õŒI]6¸Ö0Ïc¡˜²I•tô8Õ7¾­ÊÝ lTå‘{ü7ð;¶ÔZ-ü<‰æ§ñÙjuÍÏ…Û¼Õ"Ññgχ۟úçÁÆ}cõ>ßîzúwoUL‰AXb[&BJA¸ñ\DÔE7õîø~ÏN¼æ^—yã…\™âf“ ˜¼Ö¡´W‹+2˜© ¤£Ç‰Ï¿J:vÒ¨£’\ÔÛ¶ãQxù~”Ý·×UDåòN<ÙÔeÍ’N]ÀÎCX‡×±¯u}-·hþŽÊ$ß úÆŸcÓXZÁî6*ƒò6>F¸éñ«k7ß kAk@aS'¥lRÅ'±qQÖèv¯G£¼ÐMBÌÔ»{šüКô ò3‡WïsñÖÄ”)A„%¶g"¤A„Eçûů˜jõ ‹"¢nêIªÑ í„;?ƒß¹ ÝÒçÂÌý 5ˆNž%¹ð:éøIüÞ=øÛð:6¡Ú]æI\»Ñô—'uú'$£'0…‘-bµv?á¡›z6 殊ÂÒ$fv[™Eeêkm•×_Û+¡ÙÁÀCè†Ð Ä“I1¥IHæ°©\øÛ!>*÷|Ý>tc:Ûä~W°åLyÊ}—ŒA…9tC'*Û^åNÖªm¥€™¥PZËEþ 둦 º±äæÿk1;én+3& 5fnœøÌKozm}ÿ$ûÈ—¯Öër³bJ„” Â!%‚°Æ™þ—ßïõÙy£JIR‚ 7F„” ÂeúÐãû­ ¢ù¸×¾‘ìý_$Üö 
PkSˆ«¤ÓçI‡ß%:ñCÌôELir [¯e~ÿ^—-tEt„uNt¤1¶†×½m„ùCü fúÉØIâÏ’NŸ_[2D{xͽxÝ;WÜd‹¨© $ç_ÅÌ^ÆÄU”Ò\[õ¾‡@@y!áæ'ÐM=è¦n÷:~ÒØeQŸÁÌýk'©ªs²™É6â÷íÅï܊׽Ög›\¥Yc+3nšáÄY¼æ'‘>ô=†4Aåòx[\x½º:FÑFeÒñSTßü¦8.ÙEK]ÇZkž×¹¿w7~çVT] hJîçQÉ $k?¸@4]×âZ2ÃW§=×j±U÷ç›ÒŒÈCÀZ &Aeš\^Ceêjm®Ñ|uš­Îa­©U¾ÏYBi׿êùÄD'ÿ :òÍ_Ï8<´:Ÿ±ûó™G¿ôû~çög®ˆ)R‚ K<"DH ‚ ¬ ¦íßg­>´PD-z ø¼îdv}¶Ö>¶iÏÇÆÌì0fæ"éøW15sѯÚ'¤B7÷âwnÃïÞ×yµâ­!M°åÒéó$ßÄÌ^r-`·”ïtÍ?£4Ê 6~ݲ¯¥¼ÐI “:15z‚têщg1³Ãî`½¶¶0¨0çÚº:6¹¿¶¬s¡ä¾»N¶RÀÌ\"¾ˆò3èúVnwö–)McŠS}{ðÚ7ºC7êÊé[-\z›Ê«ÿɵŽI9Ž»4•9'{ÛˆêÞ‰ªou"<*9Ñ•ˆûŠújèyX 2®4Ýk%ULi [[»áôÊsÓòrÍ.#*SïdRÏW”ÙJk’›Q×â…nʨM±q…øÜ+kBLë|föOþÉÿ*BJáR‚ «›éCû­µÑÞ—oêÁiÄïÛãÄTCG­-Íõ¶q3uŽtbˆtê<ñÉçœ I£UuÍtS7^ÇæZûÐ.tsÏbU™!¾H:|ÔU)Ufo“èXpÈS^Mn@¸á#xíÑ-ë®ææX‹)M‘Ž'>Jtìûó‡ÆU¾uA…9¼Î­xmƒµÉyèºÖšˆ2®kö2fæ2x>*¬åÝ ë«ë•N_Bi¯g^¾¯–/uELLy†äìËTï¦Ú8W+6*ï#èߋ߷w¾"ЏêòârM|¤·ÕßYcÀZtS—kå rNLÕªH«ØjSÁ&•5TÕ¦A{®ª0Ûà¦^¹×Å%lµäîqitîs‹E–­‰/ùAtä›Ïä^•yÓ‡öB2˜?ð£Ã²#A¸æ© BJauòAEÔuG”ú6üunÿ¤«˜ªoEùÙy1•Nœ&=I:~ŠøìK«"ø\7u»œ¨žø}{Ñͽ®eNûµJŠfú"ÉèqÌä9Ì] FVØjÝÐF0ø(^Çftëz×hÁb°å’á£$ŽŸx[;X¯ºMKE7÷áwmÃ_÷^Ç&t}ÊÏ`±P-a #¤“g][žê.´b]9d+ÒÂ(ÊÏ8‘Yß¶ øœ«ùN&:~•m\;7$c°Ñº©Ýë@Õ·ÎÍÛê\MDÍ`Ó䎒Ù4Fyª±»zžqÕkàÞK\qblvk“U+¦, ”v"*Óè*¢¼šÔË®R­<íîëwø>g£JœŒ¾ûÍê‹ôV«˜An°ƒ!%‚°ºø°"êZ¼–õøëî'Øô8^¾U×Z›J滜œ‰Ó$Ãï’\z“äân »YAÕJ£ëÛ\{^ÿ>‚þ%DÔì0éØIÒñ!î~—+\¬1(kP nY×6¼Ö×Rc¬5ØÊ,ÉåwˆÏ¼@|æyH"lR]ù›• ‹nèÀkßX›ž· ÝÐ¡±q37F:~ 37†nêqmYw=ȉ)k fvؽç®mè\ó¢àsÒS&:ö}â¡Wµ˜R:Àj—!ôîÁ_ÿº± ¥¼ùï–+ND%Õ»:ÐMä«G7´£2 .|»6µo^’Uç0…QWAtÛ+ìî ÖX”RNÆåš]lc±Iâ²k_,ÏÜõß[Ä” ÂÚB„” Â*áv‹¨E ?ãd@ß}øëÄkp·ÖºÊ”¸B:1Drñ âs/‘\~¬kY¶hmDçûñû÷ºÖ¯…"ʤó­_éøiÒ±SND™ä&S ï›ÏOòZÖ»l¢4Áš[ž&¹ô&ñЋÄg_ÂÆeH“÷™V~U׊×>H°îA'¢šº ¼²\qNCµÊ¤ÐÉŽeNmÓ•Q¹FüŽ-®ísaðyc&ÏQ}ë;¤£'V•˜R:Àb]µX÷NüõºÖW¥k×¥XËmšÄÆ•{*{lš 2躼“œ™7‘Ϥب8ÿm\bå&€)¬IÁZT¦¡ôîBÞÌ.¸ß·<)MâžV†‰˜AXˆAXáLÚŸ·Öº"꺇F]¿s« ùîÙårêZ M±i„J˜é Äç^&>ý<éÄÜRÈ÷D{¨L^s~ß^‚ÁGk"ª åùîpV-ºŠ¨‰!Ò‘c˜¹Q'Ø–6M\fQçüžÝN6vº1k±Ñ¦4Eré-â¡]õZ¥°ì~âùèl³ ¼x¿g· ”믶x•§I§/`fGÜß_ß¶lèMySÁïqÁç*È]SÖU¥¤#ǨùSlqre‹)í»Ï˜öð{÷ <ì*Ö´ç$o\¤Œ)Œaãò²Z3k :ׄÊ4A˜C‡ .Ç-M0Q¢ ¶:‹)Ï® j©ZÕ^£Â:t};ª®ÙM…Daâ"Äleε]òš6h£J\yûÛÿ¦ñçþ÷ÿFžö‚ «R‚ +”éCûós@Yÿšæ»zælìÂïÝ…×¹ÍUNulBåò®B'q“±ÌÌ0Ñéç‰Oÿ3;¼ žxÚµ~5u¬àa¼–þšˆ œˆË˜Â(éä9’ ¯¹ª›e>Í&Q­ eþºûÑMÝ.à»–]dËÓ¤3—HGŽŸ{…tø¨«€X–2ÃCuxù^‚MOà÷íq²0ÓàZ¼Lì&çM_$- £P¨ºüŠÉøI§/5ø}÷¡ó}µ÷½@LEsÄg_¦úæ·ÝG¶–k´2v”žû,zžËˆº"¢<›&Æn(ÂÜ(¶ZXþkÔ¹ûE]«kiÓµÏ_uâ+ùf—ñÿ—ÊCÕåkykYPÊU|%Ule37æ*§–‘ˆZtK¢£ßýÍoå<û yú ‚ ¬DH ‚ ¬0ZüQ.w©{'~ß¼–¼¶WÝa 6.aæÆ1…1¢ãß'9û²ky»ïíã5u >VQë\vÊ•T0ÅIÌÔy'm¦/ ´Ï²ªìz¿C[TFeðûö¸ª¯ºVR¬]Š)N¸ÖÃÑã$—Þ";9_±ÏÊ ÐMÝ„Û?ᯀ ›l“kŸ´é|Õš™¹&¹º~+tvd^"ÎWµ-œÈWš">õñÙ—kylËùs¨\K(š`ða‚E7u×Ú^÷³¸Š)N`£J¯Œ¦7k ˜ÔM­MžS~µ–CÒª«*šÇÆÅeöYTXkQµjPÝØA¦V]ƒI0•ìÌ0Öše+¢®[“j‘èÄs"¦AV"¤AVËFD]‹öðjà~ÿ^¼ü:—“©¯UUH'Ïbf.½û]’‘w]Xî]:˜é¦.ÂÍOºì«–~T¶Ù,-®Í°4I:užäÜË$ã§Q~fJ4¶2‹nh¯‰©ÇP¹&ðÂùj37N:z‚døÒ‘c¤Sç1…±{'=”BåšÉìþt¾ßÉ&í_­Z›ÃÌ\rmŠaÝŠ9@¿fnÂ:üέ.¤]-h3)¦0úÿ³÷f1–]י濇sîÓyž3rN&Gq‡”Y¶Û6ºË Ì •^ äSÛ€Täjô[çC?4Ђ ~èîj@åjÏ’l¦IQâ˜Ì$™óó<Üw>çì½ûaˆL’I2‡ˆÈÖA$ɈgßsÏÞ_®õ/Wÿ áÔyz_º5,ÀU ¼ÁoÂ9Y×J¡à.¯¬Q%QX†Ø¥áK΄€Ðõí4‰ÎK~v"_T¥ðóü<œ »˜¢ … ¢¶¢¦ÂKQu¡345¯Z é6Ú±-®_û;²˜b†Ù3°b†ÙáìXõùŠNP;ÜÀ7 {Ÿ†jê#ùã§6ofî2ÌÒM×άNP¦Ñ!ëÚàœ"IÖØ ‘j¤C½ˆ’¥U˜•qD“ç­LÄÕRfo¼i¤‚+e!:¡;C÷= YÓOÌÒ€°ùy˜Å›ˆf/"š¿ »:¹ÍS"Y‡Ä±ß†îŽ×(ÝgÛ€ªÖ KÔîiBÀKŹ={ißBAÓ6¿YÛ Ý~"ÕÜmB˜ì‚‹‡há:µ=æ÷©«áløex^¡j<Œ…Ý[¶œ¢ÊÝÿÕ®^)gB™B0 IDAT¤ÐüD  žKÓ*\XާqÎÇSHå¶¿>M])ʈ"ñnaÃP^ƒ-®À™ê®«þüª÷!‰©øw™Óoýï†aváNˆ…Ã0ÌÎ%{æÕÓÎéÑ,¢¾ð`I5@Ö4Ãz ^ß3P̓â,U€Xƒpâ Û¾ù j)ç6m"ߺˆRGãi€Mqˆ4î„a¯Œ#œúvmÂÑ´³½%:Ö/†‚«ä¡2½P‡ã)uwe9ÊÌZ¾pòÌü˜ìL, Ý–¾G¼çá ¾HkTÓ á%IИ¶¸ »6U!µñ{ÃíÍ5Ú ®²QÓÝvˆ¤ÇÝ_+0Ë·|ü×0¹i?½ý¯1(ÖÀø¼á—iݵôµ(„+¯Â–²€ h­ÜÞ[3gÂx"_# ”o¦Hü¸°© ©œ`·åõ’ªÓµT¥“¨:ÕUóq•ZBÈmyMÛ¾&Õ"ÂÛï}\ü«?ÍœþÅYÞ90 ÃìXH1 Ãì@²g^}=Qý»Ö…Ô¶@¤›àüt÷4]Lùt@. 
½‡hî‚ëoÒA.(Æ™9ú4$ÁF^ƒn?ÕL’~΄påÌÊ¢ÉÍ_…HÖÖîiÉqg14`#Ȇ.¨æAÊ.ÊtC(Ιö0»:Iùæ¯Ò„Dmî¦#Õ¯çIèþo@µ AÕ·SÅOü~X`GX¡6Ö¢ý³W±å5ØÜ4tÏÓP­ÃŸk!upÕ¢ùk¨^ø¯p•]¿­>ð[„e +xž‚ó“ BÄU²°ÅÕÏ…•ïí5sÆ@¦¿†DŸ¦Ê#k©Z*(PîYiuKª¥œµ€3©FÈD Ë(D!‰þ 9ˆ½SýùUׄÅÃ0Ì®ƒ…Ã0Ìb/ˆ¨Ï=f 3]ÉøG ºó(TÓUKYg# ¬ ÑÜ%„·~WYƒ‹ª4:þk¿½„¬i‚7ü2tû!ªÆªm¥Ã¡±ˆÊ¬N!š<³xNiìÍJ¯Ej@(¨Æ^¨Æ¨Ö$¦tΆ@·Éåç\{ÑÜØìÔgÖóaDƒHe »ŽÃëy’~fCD²î3“óÌê$ÌüÊJÔÆÓ ÷ïÅdgºóTc/pw6‘³°å,Ìì%T>ü/€Ò[3‘ÏZ@ùP-ƒðzŸ¢u«iº#y×3¢ªjSrß­™s€Le 5~ µ(¯¿¯ãÌ&T °åµMÊ>£éy"Q gCÈtsœ¹¦àL[Z«äáªÊóÚ‡÷‹)†a˜]tR`!Å0 óøÙ{"ꋨ¦~ȺvøG¾ Õ:L‡l!m´–„ç`æ.#œø¶¸òå:BBÖµQK`ÛA’um±ÈP”cSÉQXùÔ˜•ñXp‰}Q)p?§hVàõ?™é‚j V>/EëQ-RxiÁ¥ 1•ŸðMF²ºótçq¨¶‘;¡òÊ£ l•ãø>ú̺T‹§Î¿œÿÉw3§ÏŽña†Ù9°b†y dϼ|Ê9ÿ $Nî×k ÚÁëy’ÆÅ'j¨ÕN'–V©rfî2ÌüU„3C(ŸäFû!èöÃÔj–¬ƒšªmªØì4¢é 0«Ópa‰EÔýØL!¼Á ûH %êÒ*ìÊlq ÕOÿ–&$–²_ÜTèTûa趨ÎcPÍ4ÝO'¨*«Z€ÍÏîNRÓ×x}Hxصy¨L7TÛÈFþÖº„pQ³táÍ·M‡HÖÝ%5¾|¿G"J@5õ“ˆê8B"JiQq¾I}’»öH÷S¡µqÕ¦NlÈ'gBʘªä7ª¿,cÊ9д¼T…•'ë¨"ÊYj\QCx]¾r]XL1 Ãì8XH1 Ãl#±ˆ…Äk|5ÝuÞÀ ð_ ¿ùO6P;Š1°…E˜ÕI¸J€€¬m‰«êé°li2™ÍÍ"š» »:[ÉÅaå–/î !$¼Á š¡Z†©©•ÏYØJfé6ìÊ8‚ëgas³TÁ&tÛA¨–¡8À~˜Úï¼$ªƒ2l~fm€Ø_ÞŠ°çý"<\P†¬m¥àóxBaüUšÈ7wÁåŸÁ¬NÜ%¦>÷}¢.,Ceº¡{ž„î<Y×F÷VÀU‹pa‰¤‡Xx<Ä: ¿"ÝDR*YOä3pQ.(“¤-.Ö@(gHÐ íoHa‘Ê@( ç \X¥ µrŽD Â_S Ã0;R Ã0Û‹¨¯Ajx½OÃz ºç$UÜ=N}ýà¥4*¯ç­Í!Z¸»:gN…|-yg!;¯ÿY¨Ö¨¶@ª8´ZÀUó0+ãˆf.Â,^‡HÔÆ•Q)kÈKÑ÷ +°Å%˜• :`뜵›îÌØR¶°¯÷Ȧ¾8Øü.1U-!šúÕOþ?ª‚Û>—’®4¼Ž£Ð}OCÖµS¶WP5T\ÅCSÚx¯øHÄD$—Rõ~ d¢–®¿à‚2µÆæçá*k´Fʇ¬i¦*5/X˜.(À–²$è…b¡û¨ëR-"š½ô7Õÿï“9}6ËW„aæ1l;YH1 Ãl,¢ð¡”j€?ôMx#¯B·„ðk»å…U—Í^D8u®ZL•/Þf¯…òá„LÝs’Ú%;C÷‰¸Ê£8Kí–^’ZÈÂ*lify 6¿UßxÉxê³Ù˜ì „ÒPÇ¡ê;¾0‘ÏUònÿ áõ³€s$[À|²¾Ú^MtGD•s,¢¶B€XKù’u€Ÿ†ôR”Ÿg#¸ [ÎR¥TªþŒspA+TIUÎη·R†t‚Z_¥¦`ì  „eª¾‰Êà¬ò­_3‘ÊPÕT"¡SÀú=âèÿ(³«Uh‹¨-‡ÅÃ0Ìcx"²b†Ù<²gN 8çF!ÕóÕ¸Ÿ§„HÔ@Ö¶Q–ÔÐKP=”#¥|8XÀDÔ®'$UáH V©¬´ŒhúcD³—`óóÂÍlÁ:)’†^ ÞðËÐ]Ç!“ ±”Š…ˆM9,,#¸õ6d²ž¦ç±ÝØ6É8 >ÏÏCÖµQ–Wºñ®‰|wíù¬­¨[X† J’k{ˆ×ÃKBÖwÆ-|wW‚: -/.ÃW`"³°˜b†ÙÆÝ )†a˜G‡EÔƒ>}$„—„¬m…î9 oðE¨Æ>jUYQA¶¸ ››¡|k¡šú¡2]u€Òth«æaKYDS!šù®´ú¥!ÎÌCH!à‚2tû!è¾gh_*CdOöŠ…œƒ ˰Å˜Õ ÀY:lsðò¶¯›­äÍ|‚äSÿ ²¶-®À¹«Ô0n3ËcpA™s½¶ë~‚ )༦‰BÏeÜ»þgâŠOVâ¶ØÜÚ<œ YLm#.¨„Á•ø_k~÷GÆWƒaf‹žŒ,¤†ažì™SçÜQ÷ûÔÒƒ¨k¥óÁ¡šú7D@Q¶´ »2pâCjËË/BÖ4QpvçQèŽ#um4L’˜²Å¸Â"ÂÉsˆf/Q•ÅÔ#œ…ÔpBBÖwÀë}ê3ëD°D™6a…ÂæýôÆ41”y³6KmdÎRõšs\1µ ÂÃdgà*yx}Ï@6öÒšA_ëw¤Gq¶°,¦¶l]X ‘¬…¨ï€ðRqvWH÷J~.ÎÊPõ¡—‚Ð$})h¾@ƒ ‹$ô¹ v[p&Bpùç áÕü³Ìé7ÌW„af“Ÿ,¤†a•3¯Ž §OC¢¯ÆW=qdm+¼þoÀZQ™Ã²‹ª”?”Bxã-7ß‚+åðùÊYÛ Õq^÷Iè®é dº *SK0ËcˆÆß‡Y¾M!Î,¦èðì¬n„î>Ië”nÜ˜ÐæÂj<9oÑøû¨^úG¨æAxÏÁz™2£ü4„Ÿ¢ïUÉo„›C ¿€`ñ±ëf‹+°Õ<¼®ã™ž¸Š pQ™Ö ¸ Õ2Dõbq¸qð®`ó °…@jÁæp3Ö„þ!©jýÞP>ez…eªð,Æ-“JÃ…U’R©¨¥u´.,ÑŸªpÕ"x‰¶W-¢ð×£ãBØS™ÓgÇøŠ0 ÃlÒ“’…Ã0Ì£“=s*caO³˜º7²¦Þà‹ÐýÏB5õCÖ´@x & ¿ùÏÍ ¸ñ&‚ëgá*ÀF_ý=3ÝÔFÖ󼞓q@p ‰©‚€£™OÞ~¶°H cï™{º‚ TËtÏ“P̓5M^,–¢*\ifeáø{®ÿ \em#ß ÎÒzô?ÿÀ+4²^'bá(áJY˜å[0kóÚ'1%5çm¶° V¨r°¡3^3ª64K7\{á­_ÂW ü4’Oý+ø ²®Púîw›—VaK+÷Êžb`—­ ’õ•ƒTñ‡ÿÇÐ…•¸Úé³×ØYCRʯ…HÖBzôÙÁE ,Óçf9ÇÕR[ý¹X-¢òÁyßÌÿŸ2§q–¯Ã0Ì&>*YH1 ÃlbJèÏWé üáW {Nnˆ(ø)ÈXD­ÁæfN]@pågp¥U¸è>ÇÍ  ÕØÕv^ß3Ð=OB$j ¤Gbʆ°kóoÿ áø{:ùµ¢k_¸*yȆ.x}ÏPv]+à§!„¤–¢rfeÑÄnÿ ®°tïuZS½OQKfß³ ]€ö)'G(Øò*ìòÌꄟ¦às8–~wÁ–`+yèÎcPõ@"½qo™Å›®ŸE4ñ‰ÜÉ)’*ÓÄ¿ïÀ«©ÌFnQlCH˜—aó‹\ÍöÀK£¨²)Q øiÈu©kªâ,,À…eÈûxï j•MÔ‘T÷S4=Ñ„°ÕMã+çઈÏÈEæ‘?«E„·ßû4¸øWÊ"Šaf‹™,¤†a6Ÿýr.’õð¼ ÝuœZ¾êZ©eE$¢Ö¼iDÓ#¼õ6la ®š¿+Ø÷A~˜„ðS ]Ðm¡ûž+¦j±Ê kaV'\? 
3w宀çýý ta²¦‰ZóZ†©º&Yv6‚«¬Á¬N!š<‡püصY¸ ‡”ÕšPÀ¹î}^ïÓTqÕØ x‰øS«“ˆ–nC¦ê)à™×ä~ÞðÔšWÊAw‚¬k#aËC³xÁÍ·Mž‡ÍÏÁå/_/!¡»Ž!ñÄÀë{ŽîqWð¹µ°å,eL•VYL}-’DT²Pý»Tp&"ÁWZ‚R|yÍ.»¢çdm\홄ڨ0uQ…„~PäàóGý\dÅ0 ³}»R Ã0[ÇþS"Yøe¨Ž#P-ƒ”U“¨½#¢ÖÇÌ'ˆ&ÏÁæfaËYÀšMøñ"YÕØÝqºÿ9èöñ˜`"8À®Í"¸ü3DóW©âÃÛþzº(€ðÓÐÝO@·€ÌtSk‘ôàœ¡Êµì4ÂÉM‡ÉNÞWå=߉4tïÓÐG¡;’ ôÒô:œ+­À®Í#š¿™j€H7Ç•¼?ù<¶°[ÎCµ B5tR&Ñ]Qá­·Í| “›…«è¾ò†^Bâ‰?€î:N•„w‰)p¥,li®’g1u¯÷¹Ÿ‚H5AøI@%(°Ü’ˆrå\9K"üëdîWÝ·Ö$!“qÅT,Ÿœ ãl©<\~ÎTYL=èõ­\k!¼ú÷Ä"Šaf›ž ,¤†a¶žì™SO:'Ï@âµ½÷$©Ì†R­ ë;âꋨp™B'âujŽƒå•Or((®Í!š8‡húc˜Õ ØÒ*`ÂG>°ËÚæxªbTûa¨Æ^’…ÎRæW) W\B4{‘™—äõúº˜ìd]Ý_um$yM@™^·‰hêlv ¶²öH‚×?ú$Nü>TëP|ýc1å\X‚+R¾” +û^L9€L7ÒT ä5ª JÔm´ò¹ÊLnfñ¢™Oa³“ V³šÝà Jʇlêƒn?Õ2DÓïâ)l+°…E˜Å'ÏÁ,\‡ÉMoYåÚ¡PmïÀ+P}ñëj!1%W)Àæçc` ‹q%ö‹÷ŠD‚ ¡{ ë»íQfuÑä9D³ŸÆ÷ÕÒ– á%‘|áuø#§(|þîàsk(_ª´JÓé··†5ʧªB/Imsʇs.®¼ÌÃWà Vئ÷«32UÄAêÂOCHMb*,SµVµ[ÎBžjéL„àòÏYD1 Ãì„- )†a˜ÇGöÌ«¯;§G!Ñ¿^¯L7Bµ€j? Ýq„ò€Rw‰¨ Hy@³a®!š¿›_xl"ê ¯¿¡^ÿstÀnì¥ÃeälKqÈöìE3k³qx°¿û\Õ"U#ÕwP°|ÛAÈ8XUa K0Ëc”µ2†hþÊÖ‹¨Ï£¼XL½ Õ<Ý6ËÂ@ȸµršÚ*kñë·{·•O(¸òœ©@5öCfº퓈ÊÎ šþ˜Z^®Ñ}µ]/+•AúåïÁx"UOÓ×1UL•V` Kwåí€zg-„Tw¦æ¥›è3ŽÚò‚ý³Z ö_©Óëté Ul%(üRR–U\-…j¶œÛ·í–.¨„á7þCú·~øyÂ0 ³¶>,¤†a?;]LÉt#dótët÷ ¨æAˆdUm8GíoùE˜ùˈ®S¥ÑÚÜC„`oõSB…U˼þçà ¿ÕÐh?žZ%a‹K0+ˆ&ÏÁd§HÒì’vpa²¶^÷I¨Ž#wrf„ UZ…]G4{Ñâu˜ÙKpQõñ.‹—„lè‚7ü të0Tç1j…òRT1U-®Í®ÑÔ8‘¬§@ú=ÓÊ'aË«pQºe2~O"¢þhö2ɨ¹Kt_=–—¨ šzñßBwøÂD>g" ï..Q%Ά4ܽbŠBÄq§ê(ÝxW]®¸˜¶¼À>6õÅJÑ‚DD"‡ÔSø½«ès ¸öMN› *a´på'Õwÿòû™Óg³¼ë`†Ù!,R Ã0;‡•3¯Ž §OC¢aG“Ó ]Pí‡áõœ„l€Lgè°ìUDå©åË,\C8y67CÙ);úé'ç {Ÿ‚×ýtÿóP ”Ã"èPiKË0«“o¾ ››‰Áý{È ÎYxÇ »OP^V¢.;•VaV'Í_…™¿‚hö"URí'£ILÕ¶Â;ð Måë:‹'Mê° ››Y¾g T}ç–fôlñ‚Ñû¬¸–ïd°éœ )\~þj\õ1lnv‡¼Í|èþg‘|æâa©;bÊ9bP\Í/PëÚ.ÎZÀ9’P©úŠ#„UØJˆªp¥lÜÆ§vìï°1`"]\¦ÿEÂ<\´w'ò±ˆb†Ùá»!R Ã0;‹ì™S {úqŠ)‘j€¬k‡î< ¯ç)Èæ~jSÑ î ŠpùŸÕë*¢É`VÆw¥~ ÞÀóP­àõ=Yß á§âõ+.Ãäf\{6;M‚D'°3Ú’$,„sP-ÃðúŸ%©‘¬§ðkÂUr0Ù™8#‹¦ºJ~ç.‰ò SÈ:ÊýÒ]Ç¡»žˆÌŽÄ[X¦©s—!”Yß¾~üÜï9°…EØjºãMÔ4Íæçi­¦Î#œ8»6»3 /ÿèo!qü¿‹Eš»ƒÏ]X¥j©Â"I›ÝPÍæâ p/QÛ ™¬t’~­°JSãß‹&Ýéÿ+™Z“ëÚãÐóÝKBlTTºJ6?OëºGªYD1 Ãì’8 )†a˜É†˜úßo›œI¤)¼ó¼ç!›úi¤y\…° [\†Yºhþ µµ-í§!µ{#§ 2ÝÐ]Ǩ²À¯Ù¶¥صyWN-nŸisÛn1%áL8Cí‡Ï“ÔH5Ü™pX-Àæfa–n"š¾€húcšÄ¶[P2ÕÕÔÝu‚¦9v¥–)À¸° ³:‰hö„—¢5;µELPøu~¶²F¿SË „ôàœÍ/®Œ!áíwhÀn¸uÒ$ŸüCø¿YÛ( 1e l5O¿s9·ƒe‡¸óXÓLUQ^ B(8S¥ö¼°W\†‹*;§5ïAÍúD¾šˆd- $Ôœ…‹ª±˜ZŸ¸{'ò¹jÑâõÿ‡EÃ0Ì.Ù‚³b†ÙÙdÏœpÎBª?Þ*#t¢¦ºã0ü‘oA6öR8®Nppae#[ÉÌ]Bxû]˜Õ‰½w±¥‚jè„wðסêÛã0ðv Ù¶le .¿[XDpíŸáÂJÜ2¶½gWÍC5 Àþf,Ÿ¡i_°@P‚Y›‡Yº…hòCD“çaK+»xM4dM3TÛt,¥Tëjè0]-Àf§¨b*QY×¶ƒä‡€³–Z£ªyè®' š6ÖË•²°ks®½àÚwM¬ÛeËÔÐ…ä3oðEjë•ëÕCÔÚë*Øx"ßΩ,"I(€XD5P›žˆÛ\M%Øâ2\X»B3!D¢žþ¢ÁKÒK¥kà‚òiåÝår\µˆpêüÁùŸ|7súìï†av,¤†av ›/¦„ò ÒÐ]Çá¼FQ5MYI.ŒC°³Sˆ¦Î#¸õKØìÔÞ¿ØÊƒjìƒä7¡º Z†HúøiSÅeØÜì1e¶º%IPå“É:xý߀jþÜZUhrÞÊ8Â[o#{o×Ê{_Am¤]Ç¡{ž„nlì‰C¦M+­Àäfa®ÇZéÇXÍB•Zfu.(ÆQC´^°Õ5Øü"«ÿ„àúYØâòÎÏ^»tÇa$žüCx}Ïȯ¿‹Â8·¨DáôQù±ç9A¤2µ-‰¡)+(Qðw9K!úbï}Ä9k!S É:ÀKAú±˜2lXªÅ;Ó-wp‹(†a˜]¾½c!Å0 ³»Èž9õ¤sò $^{èo"d²ºç)x^j„L7Ç¡·ô·è®œƒÍÍ œøÁ·`sÓ{âÀü@I/ ÙÔä5¨–a ^Š2¦¬Ù@feá­_Å×o³¯mWµ­Ð½OAµ ôú´¯(€+ç`²Sˆ&>DõÊOáJ9ìÖÉf÷uEêÚ {Ÿ†?ôUóÕ4“,\>_›ƒÍ/ÖÄ•HnÕëaå˰ùEè®ãTÑ‹¨õ×\{ÁåŸîî굯@w?Äñß…î:‘lؘb¹ÑW-À­ÍÃÙp›ÅU¬ ?YÛç*ÅCÂ2l%Tó$¥°÷?ïœU…y)Ü^ô™ãª (ÖspÕüŽÊÌbÅ0 ³GöÚ,¤†av'Ù3/ŸŒ IDATrÎ}P1%’õðúž7ü2TËP\e!6;pü}×þyã`¿¯–~ªí ¼Á ÛAµ Ò£C¶5°¥˜…kˆæ®À¬N@ UýД!ëÚáõ=M"ª®mcª™3\e 67ƒhú*çÿ+ ·µ…ðñ"ë;è½|àÊÐòk(÷KÞ >·Å%zÿ*ª$Ûj1e K°¥U +oŒï-‡6Ç®Æ÷Õ>^ß³ðÿ:tçqj‹‹Ãé]Â…8P{!ž¦¸µk㬃LÕCÔ¶Q¸÷zu¡ ¨j«’l´þ§÷Í}Dù dmD²¯¢ˆ+Ûz.ûäDQ Ã0{lÍBŠafws¿bJ¤àõ= oàP̓Teã×|VDåfM_@påçãÀÁÏ BiÈDTÛ¼þç :C5õRB8 [Î"šùÑÔ˜Ü ¤—¤š>t¨ ¨û$TÛHœcU !–Wò°¹iDÓŸ ¸ò3ØürÇ~[+j‹“™nzo¿ ÕÔá%IÜI W-¬NÂæç)ðÜKp›ÞÊgó‹VÞ~²±÷®,¢fuá7Þú%‰ì»ûÊ9ÿЯAµ„Hg ¤G¹Ea ¶¸BÙEAiKÚãÖ«€dM3å&yéXꆰ¥¸µ9¸(ؘ>·ÿî£ø:™Bzõí$¥¼äÆDQq^[þ±T¶¹jáí÷> .þÕŸfNÿâ,?†aöÈNŽ…Ã0ÌÞ {æÕ×Ó£èÿÌ}ª^ÿs$7Z†!ëÛ¨’DjQ•5صyD3Ÿ ¸öF¾\ÜW•6ôàôRß:LYFÝ'¡{6äˆs®¸‚pò¢‰`Ëk^ Ά_èªä!ü4tß³Ðí‡ :©ÝI}vr^4{á·`²“¼Vw¡{¡ûž…7ôU'yi©©bªš‡ÉNÃfg(È?‘„~Ä j³6WÉB·‚ÌôÄ“)›Z9)ÏËæ¦)?W*‰#ߦ‰|͉ú;¹EA*ôyTÉmŠ0t.n»­m¡V4åÇR7¢ 
º•q’îb½:‹÷Ä@,¦t2SP~\MæàÂ*\X¢Ï¢µyº—¶°²EÃ0ÌßW³b†Ù[¬‹)‘ªí÷_€î<Õ"²®Âè›H6tS»ž”pA™‚µ«DÓçk ’ P™NØâ*Uèx)ÈÆ^¨LµRšvmŽ²Â¦.4,,q¥á#àõ=Cbªý0DMåYKaÚA®œ§jNg!¼\X…P>Dºðñ”Å4ÝK•<¥WÖâûgóƒì÷ïÇœ"ù—¨¥J4¤û( èºGUªä¼‰|,¢†aöù#……Ã0Ìþ!÷¿}gÀ†åQHõÇ|5¶òé*!á½oøeèž'!5ʧVç(«+žèåLWÎÂ,ÞD8}fáÌâuQ[€lè„7ðx)ªd‹«×à¨êÆ,ßB4uÁµ7`–oóåÛ®û¨¶‰ã¿oè%ÊíÒÉÏÜ>°&7»6Oõpœõp&„ðÒ4ÐÁ‹'ò­·\%jo{/W¹ø7g2ÿöÿÝ/×å{?øÑëNˆQ ôà[ÿç_üð,¿[†aîÚ±b†!!µq¾.ÀÚÓ?ÿ“ïì›cöÌ˧œóGYLmƒ5YÕÐÕq^ß³PMý”·¢}šî–)HÞÙ8ó&š,QÑâM˜ù«ÇÞY¼VÁÕR[¶Z$›úàõ?oø¨úvà3Ì4-ÑWŽ¿pì]Øìlv†*©˜­CiÈtTó ü‘×à ½‘¨û‚²ål~®šÿÒàsf{pÆP _ªžÚø’uÊËÁš3®Z<#R Ùýpþä‡qJZ7*¤Øx¦²b†¹ÇNŒ…Ã0Ìg…Ô]ü‹µv”Ås_T? YÛ Õ~þPÜf”n„Ð 8W ä(üze¶¸ Ýy²±ªy¶gàT 0+ãˆæ® ¼õ6Ìâ ¾À›ºX"Q Y×Ýýü‘W¡šh2@aØ&¤*©) €3\aáíwN|³:IáÍ< `s‘2YO¢°÷ixýÏA6õQ뤳€ ¡¥ãŠCÀ…UZ·jvmžÖ˜³£. M^ÈyÃ/žQ}gDºqߊ¨uXH1 ÃÜcKÆBŠaæK…Ô:ûPL½úºszýüîøÊÇ(„ö!j[ Z†àœ‚j‚¬i‚ÐI88 (×`–ÇÍ|Špü}ØÜ »ë; ÛS;Rm dS?lt°.®À®N"š»„àæÛ°Ù)¾ä´\ÂOAÖ¶Aw‡?òTûajÑ`Ë«°«Sˆf/Â,Ý„¬kƒî~ºûÉÐìõ #[XD8ö¢‰s0Ë·aËYÀZp5Û£¬‰BÕÐÝ÷,¼¡—¨ÂPyp6‚+ea–ÇÜxà <Ýq”Za•Gí¯a.,ÁUóp¥,¯ÇcÀ•°rñ¯ÿsëdNŸÝ"ê{?øŸŸ„3gî%¢Öa!Å0 sG? )†a˜¯R´É†ûo¡t§Ï¾þ±ýr]XL}éã“ò‡’ $¢ÿTëdm3eÜ@ÀEØÒ*ìÊ8Âé ˆÆÞ…Y½·PR½Ð]Çá ¾QÛ Uß‘¬§ïW|˜Õ D3Ÿ ¼ù6lqùÎëà÷ý‰€¨i×ý¼‘×Hdè[ÍÖÜü‚‹·q}Eªºuªó(t× šÊ—¨'‰U©š­°„ðö/N ³t ®œãuy˜%ò’µ-ÐÏÃ?ðTó „WVòtM~„hæÀÑ„7WÍC÷<oà¨ÖaÈd=µ[juA‘þÛr–«¥¶W-"œ:ÿFpþ'ÿÃ~Qßÿóѧõ(€¯ÂBŠaæÏR Ã0÷'¤Ö±ÿ9RvtŸ‰©Ó±˜jà'§„ðÓP­àú5èŽ#µm^ŠD…àÊ9ØìÂéO^? ³2q_‚Bµ A÷³Ü%¢¾›9}v_<DD­ÃBŠaæ;5R Ã0&¤ÖÙob*{æTÆÂžNŸÞ·bJyÐmá<Ýu²®¯”L[-À®Í!š:àÊÏaV'û€“ò¤‚j=¯çIè¾g û R Rpa f颙‹Çß…™»ÊáÚ_¶ÉIeà |‰#߆j;HQBÒ8úâ2‚ko úéßÂæp?ÂP$ë {ž„?ø"Të0Dº 2Ù(Mm|ùØü¢é©íoñ\eâ˯(¼áo"ùô¿Ž3¼R€ ‰mvmá·Üþ„—¢‰m_ûít×1x}ÏBÖwÒÀ!áLWÉÃE¸â \XâàóM`?Ѝ×GG3‰HŸÁˆ¨uXH1 ÃÜãÑÍBŠaæá„8‹œ‘8c¥æìë§öEIÄþSºã0¼á—¡;B6tÓô(©áœ‚l~ÑìET/þ=lv’‚®ög©Æ>èÞ§áõ> Õ2HU9ÎÑÏŒ˜Åë'>Dpý,l~ñÁå×^]­d=¼Áàü5¨¶ˆD „P$:òóoýÁ•ŸÁd§áAÛëD²žÂ¶¼ Õz€¦%&ë ”Objm6;…hæ"¢¹KÔÊW-ð¬£4¼Þgxê)#*Y!M¡,.#¸þ/o¿C×ÚO?Ðú¸(¬…ð5èž'IëÄÆ×œ (_*¿gSóÜÛ§"JWôi%ñÐÏ=R Ã0÷ØS±b†yx!µ±AߟbjÀ97 ©þx/ÿžªeÞÐKÐÇ¡{ R Y†‹EÔ¢…ë.ýLvŠÄÃ=Ú¾ Iíf²±^ï3Ð]' Z†èn œ Åaé ¼~¶”#Œößó]$ëáõ=ÿЯ‘(JÖCH ĶksÇÞAxãM˜Õ)¸°üÈë$k[(Ãhðù8_ªðSŸSfù6UKÍ]Yº Vöï¦ÓKBw‡ì·¡ÛA¤›î–Wòoþáí_ÂYKÉ=üú¸J"•?òtç1ˆšæø{Ú;ù_•5ØÂ" `ΘúúkZ-"¼ýÞ§ÁÅ¿ù=Q )†a˜{ì XH1 Ã<ºÚذ³˜Ú3¨Æ>xƒ/@wƒlêƒL7QË”` ‹0‹7Üxféli0áÖ¼©¡º ›ú(Ç¨í ‰)/Ø.¬6B8õ¢‰Üz®”[Wî‹{Ø?ø-ø#§h­jZ t‚DTnáøûÇÞ…]‡­æãª¨M\žšfèþçà <Ý~"QO9HRÁeØüÌÂ5D3Ÿ š¿³2¶uÙôRPm#ðGNAwÇ­®^’Ë«E„·ßApý,µ¥ ±yB$¦d¦þÈ«P­#Ôþª}ÀZ¸ [\!9–ÁAô_r 7DÔ_ýiæô/öPùw?üÑiX±iÙ‰,¤†aî±G`!Å0 ³yBjë0.…ýéwóÇûå’˜’?†Äk»ù÷ ð_„n?Õ2YÓBU/pa¶¸³xáíw`¯Ã–¶¯êEi¨Æ>¨–axýÏB5õC6ö‘(s.(ÀU T15qá­_Æ­b{ôY¯<øCß„7ô"TË0d}U'9›F8ñ¢‰a–Çàʹ-ÏÚR}Ð='¡{Ÿ¦  Tf#DÛ…eØÂ2¢¹Kˆ¦. 
šývmnOW² /Õ:L÷S÷ ‰:8g Œpâ}„·~ ³6‘¨sÒ¶HªèŽCÐý߀jØh¹…µ°AÅx"Ÿ³EÔ÷~ð£×£›;]–…Ã0Ì=ö ,¤†a6_H­³?ÅÔ˧œóGw•˜²¦™ÆÇ·PûU]+– Dli…&ÜMžC4w6?÷ø¦Ü)ªiºý¼‘W!k[IƬgå”s0ks0³Ÿ"š½Œpü}¸j~ïl^5”­Õ÷,tû!ÈLϵ6‡hò‰s”ÝT\Þ†Ðwué'tªuªãt× èÖˆš¸5Í„@X†­¬‘”šøáÔù=|.t‚&Fö?Gm¦½é IÓuazûØì t<õp{ö£ÎZèΣðÖø5€ŒƒÏ«…ø^_Ý×ùXDm®ˆZ‡…Ã0Ì=ö ,¤†a¶NH­Ãbj§>Dª^ßs$¢ÚCÖ·C$âåøpjWÇM‚höS˜ì4\9·3^¾ò¡Z‡¡»NÀ?ø-ˆšfÈT <®œC´xÑôÇ0K·M}”vï¦%YÝyŒ*:ŽB5õQkž³*?õ¢É`–nÂæ(Xþq¾ÖŽ#P-ÃÐG¡š)ÇHùq°v\Í6yáøûˆ&Îí‰i‰ªe^ß3ÐÝ' š‡ jš!ઘ…Ç߃Yà[ ¸‹ *Ã~•&ò5tÆá邾f¸ »6UöMð¹«\k!¼ú÷´ŸDÔŸüð/N)¸3€8¹•?‡…Ã0Ì=öK,¤†a¶^HÝÅ¿XkGþ'ßÙ7›Òì™W_wNBnÍß:?ÜÓO@¦¡:B·@w€lèŠC°U<&>³2Ijþ*ÌòmØÂ2vVû› ÿ)ªm„½|›2ŒtPŠªR*kˆ¦?A8ù!lvÑÜå]%¦D*Ývºë8t×q’;~zc*[8õ¢éO`®ÁäfvT>“L7Bw?Õ2 Õ~*ÓYÛ (µK«pA Ñô„·ßE8þÞ®¼ÏUct÷ èî“ÐG jš)j fe ád\±UwLKœ«ä!¼¼ƒ§è3 ¶5Ή´6a…&ò­ÍÃÁíÙàó;"êÿ,súÍï—gÓŸüð/NIëF…Ûò—&,¤†aî±Çc!Å0 ³­BjSSn´Cµ¤v¢æ~Êú‘šG5OÒfö"¢ù«0 ×` K€³;ÜhéAu7ð ø‡B'!”H8 [Z¥Šœ©ó°+c0ËãqþÕÎÜ Èt#TËTÇQèî' [†DD8ñ!¢És»â¾–õÐGHDõœ$Ù&V`V¨²Ð,Ý„ Š;öwp•Ývº÷)ˆdd*Cm–6‚-ç`³3'Þ§ßqæã|g®©¶è®'à |²®P0Ìê$ÌÜ%˜ùk°•ÜΗ¹1®R€j=@ÚÑD¾8øÜ…e¸Ä”-®BH±k?ƒ‰\þù¾QßÿóÑ£å ùãç³b†¹Ç–‚…Ã0ÌcRëðŸ#eGϾþ±ýp½³gNe,ìiáôé­S"QY×Ý~ºçI¨Ö49ÏKÄ“éJp…ED ×aæ¯"¶¸´ûDÔsȺ6xýÏB÷< Ýy,®üHPVQP†-­P¸öôDs—aó ¸; {Û_q²ª¡ ªë8åzµC$ëhJ«0K7©õpê#˜…ë»tY­M*C-nÇá ¿á×@x)SÎÁUrH?ñ>Ìâ ˜…ë5‹^»„L5@6PnÙW ë;)°ÝصY’¹³—HæîÒj"Ui`ÀÀ Mý‰:j5lP‚\e ¶¼¡v×D>TÂðÆÿ!ý[?üûåùþý?pZøãÇù:XH1 ÃÜckÁBŠaæñ ©uXLmâÎKBÖ¶Aµ€×÷,TÛµyIª8 +°Å%˜¥[ˆ®ÅS¿f°SÛ×ÕzÞà ÐíG¨E±¦™ÄTT…+ç`‹+0³)‹iæÓmŸúFÒ°ºã(¼¡¡ÚÞQå,Ìò8¢ÙO޽»{EÔ~iHMSÔòæx…ªÙ¤ŽÛ,l9‹hâ„“Á,Ý‚ÍNmþ—‰Z¨Lt×qx^…j €vX¸ü"ÌÊ8ÂÉs°¹Ù½ÓÖ&$Tû!xýÏCeº(ø|#c.OŸ!¥¸ ¸ãƒÏ]P £…+?©¾û—ßÏœ>›ÝÏ—"¢Öa!Å0 sG- )†a˜#¤Ö!1åŸ>ûú©}qpÈž95àœ…T|pʇ¨i‚j L˜¶Cu­€Ÿ†ˆÃŠmqfy fáÂÛ¿Š§~íq¤†î<oèE趃à\ÛBb*¬À—aÖæ`®!šüÑìE¸°¼•[?E­_Gàœ‚n?LQ‰²Ü ¢©ón¼µëZóè:x ˆttû!èÞ§á¿ ‘¨¥PpçàlW^C8ö.¢©`oÂi}¶r'„Ÿ&YØuÞðËÐíGè= WÊÂf§Ž¿‡pêƒ"TË0¼¡oB5B¤ê©]ϸ  I„—sÛ2‘ÏU‹§Î¿œÿÉw3§ÏŽí—uøÞ~ôºbTý;ýµ²b†¹ÇÃ0ÌîR'tµ7 IDATëX‡q)ÄèO¿û›?Þ/ëóE1% :áü¼Þ§!3ÝwF´ÃQëY~‘*¢®¿‰hî\PÜڼ݌ÒÐG û ûžn;QÛ !$à@UAÁÕ¦)wãïQ(÷}\Oj< ÿð¯SE”_!œ5påÂ[o£zñ`W'(¯¼F_¼ˆºç$¼ç¡;C¦3w‚éMWX†-,!š¿ 3wÑÌ'°Ååúºë'~*¢ÒRוs”ãuíŸáªÅ¸5×hgBxÇ¡ûžjì#‰'I¼x‚‰©-È×bµóEÔ:,¤†aî±Åa!Å0 ³»„Ô:Öa¼)•ý/ÿãk?Þ/ë”ýÿÙ{óøºªóPûÙÓ4ÍódÙ–-’lãl,6† ‰Œ N.%éFímo“@sO’æ¶÷~Uš¦Iš¶H°Û€<Û²e[’5ϳt$sö°¾?¶l“¢$<Îz~?ÿƒÎ°×z÷ÞçÝïzWý†[•¤‚z£|m‰Q²5­uöÁÜQa·aù` ѦØ}8áq°mù}>IîEÏ_Š(À(]ƒ–YŽš”yöÁ›ÙÊHã3X]‡±zŽò»*¥o"za ž…µn#l*ŠîqEÔÔ0fÛkDO>=Ò†0ÃàØ2ÍÀ(¬Æ(]^P…âKFñ¥ºM¶mË]f9ÞíJ©þçµc¢–½ð\…aR¶#á "!¬Îƒ˜-/áDB³ý’dŒ~'ªŽžW‰^¸-­ÅH8WžDXaÄôèÛ‘/^EÔß~ëÛ·Ž„&ëcIDA )‰D"™#÷”BJ"‘HbSHd$xùûjÚç¥'×éªòx¼ÄËlÚ¦&ç1|% ª»sÞô(ÎHÑS/œkö,+nÞ^ràMD/¨FKÍC/]ƒ–QŠš nŸœH™"rô ¬žìÁ¦ßz¿Q¶ÏÂMnï£Ä ·¶c#Bƒ˜0[^ÄjADBryÞÛ‰áC/ZQr•ÛlÜŸêöÿÒ=`Û8SƒØÃíX}ÇÝ>`}ˆèô™wƒª¢¥ã©ÜŠQ°5%çlSrv—æ5ï+‚¢hÇ”“~+ BàY°½° 59Ç•…³»{ +‘ÎDÂ1ß–˜ŠWeÛNmwÿ@ð{>²qdl<6Ç …”D"‘¼9§‘BJ"‘HbWHe'ù^_CiZ¦mïz¶©'xKeqÜ$¼"2µÍ™­wFÛSÍÖW±zÜí×Ïö9’¼£$Á—ŒQ|Zæ<ôÂjÔÔ\WL1+¦¦†q¦FˆžØŽ5p5!ÍÝ…,g!jr®+¢„ã.Ÿì>„Ùú*ö`3bfÜ}8—¼³øø…UèÅ«Ðó– &¤¡xÎ-›Ålv«ÙúŽcµžmÈmU»†ÞÎTZ½Ç0[^ÄiGñ&É ~»÷¥ð$JBš+¦r+Ïîb ®´‘»#_hõ¼vä‘)ÌÖ×¢ÇÿB î¥¸¹ÇïxåõÚõ+ª‚ÃØØ30È÷|”¡‘ј‹R‰D2G.#…”D"‘Ä®ÊKö󿯯¦$DĶyæTß{íÄ.Çq‚;îØ‰ïÌÎ ˜=Gêœñ¡:H•gó…GMHC/]ƒž»ØS¾d* ¸ý¥fÆA8ÇAM€î!ÜŠ¨®ÃXû°[SCîò<ÉÌäÔ”\ô¼¥è…U®˜JÌœ­Ìg›Æ‹HèlÏ/Å“àJ’ÙêB«ÿæé—qF:(³½Žd~øNáI´ôRŒ²µh9 Qü·*J8!„èôïœïxQwÞõÍj„]ÿ‘›7o\¿ª¯Ç wpˆüôQú‡FbrLRHI$ÉiŒR‰D»Bª05¿»®šâÔD–ÍÓ§ºø÷×Oùó®¨êlÛ¹mk[<Äp¬¾6ààÔ)B¿¢·þŽYT51Ým¬]P…^TbøÏ.Ir»ŸÂÁ™ÃêiÀ<½{àÎd¿Q;¡Ó<¨i…è9‹Ð –£e/t—âé>(ª&fCeE°›0[_Å8((ª†QvenñJ´¬ù¨ÞdPup,œè”Û›-2é6¢W4÷=‘VǸQŸÿR°Tèz¸à[®cãê•x=}CÃüûC?§w`(&Ç&…”D"‘Ì‘¿H!%‘H$±+¤JIܽi9…³Bê‰ü×¾ßîçãî³4'ObJQªÝ.Ïì 5¨¨þzÉUxnBÏYˆâK=':P܇ì‰>ÌM;±G»p&û])"¹ø!ò$ ¥—¢¯pÓ§£x’Ü]ÏâVN9C­Ìì}1=‚âMFʨ‹‹ŒœEè%W¡ ÏíÈw¦Šmf13AäÐãGÌæí_Œgu†[n¨¥ví*|ýÃ#üÇÿ «o &Ç(…”D"‘Ì‘·H!%‘H$±+¤æ¥'ó•Úå¤$0cÙüêx÷hžóµq(¦J…A)¦.PÂàMFË,CÏ[‚^X5[é‘2[%Ü 8+>Dt {  ³ó Vç~œ‰œ™Ñ³ËÆ$5) -§£hzñ*Ô”lU”Yqø1%Dd «ëჂª¹Ñ%Ý‹ž·£¨5%wvG>aGÛÅÔpšš7Tl S* 
_œëïïÝ´ë¯^ƒÏëapd”ÿøÙctôôÅäX¥’H$’9òK)¤$‰$v…ÔüŒ¾R»Œ¼äfL‹_ëà'‡Z~ÿ›„øvTówn«‹‡ØÎŠ©zTíy¦¿DÁ›Œ–Vˆ–[‰QT–]âOq{DÍŒcÆ™èCñøÑÒKQ3ËPÍSŽ™tÅTÇ>¬ÎC8ãÝrùÞňSB=³½`9zÑJ´´¢s»†'Ó£nß(_ʬtzÃú=gv™eÇ¢-/‚•z‘qBC(š§r+zAu»šœT<‰÷ÆËø·ƒ=¬×i*¿w‰õM¯aóúµø½^†FÇøÏG§­«'&Ç,…”D"‘Ì‘¿H!%‘H$±+¤*²RùÒÆeä&ù™6-ihã¡Ã­ð}ÂaÜV©wTO}üˆ©õµBx‚¨l”güy$Þ$Ô@zNza zîâÙFæ¸"j°«ï8V笾FÔ„t´Ü Œ²u蹋Q…(š‡3;¸‰°û³}/VOöH›¬–º qJFK/v+nÊÖ¢e”¢xÏînh6aõÅêkDñ$bVãY¼5! ´ÙÊ)7ª`Û8¡AÌÎD›_üíU~’ ŠObõ4¶+Š Ôí¾7^Æ}¾"êìoóúµl½öü>/ÃcãüèÑ_ÒÒÑ“c—BJ"‘HæÈc¤’H$’ØR•Ùþׯ¥ä$ú™2->ÒÊ#GÛÎÿ¡HŠ)ÉO < î®m9îÎz9‹Î‰¨Hg¤«÷˜Û »¯ñMïW“2Ñ‹V`”­CË(EMÊBñøÝFÚáqœ©aìávÌŽ}ؽDZG;䤿õ(¡xü¨©蹋0ʯAÏZÞ·rmz{¤«ë fë«Ø#íœi<¯ø’ѳb”­ÃSqÛ;JÕÞxW@XQœñ¬¶×ˆv’bê"¬(ö`k'áá¯Æ“ˆøÜÝ_¯ÃQ‚oeÓ‰ë×­ææMëIðùãÞ_ž…×a¯œ•H*":…3ÑÙu³éì¡V„9Ï4ÜÏÌ(ÁSyFéÃËo5>G "SاˆžØ=Ûûa˘œ'"<²§¿Ëxß·u;ÇâeÜïDDaênÙ\K¢?щ ~üø“46ŸŽÉùBJ"‘HæÈD¥’H$’ØR+ 2øËk*ÉJô11¹w3O|çý5âXLÕ¿•å$1üó?[%“<[isFÉ*ð$¸"ÊœÁ™ÅꀖWùߟã. O`õŸ rô DtÅðK1õ{ˆWuÇÝ÷ÔjˆzPªÞég­«YÎo¼ž¤„Æ''ùÉ/Ÿ¤áTKL΋R‰D2GF*…”D"‘Ä®ºª0“ºk*ÉHð26ù}§ØÞtáv rà0ŽS·ãŽ­q‘DÕל:EèuïZ1¥¨(žDôœ…x*·b¯:WiãXˆðæéWˆž|{°éÂ툧ê(šŽ^´£°-wZz1hwW>Ή)³ófû^Dhèí –wEœP5´Œyx+·b”¯w+× !lÄÌ8V_#‘C¿ÀêoÛº _«eÎCÏY„wÙûPÓKþ[Å”pâÌVO‘#¿;â.ÅDæ“gˆg¥:"¨¨ÊëÏwÕò%Üvóf’Mò௞æÈ‰¦˜œ)¤$‰dŽtG )‰D"‰]!µ¶8‹¿X·˜ô/cá(?xí$/œî»_µËqœ S1þ£oøÐrã­ÜŠ^¼ Å›ˆ¢àØsóô"ÇžÂlF˜‘‹"Ý‹âKA/XŽž·-gZF Šf „ãV℆°‡NmÞÙö"2wÂCË,Ç»dv]bº»c¡ˆHkà‘ƒbv˜Ý©P\”ï7ÊÖâYtƒ»Ôò·*¦86Îô(fû>¢ ¿Ý;ûšøE„'q&¿/B_–"ê°rébn{ÏR“˜…xè×Ïpðø‰˜œ')¤$‰d޼P )‰D"‰]!µ¾4‡?]»ˆt¿‡Ñ™(ß{í»[û/æWƘBÔ£j·Çô½áGË]Œ§â:Œ¢_*ŠîáàD&±:ixÒ]òeNƒsñ—aÝÍ/o ZÖ|ôœE¨gÄ”m‚Å™Ãj&Úð$fÇþ¸¸i¥n£ñ’Õ¨©¹³Ëâ@˜Ó؃ÍDžt%ǺøÇ“½Où<‹7£øoS¶…ÄìØ‡Ù¾ìø«háIœ‰þŸŠ©¡¯êv¶Å˸?ÿ¥`©­«õ*ê-ë;ªWðñ÷m%%)‰É©)~öÔ³ì;z<&çK )‰D"™#”BJ"‘HbWHm,ËåOÖTð{™‰ò¯¯4²§}àâ?€!~iª¢nç¶­qñð5V_[*„Æ¢˜Òó*ñ,¾=jbŠáCÇ]òÕ}ØmT=ØŒ m^úDÄ“€šZ€Q¼-³ -{!Z Tl'2‰˜Çî?I¤áÉ·ÕX=PSóð,¨Å(]šV„âMBAAXaìÁ¢'¶»Ë#¡ ·Œò­œG¹•x*oĘw5ª/47Š)aEÝ;öavqf7>…wku[<‹(¡ëAà¢ß—UÌç“·ÜLjR2¡é)}z¯nˆÉy“BJ"‘HæÈ¥’H$’ØRוçqçê…|F¦#|{O#¯u^²ïw÷YšŒ31UªÝr¥«^P…gÁµî’¸”<o¢+¢¦G±zŽmy »ÿbfìŠèÓ¤x“ÑÒ‹1ÊÖ¢e–£e” &eƒª¹b*<39àûÉç°š‰}Ñ¡ &ga”¯Ç(Y–^ìV!©Š`6=ý2V÷œñžÙ¥‹—UGÏ_Šwé{ÜÞcÞ$PUΊ)!\6܆ÙòVÿ wG¾Ù±¾[Ä”Q\21¿dA9Ÿºåf))„¦§yìÙçÙsàpLΟR‰D2G&$…”D"‘Ä®ºqAŸ]µ€TŸÁÐT„oï9ÎÞ®¡K~ñ'¦Ö× á ¢²ñJ;6½°£l­».­Õ›4+¢F°zŽa¶¿ŽÝ'4„0g®¼ÄÄ—ìö/šw5zîbÔ”\T_ h†»Äpj{¤«ë0fë+8cÝ±×ø\QPÒ1ÊÖ¹Qé%¨I™(ªŽ°Mì¡̶ױº`Íî8è\9»Ù)ÞDô)^ƒÁ©0ÿüÒqô _¶ã±àïÕS¿s[m\4ô½bÄ”¢ ç/Ã(^‰ž·5£Ô­^13†Õ{«cVÿIœ‰>DtúÊOP|)èÙ 1ʯq—&ç€n¸Í½g¢«¯«û(VÏÑØ—¢ úÓЋW`”\…–555ßÝ9Ï1qF;1Û÷buÆnÅ™¿$}¢ÞÞXTÔÄtô¼¥x–¾=w±ÛïJQνæLöîÃDOlÇ ¡ø’cî:?'¢úïÔ½7BárЍ3,,+áö¾ôÔT¦ffxò…yáÕ½19ŸRHI$Éé„R‰D»Bê–Êb>U=d¯Á@(Ìÿ{é‡{G.ïÛø­R_bjíBõ¨”\Ò/Ö ô¬ùèÅ+Ñó–¢e•»" ÜQ}X]]5Öˆ„bkb5Õ›„–»Ø­&*¬rÅ”¢¸=¦„ÀëÄêiÀj߇=ÔŒ3=zù—¶Í•pùR\iX² -»Â]ž§yÂÆ™ìÇjß‹ÙuØíç55¶#1ÒÑRrÝ%¢KnFË(E1|œë/åÞDx³}‘£¿Uu›ê_áKø¤ˆâ²ï2:¿¤ˆÏ|èý¤L‡gxzçËìØóZLΫR‰D2G~$…”D"‘Ä®úà’>^5d¯N(Ì?í>JCÿ•á€âSL]»M=x±Å”bøÐ2ÊÐò*Ñó—¡ç-9[y"f&°û1»`÷7b´_‘‚æ-×›ˆâID/¬Á(¹ =)jRÖì‰f#„ƒ3ÒÕÓ€yúeìÑNÄÌø•ÑË—‚ž½½`9zþ2´¬ù {ÀqSØ]]¡Ö{g²/vDÔ§îEMÍÇ([‹§â:Ô@á›wäsœð8æé=˜í{ÁŠp%ö–ŠWpç]_ß&%¨r‰åúï`^q!ŸùÐûÉL 0ŽðÌ‹{xæÅWbrn¥’H$’9ò)¤$‰$v…ÔmËJ¹my‰†Nh†ÜÝÀñ+Ëý‡qEUêžýìæ{ãå|šSõºº@ñ$ ¥¡å,B/¬BÏ­DI¸ŒLcõŸÀê9êö:}EöˆzGã÷PÒ0JV¡¼aüB ì(86öp+Vï1̦]Øã=ˆð\†\Gñ%£e”¢ç-uc•µ¼ ®ˆ »ªûˆ»ÿ‰“—•IÔ4yiß!yz{LÆ@ )‰D"y3RHI$ ±+¤>·º‚›* ðjãS|í¹CtOLÇjvi Á§?³%.öY1UªÝ‚ª»$™óð.Þ‚^TãÊ UC˜÷!½·hÓ.¬ž£—¬R¬¢¦æ£,Ç[¹Õ­–JHCQuPUDt {¨«§{è4V×Aœðäù÷/RToZ ÏÂë0Ö¢x“ÜXÙ&bz«÷ѦXÝ ˆÈ¤ Ès¨úè…Ux—½-£ Ÿ#Euû€M cõ6m~1=úŽÅ”O…ìéÁï2Þ÷­@Ýαx™ê/~õ›¥aÇ ·Çâñg¦ø“O~„üì,¢¦ÉžGxøÉgb2RHI$É)R‰D»BêO×.bË‚|¼šFûXˆàs‡è›ŒÝÕü†Æ7¶¬¼¯"+%h¨j[<œ{ãÿ¶¥VM[ô,»y£Q¶Å—Œ¢ê®Ü˜qwc‹žÜÕ}DЍ?œÖ³—²¦£g–£ç/Øw5jjž+¦PˆºÏÍî#X=G±{Žõ{v»S@ÓQ“sð.Þ‚gñ*ŠæÇ O`¶=þ4fÇ)¢Î7bÞ$ôœ Œ…›Ðó—¡&eºs +‚3ÙÕuˆhÓ‹ ð–ûKÅ«ˆ²m»´½§/øûzûôLì.MKMáO?y9Ù˜–Åk‡ò௞ŽÍ˜H!%‘H$oΤ’H$’ØR_¸z17”çahm£“üŠÝ‡$¯Á76¯`Af –ãÜ·«½/xcyA[|¬Þãà˜ð³âUD½xð``õ’%A]Ó¿ØÙÛÇ¿þø!&§b÷þ‘šœÄŸ}ê£ææ`Ù¯9ÆO26oñRHI$É›BJ"‘Hˆ]!UwM%›æåah*§G&ùꎃŒLGb6©>÷l®¡<#ÓvØÝÚÇ?½xì>Ks‚;·mm‹‡sÑ<õ|m¤ñ¹{­þƙǖè@  ç,BË©@Ïšš^âŠ)ÇBD¦\15ÒyúeÌö½8}îû’³1æoÀ³ 
-Pè6ãFAØQœ‰^ÂŽyúeDtæü—ýI~OfªàYxžÅ›Ñ2æ¡úSA3fãÂíÄjß‹ÙwEUßôöxQÛ‚Á€Öë>ýÁ›ëÖT/MÕ5®þ~þíÇ?cl2v«õ’øÂ§?FQ^.–m³ïè1îì×19)¤$‰dŽŸ})¤$‰$v…Ôÿ\¿„órÑU•–á îÞ~€ñ°³qÈHðòµj(KOÆ´žoéåÛ{Ž#Æm•zGõÔïÜV™cõ· ¡Q)‘WèK{ÐÒ‹Ñr£ç-AË(CK+Dñ¥€c#"“83ã8£˜]‡Q4½¨-£Å—‚¢h³"ªèñß=õ":‡;ç]‚Hyç'#Œ IDATð,¹ Où´´bo¨Úo-´Ú_Çì>‚š˜ŽBLýTL }%P·³-^æéŒˆÒTêPIýøû¶²¶z9†®Ó30È÷øÃcã1;¾Ÿ/ÜþqJòó°l›ƒÇùÑÏ“c‘BJ"‘Hæø½—BJ"‘HbWHý͵KÙPšƒ®ª4 Mp׳û Ec·J#'ÉOð†jJI˜¶Íöæ^þõ•ƳO1uí6!t)¦.hö£¢e”¢ç/E/¬A  ¦äºÒC8+âV;)š»3œª!,·"Êly™èé=8½ˆ°ìu±Q3ð.?zñªÙ 5¿ÛøüLùþ“˜'Ÿ¿ÏlÞŒ'𹻿^‡£QI=óßn»y ׬¬Âcô ñýapd4fÇèõxøâí§¤0Çq8tü$ÿùÈã19)¤$‰dŽ”L )‰D"‰]!õ¥Ë¸¦$MU898ÁWžÝÏŒ»K¼ò“øêõU”’ˆØ6¿9ÕÍ÷_;ù¦×SÏvK0ÎϱúÚ€ƒS§½îŸ’wˆf ¥— Va”®EËœ‡êKUçl)!áIÌŽ}DO=Ý gzô÷4?—\”PeÎÃ[yzQ+ À}ŠÃßOsqç]_ß&%¨òfIý¡¯gÃU+ðz ú‡ùÁCÒ78³c5t/nûeE8ŽÃ‘Müðá_ÄäX¤’H$’7#…”D"‘»Bê®MËY[”…¦*4Œóågöµc÷A¹(5‘¿»®Š¢ÔD–ÍS'»øáÞS¿óõŽ ]U”೟Ý|o<œ§RL]„DÈ“€QrÆ‚ZôÜŨ‰éîÒ°3މÝw³c/Vû^ì‰>Ä̸œ¼Ë€^° oåÍ÷éùK‚j °-žÆþûDÔnݼ‰kVâóxèáßþ9=ýƒ1;fUUøËÏ|ŠyÅ…GpôT3?øé£19)¤$‰dŽ&…”D"‘Ä® ^_ÍÊÂLT ¡Œ¿ý;˜ŽÃüŒd¾R»œ¼äfL‹ÇŽwðãƒ-çÿ¼*hêvü-ÇÃy;V_[:+¦n—Wñy&>†-k>zÁr÷_îbÝ‹@ fƱzŽb·!¦FξNM+DÑ wÇC!æ fÛkX]‡0; fƶ)—ñ],âXD©Ž*ª²ñ|ßsÓÆkؼ~-~¯—¡Ñ1þógÓÖÝÓóðÅmŸ`ai Acs+ßýÉñyïÑ´Mßÿû/ï”´D"‘¼áÞ(…”D"‘Ä®úÚ 5¬(È@ŽôòågöÇt*2SùRí2r“üL›6´ñÓíoç£v9ŽÜqÇÖ¸Hþ¥˜:„Ç𣦢ç-Å(ªAË«Dñ&¡  ¢SX½Ç°ºbõÁnGX´ŒRÔ”<<ó×£å.FKÉMwwä3Ãs³õU¬ž£XÝGp¦†])%s« ƒQßê{·lXÇÖ Wã÷¹BêÞŸÿŠ–Ž®˜ž/|úãTÌsW)ž<ÝÆwî(¦Ž_UUV-«¤fÉâM+*+vÊ ["‘HÞŸI!%‘H$ð­]GÅ®Ö>l'¶î‰÷lYAu^:‡{G¸ëÙ1‡ÊìÿëÚ¥ä$ù™2->ÒÊ#GÛÞÉGÆ™˜Z_+„'ˆÊFyUÏ&:†5%-§£¨=JB( ˜a¬þ“Ø}Ç1;`÷ŸrwØû­§I ={ZFƼkÐÒKPSrÜÆçŽw›ž·½ŽÝ׈ÕsÔS’·OœŠ¨Ï)Xjëj½ŠzËÛýŒë×­ææMëIðùãÞ_&#ná±cäajªR÷ì¶Í‡âá|–bÊí¥$g£g/@/ªA/¨BMÎvETt{´{°‰hËKX]‡Ïk¹ž·=o Æ‚¨ i¨‰™nÅ”máL áL cuÄ8…ÕsT6>«8´+ŠY¨{ññxöç¿,ºÞq…ãÆÕ+yßõIôûçþÇ~͉Óm1=?úÉÛ¨œ?EQhjï þG\ñǼta9k«—SRGjr2CG6©ª²S^è‰Dò†|M )‰D"!„ˆØ#ÓNM°£¹‡½]CWüqkëJ–å¤áû»‡îˆmßR—Îÿܰ„¬D““û´ðĉΠ÷¼+¸ÏÒœàÎm[Ûâ἞S÷¢þî]¹Þu¨:jb:ZF)Féô¢¨É9(ª†0Ã8“}Xý§0[_Áê:„ˆN¿å¯Ð‹j0 k0æo@1ü(þTÝ늩ÐöX7VçA졬Þcoë;â ‡vE±‚ºÝ÷ÆÓ°/¤ˆ:Ã5+«¹uó&’›˜à'¿|ŠcM-±=OŸø0KÌGQZ::ùç=À•úü²dA9«–UR^\D % C×0-‹‰ÐÔ¦¬ô´ò‚—H$’sH!%‘H$ÀŒi ¦µ†§#ewkÿ+¦Eá·®¤2;€ìíäkÏŽé8¬*È îš%d&z›üh¿9Õ}áŸãNL]»M=ø®SŠŠâKAK+Ä([‡Qº55E5v'4ˆ=ØŒÙú fÛ^Ddò¥Q¶£ä*ŒÒÕîŽ|žWL9öxŸ+¤ºbµḇ0Þ‰Sµ- èa½NS©C%õB~öšêe|xëõ$%$2>9É¿zŠ£'›cz¾þø£dù¢¨ªJkg7õ?z˶¯¨c\4¯”«–/¡¼¤ˆ´”d<†(X¶ÅDhж®4žÜtçmØ)/|‰D"yCú&…”D"‘À®Ö>Q‘™Jšßƒ¡©Ûfh*±1^héåPïÈu̺ªò¶®dqv*¶#xµso¼p$¦ã°¦(‹¿¸z1 ^ÆÂQ~øú)žké½x_(Ä·£š7¸s[íX<œçïV1¥xPSr1ÊÖâYxjJ.ŠæA8&bf {è4æé=˜­¯âL^Ø/× <ó7`¯B/¬r¥ë>Pu„câŒ÷b÷5bÎVLÙ#àXñ}Õ"ê‚‹¨3¬ZVÉm7o!91‘‰PˆŸ>ñ5žŒéy»ã¶Pµx!ªªÒÖÕ÷ï}¨i^Ç6¯¸«k–3¿¤˜@J2^¢(X–ÍäÔ½ýìo8ÎëG°›þãž»wÊŒK"‘HÞÃI!%‘H$p÷³Df¢ÕE™ÌOO!ÕïÁ£*8–ÍÐt˜cýc<ÛÔÃñ+Ã]xu¸q%‹²R°ÁžŽAþagl ©«K²ùóu‹I÷{›‰ò½×N²«µï¢~§p·UêÕSbj¬¾6ààÔ)B¿hÅ—(…AÑ ðøñ,Ü„wù-¨IY®ˆ6":…=ÔŠÙ¼³õœÐE¬tTTÔ„4Œ²uè…Õèy•î2>ÍB ¬œÉA¬ž£˜û±ûO\Üã¹RqŠUŸ^·;Oþ"ê 5•|ì½[IIJbb*Ä#OmgßÑã1=Ÿùð-ÔT.BÓT:º{ù—ûÊLøòV–ä³¶f JKȤΊ(˲MOÓÝ?ÈÞ# ì;zül5—R‰Dò¦J )‰D"»ž= ¦M›²´$ÖgQš–DŠÏƒGSqAزéÍp´o”ͽœº¼ ‹=:ߨ²‚…™)XŽà¥¶~þqwCLÇacY.Ÿ_SAšßÃÈL”ï¾ÒÈË헦ɼS±”½((ž$< 6â­þ€Û#J÷ ŠàŒv9±³y7NhðÒ–îE ¸ÕRyKв $¤¡hÂ6‘bj³ëVÇ~¬Þ†øè/5+¢TÔú@Ýθ¨F<Ãw}}›P” Ê¥©J\^±€OÜr©IÉ„¦§xôé¼v8¶nÿàûX¹´]Óèèíå;÷?ÄÔôÌe9–‚œl®^QEż2Òx Š¢`Û6S33ô rèøI^=t„Hô·«¸¤’H$’9r')¤$‰äœ:CIJ©ÈLeEA%iI${ Œ7TLõMÎp¸w„g›ºi ]–cNñܳeó3R°‡Ý­ýüßcûÁãºò<î\½€ÏÃÈt„oïiäµÎÁKz Âa•àöÏn©‡s¬¾6 „¢j_Œ‰ÄÅ—ŒQºïÒ›ÑÒKQ¼‰ ¨®ð i|†èÉçp&Àq€KŸç(Þ$´¬ù…UhY Ñ2KÏîÈ'Ì0bfgz«c?fÇ>¬î£—å8/:RD]2u†% ÊùÔ-7HI!4=ÍcÏ>Ïž±Ý[ðn}W-_‚®étöõñÝ?ÌDhê’CNf׬¨bÑü22Óx=TEÅvl¦gÂt÷Ðpª™—÷"‰ÎùRHI$É9“R‰Dòf!u†¨íP™`yneiI$y tUÁ‚ÓSz†y¦©‡®ñK› §ù=|ms åé)˜¶ÃÎÖ>þù¥c1‡ðÙU Hõ MEøöžã—­©¼#hW%øìg7ß×ÀX}m鬘ºýJ<>Å—‚QXQqzÎ"wIœª!ž zü¢'¶ãL #̸ò5! 
-)zn%zöÔÔ|ÔÄ Ð Dt'4ˆ˜Çl}v)ßÉwÇÉÇ"ês_¹çVG¥þR‹¨3,*/ãÓ·¾‡´ÔT¦f¦ùÕŽ]ìÞ{ ¦çô“￉ÕUË0tîþ¾û“‡›˜¼$ߑκšåT.˜GfZŸ×‹¦jضÍt8LïÀ GN6ñÚ¡&§~ …”D"‘Ì‘ßI!%‘H$¿[HEÀ‚¬jòÒ)$‘äÑÑ5Ûq˜ŠÚô…¦Ùß5̳Í=ôL\š%8™ ^¾¶¹†Ò´dLÛæ¹–^þeOcLÇá=‹ ¹}Å|R¼ƒSaþ¿—Žq°çò6“—bê2'*¾dôüåxæ¯GËY„š˜¢ûÜ>QÓ£D›važ~{¬ 13¹âæTMHs{Kå/CÏ]Œ’†šîVLE§q&úp&ú1[_Áê>Œ=Ú»'c[Q”`¼‰¨;î¾§VuDPQ•—ó8–•pûßGzj*Óáž|áEžeoLÏíÇÞ{#ëjª0tžA¾÷ÀÏ»¸Kæ3Ó¬©^ÆÒådg¦ã÷úÐ4˶™ ‡áÈÉ&^=tô¼å˜R‰D2Gž'…”D"‘œ‡šES*²R©ÎK§ 5ÁSªŠå¦¢ÝSìíâ¹–^ú&/n‹œ$?Áª) $aÚ6Ï6õðÝWOÄtn­,æ“Õå${uBaþï‹ é½"ŽÍ´#œm;îØcõëk…ðQ¹,ØŠ'=)FÙÕ蹋Q“³Q<‰³"j³}fËKØÃ­83c`_ù;ש)¹èUèùKÑ ªP}I(Þ$P5Dtg¢{°™hÓ.ì¡Óî²ÃXÁ±ï›QmñôÛq¥ˆ¨3”—ñ™½ŸŒ@€™p˜§w¿Ìö—^é9¾íæ-\³² aÐ30Ä÷|„Á‘‹ó»HNfÝŠåTΟG^v&~ŸMU±m‡™H„Á‘QN5³gÿaF'&ÞÒgK!%‘H$sä{RHI$Éù ©3hªBev€ªÜtòSü$:ºª` A(jÑ16žî!¶7õ0<}qvÊONà«×WQH"bÛüæT7ß-¶—ü|xi)«*#É£ÓšáŸv7p¬ÿŠ+´Øå8NPŠ©‹”˜h´¼JŒ²µ®ˆ ¢z“ÂALbõ4`žÞƒ5ØŒ3Ù¶;“©(îý#P€^XãVLÕ ~·)»¢!¬0Îx/V÷"ǃ˜Æ™¾rǧ"êλ¾Y°ë¯u†ÒÂ|þÇGn%3-ÀL$Âö—^åé]/Çô\èÆëÙpÕ ¼ƒ¾Áa~ðУô ^Øk"Áïcýª–-œOnVƬˆÒp„C$¥x„-­¼´ÿãoï7I )‰D"™#5’BJ"‘HÞº:ƒ¡©,ÎNeyŽ[1•`h¨Š‚åBQ“α)^ïâéSÝ„"öÁ¹(5‘¿»®Š¢ÔD–ÍS'»øáÞS1‡U•ñ‘¥¥$®úÖ®£œ¿R7ÅÔ½¨¯7ŽžSQ¶-o‰Û°Ü— €˜ÇêkÄjß‹Õg¼a†cw2UÍÝ‘/µ½`zþ2Œâ•(†o6;SŽ…3Ú…yzÑ–¡!œé1®˜æçq*¢>ÿ¥`©Ðõ pEöZ+ÎÏåŽÛ>@VzáH”ç^y_?¿;¦çüÖ͛ظf%>‡þ¡þýáŸÓÓa6»ðy½¬«YNueyY™$ø}èšæî¬u+¢N´´ñê¡#ô¼³~†RHI$É›‘BJ"‘HxûB ÜÆç¦í°¶8‹šütò“ð:š¦#EL:Ƨx¹}€íM=L›fiQiZwmZNaJ"3–ͯ;ù¯ýM1‡?ª)ç•Åø ÞÉi¾¹ó(ÍÃWúaζ۶ÆÅƒùXýµÛ„ЃLL) ZFzQ zÞR·Ç’? a÷ŸÄì:ˆÝ{{´™z÷L¦ª¡x“ÑÒ‹ÜÆçËÑ‹V h@€pVg¢—hãvÌö½n#ôèÔåkÚî°KQœmRD]™ädóÇû 9é„£Qv¾º_îˆmò¾ë7rݺ«ðy< òÇAW_ÿ;úL¯ÇÃUË—°rébò³³HLð£k:B8„#Q†ÇÆ9ÕÚÎ+ÓÙÛAÆ!…”D"‘Ì‘J!%‘H$ïLHa2bâÑT®.ɦ*/œD~CGULÛa"bÒ>bwk;[û˜1íwô}óÒ“¹kÓrò“˜±l?ÞÎýZb:ÛVÎçý‹‹ñëÝÓ|ã…Ã´Ž†bâØÁ}–楘:OTÝ]º–¿ÔýWXãŠ(Eè Ö`3V÷a¬î£ØÃ§áÉwïd**ª?-{!zÞôü¥h¹‹Qt/Š "!œ±n¢§^Àê:„3Ùi«Äv)J4¨{)®¨·ƒ©…/ÆÂñæeerçÇ?Dnf‘¨Éî½øÅ3ÏÅt Þ³i7\½Ÿ×ÃàÈÿù³Çhïé}[Ÿ¥kkª—Q½¸‚¢ü’ü èº[1£ŒŽO¸"êÀ‘·ý¿ )¤$‰dŽH )‰D"¹0Bê “Ÿ¡qMq6ËóÒÉJôáÓ54¢Ž`2¥yx’=ìhîÅrÞÞ®` 2SøòÆåä%û™1-~~¬Žé8ÜqÕBÞSQˆO×èŸâë/¦c,¶*bÁ}ŠApû§·´½Û¯›±úÚ€ƒS§½•Ôóz“f &g£gW V¹KÕ3P aG±‡Ncõ6`uÂê?OÄÑHAMÊ@Ï_Ž^Tž]š^|NLE¦p¦†pƺ1O¿ŒÕ{{¼çâöÑŠSuç7¾p&E¦rþçö@vF:Ÿûø‡ÉÏÎ$jš¼´ÿ<µ=¦c±õÚ«Ù²a~¯—¡Ñ1þë‘_ÒÚÕý–>CÓTV/_JueEy¹$'& kB@Ô4Ÿ ©½ƒ×7ÐÜ~qv¹”BJ"‘HæÈ|¤’H$’ +¤Î01Iñ¬)ÎbYNY‰>¼º†XŽÃX8JËð$/w ðBK/–óÖaQV*»q¹I~¦M‹G޶ñБ֘ŽÃç×T°ua^M£},Äמ?LÏÄtÌëk|óÆ•¿(+¥ÞPÕ±wûõs^bJÓQÒѲÊÑ ª1JV¡¦ä¡¨±qF;°š°:öavxwWDjr6FÉjŒÒÕhe(I™oS“Ø£]8£˜û°ûO`õ€p.ÜÄ©ˆ²l;ÐÞÕ[÷/÷>X6£©±vüiþä!?; Ó²xåàaúõ31“Íë×rÓµ×à÷ùãG?ÿ-ç)EaåÒŬZ¶„â¼’1t˜Q4·wòúáNµ¶_ÔqH!%‘H$sܧ¥’H$’‹#¤Î01 ø<¬-ÎbiNY‰^¼ºæ&Ä¶ÃølÅÔÎÓ}ìnëç|ïËKrü͵KÉMòŠZÊ‚˜ÇnÅ8…Õ{ {°gü.3r8¬(ѺxQ‘¨¹M×´úöžÞÔïÜÿS¦gb¯q~ %™?ûÔG)ÈÉÆ´,^?|”~õtLÇåºuWñžMHðùç¾_ýÁ÷¯ZVY¢k:½}üëbr*öª3“ùóO”¢Ü\,ÛbßÑãÜÿدc:>×®^Éû¯ßH¢ßÏèø8÷?þäï•HKÎçªåK˜WT@jr†n`Z&ã“!Ú»{yýpGN^ÚM@¤’H$’9Ò)¤$‰äÒ©3LFLr“ý\]œÍÂÌÒü^¼gĔ助Æ1vžîãõ®ßýPY“ŸÎ_­_BV¢ÉˆÉ}Zøõ‰Î˜ŽÃ_­_ÂÆ²\ M¥ed‚¿{ö cáhÌ#ÕçážÍ5”g¤`Ú»[ûø/C8Œ+ªR÷ìg7ÇÅCÿÌÎï” ® j™å·+º!Dx{¤«í5¢M»pBƒòt>¨:8zþ2ŒÒÕèūΉ)UsÅTh{¨³}Ÿ+¦FÛß°+¡Ìq›‹s%%¨Bɧn}«—/A×tºúúù×?ÌD(scJôûùóOŒ’ü<,ÛæÀ±Fîýù¯b:N׬¬æ[6‘èO`lb‚?þ$Ç›ßÜ/qÉ‚rV.]Lyq”ä³Q¦e3 ÑÖÝËc8ÖxYÜ·R‰Dòf¤’H$.­:C(bRœ–Ȫ‚Lf¦’æ÷`¨®˜ŠÚ6CÓŽ÷±«µ}ÝÃozÿÊ‚ þòš%d&z™ˆ˜ü×¾&~sª;¦ãð7–²¡,]Uišàî혌˜17Žt¿—¯m®a^z2¦íðÂé^ê_>~ÎÚUE Æ‹˜Â6kEt*huo4Û÷=ù<ÎX—¼ñ¼­ÌMEÑ ŒÒ5hùK1 ªPSóQ /áJ¿©aìáv¢-/b÷5bv¼¡úlVLűˆºãî{j5D=(UgþÛ'Þkª–aè:ÝýüÛO~ÆèDì5Ô÷y=üÅퟠ¤ Çq8xü$ÿõÈã1¯µÕËøÐÖëIJHd|r’Ÿüò)N5Ÿýû‚ÒbÖV/£¼¤ˆ´”d<†¢(˜–ÅdhŠöž^;Á¾£Ç.k¦R‰D2GZ#…”D"‘\!u†PĤ,=‰•™,ÌL!àóàÑTÛfh*±1v4÷p´oôìûVfòÅk*ÉHð2Žò{›ØÑÜÓqøÒÆe\S’¦*œšàËÏ`Æ´bn™‰>¾vC5¥iɘ¶ÍŽ–^¾³§ñM¯‹715yÿ§j­áž *å]çfp Š/ÏÂZ´Œ2ôÜųbÊŽ…°¢gw䋞z«¯Ñí/ç"JuDPQ•7{שׂ©ÂÐuzùÞ?cxl<æÆhè:_Üö ÊŠ p‡#'šøáÿˆé¸­ZVÉm7o!91‘‰PˆŸxšÃ§˜WTÀºšåÌ/)&-5¯Çƒ¢(X–ÅäÔ]ý¼~¸ý ÇqœËÿ¼#…”D"‘Ì‘ÎH!%‘H$—WHa*jQš–ȺâlÊÒ’Iõ»bJÁŒi38¦¡”íÍ=4Œsuq6~õbÒýÆf¢|ﵓìjí‹í8lZÎÚ¢,4UáÄà8_úÍ~¢¶sãÈIò¼¡š’@¦mólSß}õÄï|½‡qœºwl‹‡•±úõµBxêQ©’wŸwšÉ)¨IÙx—Ü„šš‡–µ5%Çí/e[8‘ÎäÎxßxôä3õNßÉú”?þùXżâB„#8zª™üôјŽßŠ%‹øè{o$%1‰‰©Ïïy¯úU: IDATÔ”d*ÊJH¤â;+¢lB3Óôôþÿìw|Õ™°Ÿ™Ûuï•®zµ%Y½ËÝãÞ ôÞ†B ‰÷ûöÛð )Ú…dÙýv¿ɦgC 
½wlllãn²eÉVï½]éö2óý!É@Ìn7}ž?mÝ™óž÷ÌÜ™ç¾çöW×òQU5ÁÐÔùAC)@ øœÇ!¤`j©I|Á0Y±væ¦Å‘mÃn6b”%¼¡0}./UÝà z|\S”N´ÅȰ7Àoöc{K¯¦óð“åeÌ›N‚£½#üð½ý(üžJ±GðÓe¤;løÃaÞ«ëäw{‘nS¥âÂS‹×©ª¾™tq:U¡G†±è2tQÉÈŽ4d[<’1ÂI8X©x†+e{Â%¢î{ "CÕë+€;ÿÖß^¿v%‹æÌÄd4ÐÝ?ÀïŸ}‰Þ!Mƽþ®ÛÈIŸŽªªÔ44ñ›§_ÐtËòs¹õÊK‰²ÛÇ«Ÿ"ÌæQáp·×KW_?‡Õ³ë@þÀÔ[{P)@ 8!¤€©%¤& „ÃäÇ;(OŽ!=Ú†ÝdÀ0!¦|Á®@‡ÅˆI'3è ð«]5ìnÓöÑ+Ë™‡ éæ÷ök2Ž´¨~²¼œéQV|¡0ïÖuð‡ê¾Ì!„˜|µ;ƒ]l:†œåN]\F¥Î1­RŽL"êopíêå,ž7“Ñ@ÏÀ xîeºû4ÿ÷ï¼…¼Ì TTj›ùõSÏk:Ÿ‹æÌä†KWb2šUU'D”‚Ç祳§êºFv¬ÂãõMÙ8„‚ÏynBJ ¦¦:ñ«ªäÅEQ–MFŒ »Ñ€~BLÉ Ixƒ!~¿÷8ïj|Qó‡VÍdfJ,PÕ3Ì6hSH¥;lüxY)iBê­cíü×¾/¿Å¸¢òDH§Tl]·¶åB¸‡*WHª~=2Qâ®ôQpªR¨RgЬŒúö[BD}A®Z¹”¥ æ`6éâϽBgoŸ&ûáþ;n&oFuÍ­üò‰g5GBl ÊK˜SRHBl ’$qEÁåñÒ;0HÕ±:öªÖÄŽˆBH ÁÉ!%Lm!uâ†DN\$³RbH‹²i2 “ÇÐUU¥sÔöæ^Þoè¢kÔ£É<ü|õ,Ê’c8Ô=Ä7Ðd™Ñ6\VFjdÞP˜7jÛx|ÃWw ˜©\êPPÖ 1õUJø I’*ë·¶\Ha¯«¨pè}úõ:™¯ÜÅÛ[¶k./BH ÁÉ!%hKHtzX:#‰›K31êu¨ªzb _XQqC´»Øß9È»u8})“$IüËÚÙ%8P€}üãæCšO¹q‘<°¤”d»o0ÄKG[yæPÓi9¶ªà ËT*²±r뺥çý”¬‘Ê¥ªªV ëîDðY.PpÏWœ5ÉÚűú’…XL&†Gøó‹¯ÓÜ¡Í)Ð÷Þ|¥y9H²DS[>þ ¡pxʶ7Âlæ¢Ùe”åç’‹ÅlF'¯ùü~F]n¢ì6Ì&^Ÿw?ÜÉû;öh./BH Áç<ÿ !%ÚR=c^æO‹çæ²LlF=ž@ˆA¯Ÿh‹‰ƒY‚ÐDÅTûˆ›=íýl¨ëÄMÙ˜ô²Ì#kgS˜EX…½mý<¼¥J“ã)?>Š.)!ÉfÁ ñ⑞;Ü|ZÏ!ÄÔÌ,¢î}ð¡uª$UÈœÞðW/ZÀÚÅc1›qòØK¯ÓØÖ¡É>ºçÆk(+ÈE–eZ:ºøåÏà§\;Í&óËŠ™U”ORBV‹½Nª*øüú‡‡9ÞØB{O/W,_L\´¯ßÏÆí»yïÃ]šË‹R@p2BH ÚR}. §ÇsCI==./Ol$+ÆNyr Év ƒY–†ÆüAÚFÜìhéesc7žàÔSf½Ž_¬™M~|$aEeW[?ÿ¼õ°&ÇSa‚ƒXRL¢Õ‚;âùÃͼxä̸ƒ PL•«ª\‰Ì’ îF¥°M’”õŽõ[]h¡Ÿ)5ÉŠ…ó¸lÙ""̆FFxü•7©oiÓd_ÝuýUÌ,ÌG§“iëìæ—O>‹×çŸ2í3 Ì/+¦¼0Ÿ´¤¬ŸQCN'Ç›[Ù} Šöî^ÒS“ùÆ ×ãÀç°i×^1eO ΄´'¤Ü>.JOຢt,=Ýc^Ùv˜ºQ²bì¬ÎI¥,9š«ù3bjÔ¤yhŒ­}lnìÆš:Ó8¬F=?_=‹Ü¸HBŠÊŽ–^þõÃjMާ’¤hþþ’bmfÆü!ž©jäÕ£göåVQi•%©bãÝ«¿®Ù‘ÊEKUÕXqAˆ)…m’¨p¬ßqÁ½Ì~ëG_­ÈTž)5ÉÒùs¸|ùb¬ CN'O¾úÇ›Z4Ùgw^{³‹ Ñët´w÷ð«'ŸÃå9÷]èõ:æ—•PšŸCzJ2¶ˆtºñ©yþ`€aç(uÍ­ì=Tý™é’Ó’¹çÆkIˆÆðÁîysó6ÍåE)@ 8!¤í ©AŸE \]˜ŽE¯£kÌÃ/¶¦q蓇òã£X‘•Lir ñV3f½Y‚ ¢2ê P?0Êžö~67v +ç<¦(³‡WÍ"+6’¢°­¹‡ß~T“ã©,9†ÿ½¨ˆ›™1§6òFmûÙq¦˜ªD¦ì¼ îQ÷üøá¥²¢VH²tV„ã%sgqÕÊ%X- ŽòÔ«oQÛØ¬É¾»ýªË˜WVŒ^§§£·—_?õ<Î1×9k,ËÌ--bVQ>Ó’“°[#Ðëô€J dÈ9JCK>ú¹Ui)‰ñÜ{Óu$ÆÅà ضw?¯½¿EsyBJ NF)@ ~´a¿:¹[öXœ‘ÈåÓ°èutŒºùù–ô ŸüÒQœÍò¬$J£‰·™1étHà #¾ƒcloéeksaåÜ}'ÄD˜xhåL2cìà [šº©ÜY£Éñ4+%–¿[TH¼Ǫ̃?ÈãûxçøÙ]FQiÖoúÆê×.„kx¤rñ:UÕW ŸÙJš³“š÷n¹ž”„8Á ;÷WñÂ;5—!¤àd„à÷ö«:Y;Bʲ$3‰µ¹©˜t:ZG\üÓUtþíuBf¥Æ²:;…¼ø(b,&Lzù„˜òø©S{ÛûÏjLÉv ?[QNºÃ†?fC]¿Ý{L“ãi^Z?¸¸Ø#¾ú¸žM ]çºY”˜ª\\!©úõÈDMùÆ*´JR¨Â±þÃÇ/´{ï}Td¨z}pNwOœ[ZÄ —®ÂnµâtñìïQu¬N“}zÝÚ•\2g&&£žþA~÷ìKô ž•sçf3§¤ÓÒpDÚ0è €J0bÔ妥£‹}Gj8T{ü 36ÚÁ·o½”„x‚¡»VñÜ[4—!¤àd„à‡ïíSõ²¬™öº!–ÍHbuN † !U±é½.ï>ÆEé ,ÍL"7>’h³ £NF’ÀRôø©ía[su œ•˜Ò¢¬ütyÓ¢¬øBaÞ9ÞÁ?Öæ áÂéñÜ¿°€˜#Þ¿ûè8[›z¦Jó¶!Kë7®[uÞïÔ6R¹Ô¡ ¬Ÿ²bJˆ¨ αˆšdVQ7]¾šH«Q·‹çÞÜÀÁm ñ«W-cÉüÙ˜Fz†øÃó/ÓÕ{f`(ÈÊdniYӧሴa4‰`(ĘÛMs{'‡j³¿º†/ûêሴóÝÛo"51`(ÄGUGxúw5—!¤àd„àÿ¼»O5ê´#¤¼Á0˳’X‘•ŒA§£exŒŸ¾Ï—ÛÚ[g&qqzùñQ8,FŒ:U8Ì ÇOMï[šz8Ðufaψ¶ñà²RÒ"­xCaÞªmçÏûë59že$òÝùD[Œ {üfÏ1¶·ôN-¢òDH§Tl]·¶å|¿¾Oˆ)Iÿ³©Ñù®ˆZWQá0õ’ĦR»Ê ò¸åеDÚlŒ¹Ý¼ðÎFöÑævW¬XÂòs1›Œô ó§^¡½ûÌܲ¦§qѬ² eÇd4 I¡P˜Q·›öîö©áãÃ_}ƒŠH›•ï}ýfÒ’ …C||ø(O½ö¶æò"„”@ |λˆR@ =!å…Y‘ÂÒÌ$ :™¦¡1~òþ†½¯|Ìe3’X’™DV¬‡Å„Q–PTð… x|Ôô9ÙP×IMßÈ™y±‰±ó£e¥¤Ø#ðC¼VÛÆ“´¹¨ð’Ì$¾=?‡ÅÈ7Àî®e×9Z›ëoº‘ KLe¨ªZ¬;7U9 Î Uy¡Ýc×UT8ô>ýzÌ”¬V+ÍËáÖ«.%ÊfÇåqóâ»›ø¨ªZ“}}ÙÒE¬¼x>“‰þ¡þëÅWiíì>­çÈHKaáÌRr2¦…ÉhœQ!\½}|TU;꧸‹«5ÂÂýwÜÌôädBá0û«kxâ•75—!¤àd„àïßùX5éušio0¬°2;…Å™‰èe™†ÁQ~¼ñ£þà)W/K,ÎLbyV2™Ñ6"Í“S*Þ`˜>·êža66tq¼ÿôîÚ”ÉKJI¶[ðC¼\ÝÊ3UMšO˳’¹w^.³‘!ŸGwÕžõ5¹¾´+Qy"¤3®ßºnéÈù~½Ÿu1¥àT¥P¥Œ\éX¿õ¼ïßO3ÕEÔ$ŹYÜ~Õ׈²Ûqy<¼²a3»ÖdŸ¯¹d!k.¹‹ÙÄÀð½üMm§g—Ï´¤D.šUFnf:±ÑQ˜F@"ãòzèêíçPÍqö:B <-ç´˜M|ÿŽ[˜žšL8¬p°æ½ôºæò"„”@ œŒR@ü¯·?V-í©°ª²2+™E‰èe‰ºQÜxw tZŽoÒëX™ÌÅé‰d8¬ØÍF ²„ª‚7¦×奪{ˆõ]4 –s&8ø‡ÅÅ$Ú,¸ƒ!^8ÜÌ GZ49žVå¤pÏœ\¢Ì=~*wÖðñYZ‹ëTPœa™JE6V^ bª\UåJdÎÌÎn°ˆ¸÷Á‡ÖIªT©……å ³gðõ«¿†#2·×ÃkïoeǾƒšì÷Íã²¥‹ˆ0[áñWÞ¤¾¥í”Ž™œÇ™ede=^%K2a%ŒÛ륳§êºvî¯ÂœÖxLFß¿óV2ÒRP…ªÚ:þô«šË‹R@p2BH ð¿ÞþHµôši¯ªª¬ÊIå¢éñèd‰cý£ühÃ~|¡ði=Å cMN*ó¦Å“mÃn2 —%”‰Š©^——]C¼_ßEëˆë”ÎUœÍÿY\L¢Í̘?ijUM¼r´U“ãéÒÜTîš“C¤ÉÀ€ÛÏì<ÊþÎA팯 NL-ZªªÆŠÓ&¦„ˆZ§JR… 
éZis~V&w\ý5¢£¢p{½¼±y~´_“ý¿tþ._¾«Å°Óɯ¾Åñ¦–¯t¬Ä¸Î,£0{qÑÌ&²,‡ñø¼tõ pøX{UãòxÎH—Û^ÍážaMÆrSi&˳’[ŠÌhÛãÂý`¤rñ:UÕW  ±¢„Ÿ˜Q-ÚýÓç,•$*Ö7–ýþÙ—5Cvú4îºîJb<>/ïnÝɦ]{5Ë‚ò®[»[„çØyýªë¾Ðg‘v.š5^•‡ÅlB'˄à ^¿ŸþÁ!ŽÔ5°k#ccg-¦õwÝFNútTU¥¦¡‰ß<ý‚æò"„”@ œŒR@üà­½ªÍhÐ΃­ªreÁ4f§Æ!Gz‡yཱི7½$6Â41•/ŽÔÈ" zô²DXSíN7û:ÙXßÉ€Çÿ…Ž9/-Ž\\Hl„‰_€?}\Ϧ†.Mާk‹Ò¹¥lv“ž^—ÿûáª{µY0ó“åeÌITv· ´>²íHÅÆ»W=~!ܾ˜º€EÔ=?~xéý·ÝXQ˜3c‰„D}K+•?£ÉXfLOã®ë®$.Ú×çgÃö]lؾ[“±Ì))äÆËVc·Zu¹xöÍ÷8T{üüŒÕbaÑœrŠs³IŠ%ÂlA–eUÁç÷Ó70Dmc3;öbhÄyÖcúþ·—™ŠJmc3¿~êyÍåE)@ 8!¤¸ÿͽj¤I;B*V¸®8™)±H@UÏ0?Úpö×;I°™¹47™)1¤FZ±uÈ’DXQ épzø¨½Ÿwë:û;^”žÀ÷c12â ðÛ½ÇÙÖÜ£ÉñtcI7–fb5èéuyù׫©éÓ¦zdÍlJ’¢QýƒülÓA•VTeݦ{Ö^/WC•‹+$UÿÙ]â.p%+j…$KKî»õŠs²$‰†¶v*û Z|´ÌHKá7\=.¤ü~Þß±‡w·íÔd~fæsókˆ´Úu»xáí쯮ýÜ¿0›™_^ÂÌÂ<’â㈰˜Ñët(ŠŠ/à§p˜ÚÆfö:LOÿ¹[ïþ;n&oFÇ›ZùÕ“Ïj./BH ÁÉ!%ÀýoìU#ÍÚRþP˜›J3)KŽàP÷?ÞxàœµgZ”••Ù)ÌJ‰!%2‹AN‚ ¢âòiwºÙÙÚǦ†.\ÿÍN€‹3ùöü|¢-F†½~½ç;Zz59žn)Ëäúâ " zz\^Ùv„ãýNÍÅ!ÿréŠ(À¾Žþqó¡OÿÉ6EQ*.15R¹Ô¡ ¬—T)ãBQ÷=P‘¡êõÀ“ÿö­[®£$7I–hjëà?{å ­ew&™ž’Ä=7^C|L4>€Í»öòÖ–íšÌSi~·^y)Q6;.›—ÞÝÄÞªêÏüÙdbni³‹òINŒÇj±`ÐëQŸ?ÀàÈÇ›ZÙ}°ŠŽž¾sÓwo¿‰‚ìL$$êZZyTƒ•xBH Áç²- +ë¶®[Û"î*ÚçQ“Ü}ÃÕÌ,ÌC–eZ;ºxô‰gñš‹5%1ž{oºŽÄ¸|Ûöîçµ÷·h2oY™|ýšË‰ŽŒÄíõðú¦mì>Pż²bÊ òÆE”u\D©*‚A†œNê[Úø¨ªšÆ¶Ž)Ó·n¹ž’ÜìOUâýEÑÖ;ŒR@p2BH ð×w«Ñ“fÚ넸kvö©\9§&8Xž•LIR4 V3&½ ) N_†¡Qv·ö³¹±›Õ9)¬›M¤É@¿ÛÇì¨á@× &ÇÓݳs¸¼`½ŽŽQ7?ßr˜–a—æâ°tübõlòâ# +*;[ûxdÛ‘¿ù9Eå‰N©bJ›¬«¨pè}úõ:™Ï®›õ9ÜuÝ•Ì,*@¯ÓÑÚÕͯžxϧ¹˜“âcùÖÍד‹?dûÇxyÃfMæ/73;¯½‚˜¨(Ü^/GŽ×a13-)›ÕŠA?¾£l dØ9JCk{«ª©oi›²1ýu%^åãO …5•!¤àd„à¾×v«±ÚRž`ˆ»gçPð…+WÎ)ʼnѬÌN¦(ÑqBL N_€ÆÁ1\ó§Åa7èsùø÷G©êÒäxúæÜ\.ËKì×ÑîtóÐU´;Ýš‹ÃfÔóóճɉ‹$¤(loéåÿ~Xý…?/Ä”¶ø2"j’;®¹œ9%…èuzÚ»{øÏ§žcÌíÑ\ì ±1Üwëõ$ÇÇÙ±ï/¾û¾&ó˜>u;†¿?€^¯ÇhП¨ˆrŽÑÜÞÉ9Ö4õ/ÏoÜp5å¯ÄBJ NF)@ @{BÊ ñ9¹_ºrå\Sžêœ ⣈0¨˜ò‡a…ƒ$ÑíòòoVSÝ«Íé¾5/KóR1ét´9ÝüÓæCtŽjï%=ÊläáU3ÉŠ$Vø°¹‡ßqôËHU èL[×-w›©É=<\ñeDÔ$·_ý5æ•¡×ééèéå×yç˜öªã¢|û¶HIˆ' ²ëÀaž{ƒ&s¹jÑ._¶“qrºzBDº\´tt±·ªšêºÍÄô™J¼Î.~õäsš«ÄBJ NF)@ ¾õê.5ÎjÖL{½Á÷ÌÍ%7.ê+U®œkæ¥Å±<+™üø(¢-Æb IÀ ñÔÁ&^ªnÑäxú΂|Vç¤`ÒéhqQ±ù=c^ÍÅaâŸVÎdFŒ`XaKS7•;k¾Ò±TgX¦R‘•BLMî}ð¡uª$UÈþU>ë•—2¿¬ƒ^Ogo¿yú†Ú[À?ÆÅwn»”„‚¡{æÙ7ßÓT ŹYÌ..$oF1Q‘H÷ÓP8̰s”–Ž.­åÀÑcšËÏx%^zŽöî~õäs¸<Ú’üBH ÁÉ!%À7_Ý¥&hHHùB!î›GVìøTªmM_±råsIF"‹2É‹$ÁjF/ËãòBUésû8Ô=ćͽš[Üü{ X•ŒA§£eØEÅæƒô¹´·®N¼ÕÌ?®,'#ÚN0fSc7¿ÚU{JÇbjjpª"j’›/_Ùeôzºúúùí3/28¬½´:ìv¾óõIKL$±·ªš§_GmÏÍLgAy Yӧሴa4?u/íä­>dß‘͎׿®ÄûϧžgÔ¥­J3‹iQ'~Õ+*þp˜AŸšÞ65vkfM©\TÈò¬d :™æ¡1~ºé ƒ¿æò’h³P±²œt‡`8̆ú.~³çôTW¨ NI–Öo¼{ÕãâtöøÖ¾Z‘©VO]s+߸ñj’ãã †Bì>XÅsomÐô8¾áÒU,š3^‰×Ý?ÀïžÑ^%žR@p2BH p÷Ë;Ôd{„fÚRÏô(+þp˜wwòûŽk:ßYÏšœ :m#.þyÛJ“¢Y0-žÌ;v“ä˜ †èqy©êæý†.š†Æ¦T,ÿ°¸˜E‰èe‰ºq!å „4—“Œh.+%-ÒŠ7æÍÚvÛ_FÏ©@в~Ó=kÅ‹Û)pßЬ{üL‰¨I®X±„å æb6éæO/¼B{w¯æúËd4òƒ;o!=-EQ8Tsœÿzñµ)ѶĸX.žUFAö b££0èd™pX9!¢ª7°ë`^ŸŸèÈH¾sû¤&Ž/о·êϼñ®¦ÇóµkV°x²o`?<û2ÝýÚZ[P)@ 8!¤¸ë¥jJ¤v„TXQøöü|Ò&*WÞ>ÞÁŸÎPåÊÙâþ‹ X™5¹øoë 8ÁAÝà(1Frc£ÈŒ¶a7ÐËaUů˜Úß9Ȇú.:œî)ËKJ¸8=,q¼”mÜ7Ö\N²bìühY))ö¼Á¯Õ¶ñ䯳uúmŠ¢T1õå¸ïŠ U¯¯î<ç»lé"V]¼³ÉHÿÐÿõâ«´vvk®ß z=?Xw+™ÓRQ…ÃÇêùãó¯œÓ6%ÆÅ° ¼”œÄGGc6™ÐédB¡0ŸžþªjëØ[U͘û“{_¤ÍÊ÷¾~3iI‰šYëoqÕÊ¥,]0g¼opˆ?>÷ ½}ÚxÙ’`fa>å…ùËæ—‹û™@ |ú)„”@ À?o=¢öŒy°™ šhoXQøÎ‚R##ð†Â¼QÛÆãû4ƒO¯»Ô44Æ;Ç;(Jpœøÿ~·Y–(Lp0-ʊͨǠ“ )*ž`ˆî1û;ÙÔÐE‡óÜnþà²RL‹G'KëwòÀ{û „Íå$7.’–”’l·à †xéh+Ïj:ÛÍbê p¶EÔ$k_ÄêKb1™áÏ/¾NsG§æúO§“Y¿î6fLOCUTŽÔÕóûg_>'m‰v° ¼„¢ì$ÆÇb1™‘e™p8Œ×ï£o`ˆªãõì9xçØÉ;Íii=¬/Ê˳|á¼OUâ½J{wÏÔ~É’$Ê r™SRÄô”$"m¶ef“QÜÇàÓ÷J!¤ºF=jýà({Ûûéõb3ê§t{Uå; òOT®¼ZÓÆS5ƒ¿^wiC]'…ŸR“ xüu2ùñQ¤FE`5èÑëdŠŠ;¢Ãéf_盺éuyÏI,?Y^Ƽiñè$¨ésòÀ{û)Úû¾Íâ‡KJH²YðC¼x¤…ç7Ÿ“¶¨¨¯euýÖuk[ÄëÖUT8ŒA}…$ñƒsqþU‹pé⋱˜Í ŽŒðØËoÐØÚ®½b Ößu;ÙÓ§¡ª*Õõüî™Ïj"mV.ž]Naö ’âNˆ(EQðúýôq´¾‘íû~®ˆšÄl2ñý;o!}b=¬C5Çøó[ëËrÙÒE¬¼x>“‰þ¡þüâk´tvMÙöÎ,Ìg^YÓ’“ˆ´Ù0ôË$IÚ*îZ@ð©ï_!¤TUUýa…!¯ŸÆÁ1ö´÷Ó>âÆ>E+¦Tu|Í¥dû¸(x¹º•gªš4ƒNLs›\wéýú®ÏR“ xüDtäÄE’A„AN–ÆÅT0DÛˆ›ý¼W×yÖw¸ûÙŠræ¤Å!GûFøá»ûÐâ·mQ¢ƒÿ³¸˜$›W ÄsUͼTÝrNÛ¤¨<Ò)º˜ZWQáÐûôëu2둉:WíX¾p._[v f CN'O¼ò&uÍ­šìÓõwÝFNútTU¥¦¡‰ß<ýÂY9o„ÅÌųË)ÍË!)>‹ÙŒNÖï0ê÷Ó;8ıÆfv¨¢høoÏ`Ðóƒ;?™~XU[ÇŸ^xUÓã}Í% 
YsÉEXÌf†‡yìå7hjë˜Rm”e™¹%…”æç’‘šŒÝfàÿq+ 1æv/‹‹vlO\@ð BH à …U£N Vö¨pr°kˆ–VÃÔ«˜úî‚|’ìã¢à…ÃͼpDÛïç?ZZÊÂéŸLsû ±›‚xÇßü܈/0>•/ÞAjÔ¸˜’%)*®@§‡=mý¼W߉ë,ítW±²œÙ©ãBêHï0¼·_“9)MŠæï/)&ÁffÌâéC¼VÓ6%Úv¡Š©©"¢&Y2o6W¬X‚ÕbaØéäÉWßâX“6Sòý;o!/3•ÚÆf~ýÔógô|f3óÊŠ™Y”OR|,6K:U¯ˆáXc {¦«ï‹ï\(Ë2w×Ô˜~xºXyÑ|.]zñ¸øœ¨Äk˜"•xz½Ž¹%E”ä2-9 »ÕŠA¯  3ærÑÖÝácõËî¾îÊ­â‰K >A)@ ¶·ôªÙ±‘D[Œäq1TF¼G9Ø=Dó ‹A7%Ú«ª*÷_THâ„(x¶ª‰W޶j:Ÿžæv´w„mͽı÷m§?@UÏ0%‰ÑÌLŽ!ÉnS²D(¬0êÒît³£¥—»ÏøŽw­šÉÌ”X$ ªg˜mЦ*OŽá_RD¼Õ̘?È“yóØÔ™Ž¥*8Ã2•Šl¬ÜºnéÈù~Ÿº÷Á‡ÖIªT9DÔ$‹æÌäêUK±Z"å©×Þ¦¦A›Õš÷ßq y3Ò8ÞÔʯž|öŒœÇd42¿¬˜òÂ]Ut¸g˜:úÉŽ‰üJÇjwºéõRœä 4)š›³^‡,APQõhco{?›ºNûx¿X3›²¤hTà`× ?yÿ &s27-ŽõaÂé ò§ëx¿aê.$|¾‰©©,¢NŒ‘Ò"n¸tv«§kŒgÞx—ÃÇê5Ùßß¹ýF ³g !QßÚFåcOŸ–ãêt2sK‹™U”OZR"vkzP ƒ ŒRßÚÆÞªêÓ¶ üýwÜLÞŒ àÌV{-.šUÆ5«—c‹¯ÄûËëïp´þìŠO“Ñȼ²bJósHKLÀnµ¢× Å@0ĨËE[WGŽ×Ó78D„Åü™Ï !%'#„”@ ðY!ãH¥'°tF9SùŒºq¡PTœÞÍÃcì¢qh£îìŠ),ñ½ÄYÇEÁŸ÷Õ³¡¾SÓ9øtUÑ¡î!u ‘m;¥c¶;Ýôº½'FS’M‚ÍŒI§C’ 8!›†\ìhéeKS÷iÛ ï‘µ³)IRû;ùÙ&m ©ÓãùþÂb"LŒxüþ£ãliê™òíVTZeIªØx÷ªÇµØï÷üøá¥:ÔJʦz[gpÓ嫉´Úu»xîÍ ¬9¦Éñ~ß­7Pœ“…$I4´µSùØ_8•ÇdIšÜm­„éɉØmãSºTu¼’fxt”¦¶vS“SÀš†Æ8Ò3ÌñÑÓüÎ4zYæþ…ùã¢ÀàÕñAc·¦sðÏkfS:QUt s£}#L²ž–c·;Ýô{|”'ÅPà Þj¤×!ñIå[Ãà4v³£¥÷”vÄ“€¹tE `_Çÿ¸ù&s²(#‘ï.È'ÚbdØà7{ޱ½¥W3íך˜ºçÇ/•µB’¥%Zéãò‚.>GÝ.^|çý3&>íÖæLìš—÷™Š¨@0Àðè-]TÕgdÔõ7EÔ$BH Áç¼!%_LHM2+%–YÉäÅGg5aÖë¤OÍnqOåC’ÎÄ˵ŠÍhà¾ùyDOˆ‚_ï®e§EÁ$zY⑵s(Lˆ:!qZ†]$Ù,gä|íN7î`ˆ¢ùñQD[Lu2’þи˜ªéáÃæ^ö¶91eÔÉ<²v6ùñQ„•=íýü|ËaMæeyV2÷ÎËÅa62äñóè®Ú/ÝSŒmŠ¢Tlºgí”x)¼ïŠ U¯¯îÔj‡ådqûU—ሌÄåñðêÆØu@›ö7\Mya²,ÓÚÑÅ£O<ƒ?üBŸ-ÌžÁÜÒ"²¦§e·c4èéÄnkÍ]¬9ÆþêÚ³Ë=7^CYA.²,ÓÒÑÅ£?C Ôì…[šŸÃ­W^zFÅg¤ÍÊÜÒ"Jòr>S¥ª þ@‘±1šÛ;©ª=ŽsÌý…EÔ$BH ÁÉ!%|9!5Iyr +³Sȯ˜2ëuÈ»¹WL¹¨ê¦apô´¶5¤(D[LŸ¿Ü]Ëž6튓^Ç?¯™M~|$aEew[?]£â­æ3zÞv§8L~ü¤˜ß]QU!3èñSÓçdKS7û;¿Ð1ͱäMIJ³µG¶Ñd^Öä¤r÷œ¢ÌÜ~ÝUÃÇçÃ%NÅÔù ¢&)ÈÊäë×\Ntd$n¯‡×7mcûÇÚÜUò®ë¯bfa>:L[g7¿|òY¼>ÿÿø™ìôi\4«ŒÓÓˆŽ´c4Ç+gC!Æ\nÚº{ÙwäèYŸÆx÷õWQþ©X}âY|~¿fÇYqn·_õ5¢ìv\¯lØÌGôÛ­V.šUJAö R≰˜ÑÉŸì~82:>5ïh]תˆúk„‚“BJ øjBj’Y)±,ÎL¤(ÁA¬Õ|b7·¢âòiqq´w„c}Îñ¹i§H ¬`3sÏœ\¢Ì=~*wj[DôübÍ,rã" MHœ~·X‹é¬œ¿Ýé&V(IŠ&;ÖŽÃdëÞ}šŒåÎk¯`vq!z޶în~õäs¸=ÞÏýÛÓRY0³”ìôiÄDEa2Ž‹¨P(̘ÛMGoQ_uª³‹(ÈžÁׯþÚ'âóý­lßwj—Q¤ÍÊÂYeçd‘{’ˆvŽÒØÚΑºFÆ\î¯,¢&BJ NF)@ àÔ„Ô$eÉ1,ÉL¤(1šøÏS£÷ SÛwj»Ñ{ƒ!¦;lŸ;k8Щ]Qi2ððêYdÇFR¶5÷0ê â0Ïj;:FÝ„•Ò¤h2£íDYŒe ðÃô¹}T÷³©¡‹Ú>çŠåÃæ^þm»6…Ô•ÓøúÌ,ì&}.ÿ¾ã(UÝCçÍu.„Ôøzl9_ú0'c:뮽‚‡ÏË;[w°y×GšŒåŽk.gNI!zžöžþóÉçs»?ó7Ó’“X8«”¼ÌtbQ˜F@"ãòzèìéçPÍ1öT! ³Xn¿úkÌ+-B¯ÓÓÑÓË>õ<£.—fÇYþ„øŒ> â3Öż²bŠs³Iˆ&ÂlA§Ó¡ª ¾@€¡‘QZÛ8|¬×wÊ"j!¤àd„Nš¤0ÁÁЬdŠ$Ø,X&¦ò… c ]£^ªz†¨%ø™uBdÅÚ¹cBô»}üûö£Ò°(ˆ±˜ø§U3™c'VØÒÔ?¤`3êÏI{ºÆë0yhõL²b" †ÇåÚÿÛqT“y¹¶([Êf`7ééuùø¿VSÝ;|Þ\÷BH:3¦§q×uWíÀëóóÞö]lܾ[“±ÜvÕeÌ/+—8½½üæ©¿¾SãY8³Œ‚¬ bãQ²$V¸½^:ºû¨®k`×C_xÝ©3É­W^Êü² z=]}}üú//0ìÕì8;I|nÙÁæÝ_N|ÆÅD3¯´ˆ¢œ,bcˆ0›Ñëu„à þ@€!縈ªª­ÃëóŸ65‰R@p2BH §WHMRÅʬ”q1e5c6èÑO¬1å „èõp¤g˜ú'þ/!¦FýA ¢¸µ,ë„(ø·íÕéÑ®(ˆ·šùÇ•3Ɉ¶ ‡ÙÔØ¢¨Dôç´]Ýc^ô:‰œØHfÄØ±› èe EUñÃôº|ìäý†.Z†Ç«b#LüÓÊ™dNȵ»ytW&órCI7•fb5èéuyù׫©9Å ¿©„R§NFZ ߸áêq!å÷óþŽ=¼»m§&c¹åе,(/ýŒÄ1 ã"*;“8‡³É„,Ë„Ãa<>/]½ýT«coUõ”šwÓ×ÖpѬRŒ]}ýüöéÖîµ›5=»®¿ŠX‡¯ÏÇ{îdãŽ=_è³q1Ñ,,/!?+“ĸ,&3:LX™Q#£Ô·´q¸¶_ pÚEÔ$BH ÁÉ!%œ!5I^\+²“)IŠ&ÁjÆbС“$BŠŠ;¢Ã顦w˜cý_LLø”'ÇpSi&6£ž——ݦmQd·P±¢œt‡ 8ÌÆú. 
÷÷‚ùš‹£sE<~vß=>Ûý3¤ˆˆˆˆ¢Ã ED´Â.&LŠŒ/¼ýlïÈB!~tzí±»Bà}×bs[~àáÑI1‰é²ýšß“AŠˆˆˆ(: RDD«lhøÀ€*äû_.L¥ Ÿ¿ýjlm¯¯(úÁ‰)Œœ˜Œí±z"À®ß†-)¸Bà¡cøá©©¥­WQ*9²¦Ž›7t6¶ò™°T²,Áo„©úSùò¶ ItEF.…„¦Â‡f °}c· .‡Å •3u„ ¶‹“ åX g÷EKSО4—”&ËKï[Ùõq¶XÅ“ãsÄ™B心Qt¤ˆˆ"òra*kêøÜWaskžðð‰IüàäTlÑ>xÃ6lÈ¥àoÇ#c3P¤æ ‹+¦®ëmÃÎu-hOÖWLÉà!jž…š‹’ã¡âùØÑ™ƒÕRÏOà ±ª3±–ùA*P¨¹8•/Çêi¾a¨2Ú“&’š ]‘¡«  aˆŠ+0^¬â™Éy|ot£ó¥Ÿú RDDDDÑa""ŠØÃ^‰ ü·¶% |vïUØØØâöÝãø×ÓÓ±=6øà ÛÑ—MÂö¾yx OŽÏA’š/|”CÇ }íØ±.‡Î”u^˜*»fÊ6:’&²¦G<;™‡˜Z¼‚”„ØØ’B‹¥C„@¾æàL¾9fO êaJSd´% $u†ªÀTeÈ’„ Qõ¦Ë5<7•ÇÃ'&ñüT~éß2HE‡AŠˆ¨IìÝw`¨+“¸ÿþÛ®ºuq‹ÛƒÇ&ðÈ™™Ø“C|è†mèÍ$Qóþù…3xvj¡©_sÉñÔUÜØ×]-X—²ÎÛÊ'KÒÒÿþ£3õÕ^–ÏR-–„ÈÛ.ΪˆaZâ‹’t$M$u¦ªÂÒd(’¶çc¶êà…™Äãs RDDDDb""j2ÇçKC¹ÔýŽ·~ûÈ8;;߃ Þ¸ ëÓ Ô<ÿë…ÓøÉt!/½äx°4oêïÀ®®t&MXšº´­-0W±q¶TÂz|‹‹—©ùš‹ñb’ÿûg1LÕWLi°T–¦@UdAGÌU-â;ÇÆ÷|þm׌ð·Ñêc""jR××7ýÉNNmHZ<ÿÈøà Û±>c¡âúøÆó§pd¶«c(9LMÁ}¸®·=™äF¹ ÂúV¾š æù(Øn,ÂÔ‹AÊ€˜¯:˜(Õ.‰ uþõ'!cjHh ,M…¥©Ðe !Ç0Wµ÷ôf“#ümCDDDÁg5)"¢ævÇþïMÚg{2V_Ü”,ºa;ºÒÊ®|ö$Ž¿Ìpé8(9Ö¥,üæ›.Cªñ>,þ AÛ¨yUÏG¾æ"@óþ}R¹Fš­Ø˜®Ø—ì=È,MERS‘Ш²ŒáU–Gø[†ˆˆˆhõ1HÅÄb˜ÚÔšêÓ9¯Y‘d|èÆmX—2Qr|üó'pr¡Û÷ o»øý[wa}&?Qt<芌„¦.=Ý­æ Ø¾@Ùõ1[±†€ª4×Ò£ 1Ð’FÎÒታU3—pZ:î0DÚÐ5uXªŠ0 ÷ä,}„¿]ˆˆˆˆVƒQŒ ä²Zø;™äGR†šjö׫Ê2~ãÆmèHš(9þîéQŒª±=ÿEÇÃ}·îBWÊBÕóñƒ“S¸¼3Irfý)oª,ÁÔߔa*B ´¦3 xB`ºbc®ê¬™ûÈB$4OœÝóËWoáo"""¢ÕÇ EDCCÃ#¹ž¤r_»i|¸™Ã”©ÊøÀõõ Ut<üÍ“Ç1QªÅö¼W\¿÷–K[ŽÍÂT芌ք”®"¡iç?Ýͨy>*®™& Sa ´¤3u¸B`²TÃ‚í®©{¨æ |æÁ'ø”="""¢ˆ0HÅX³‡)KSñë×¢-a o»ø«'ŽÇzkXÍøÝ·ì\Ú‚øüÔÀÔ€,Ih±êÛÁ,M©*Pd AÂöØž@Ùõ0[±D¦Î RŽ˜(ÕP`""""¢UÄ EDt >0Ðe&þ¨3i¼·™Ÿ§ ï¿n­–Ž|ÍÅþÇa¡ß­a®ð±7ï@gª¾ñø| Ž/`¨Êy_§HR}NQc€¶¥)S!?h̘ò0S±«¦Î R¶/0^¬¢äzkêž±=O3HE†AŠˆèÒla*cèxßu[ÑbéX¨¹ø‹ÇŽÆz%Žøí7ïXš‰5:_†ã èêË™_ S ­¾bÊj<ÝÍB8~}øùâV>„«¦Î R5_àl±Š ƒ­")"¢KP³„©VËÀ¯\³9KÇ|ÕÁ×;‚²ãÇö¼aˆßºyÚ“ŠŽ‡S eؾ€öO=”% YCGBWa©‹aJBŽ/P[å0õÒ 5V¨ êùkê±}O?À EDDD)"¢KØÞ}†Ú’æçÖ¥¬›£SíI÷^½YSÃlÅÁ׉uø|äæËÑ–0P°=œÉW`ûâ‚’ YS;gÅ”º¦\Àö|T¼úSù<±ra*M-)dM5ÏÇé|¶kêÞp|O1HE÷ÙšAŠˆèÒU˜êL™¸çê-Èf*6¾öèÔüø†U–ðoº ­!íc…j=HÉŽÔ¥)Hh*šÚ~^SŽ/Pq=ÌTx"Xö0„À¦Ö²†Žªçãd¾ OkêžpüŸzàq)"""¢ˆ0H­!{÷Ê%Ì/­O[;W#L­Ï$ðï¯Ü„´¡aºlãÏã e¨ ~ãÆíKCÚÏ*pEE~}ÁH†„´¡!¡¿¦TEF„pDרx>¦Ëö²†© 6·ÖWH•]'Êð)""""Z= RDDkÐû¼7mhŸíÉX}+¦DbcK ï½bRºŠÉr þè‘X¯ÄIh*>xÃ6´X:ò¶‡Óù2ü „ü;‘ÔSI]i<™O…ÖSK+¦¼úŒ)×ãa*€ÍmidM %Çlj…‚5öyÀþàÛ RDDDDQa""ZÃÃÔ¦ÖTŸþƒ¹/–lÏâßî@RW1Qªá«†ˆñß´¡á× "gé(Ø.Nå+Ai™vÔ…!OåS`é0%KCÀ ‚¥§òÍVl8o LÕWH-)ÇçKkîÚg""""Šƒá=ÿðû»RÖ§ 5µœÿÿÎu9¼{ÇXšŠ‰R_}ôH¬WâäL¿vÝVäLÛÃÉ…VâhÂȘ,UAÒЖÂTžàŠF˜ª:°=qQa* HÀÆ\ 9KGÑñ0ºƒ”'|’AŠˆˆˆ(2 RDDÉõ$•ûÚMãÃ˦jžÀ5=mø7—÷ÃRœ-VñÕGÇúµ& üê5[‘55l'V6ä„Òº†„¦ ©«HèôÅ0Ô·òU=™Š}Áa*BȲ„ ¹Z,}UŽ£y"Ä'¿ýƒQD¤ˆˆè<˦ªžú:ðÎí}0Uc… ¾úè‘eÛÞ…sŸX´=Œ®RÈ C ¥«ÕR ’º]‘†/ΘºÐ0å!tEF_6Ù˜…åâÔBkíÓ€/B|‚AŠˆˆˆ(2 RDDô²ÃT›©üõ >/»nX‡;·õÂPœ.Tð•GA•åØž“î´…ÿpÕf¤W9H- ‚iSCRSai*Rz}ø9Pß"éŠ×Ã\ÕEÕõ_6Ly"„©ÊèÉ&—žxªPÁZû<à!>q€AŠˆˆˆ(* RDDôª†† t™‰?êLx0Ut<ܶ¹wl]CQp2_ÆW= -ÆAª/›Ä{¯Ø„´¡¢`×g/E±âk1LYªŠ¤® ¥kK¡Ï êaªêú˜¯9(»>Ôs芺"/mÙ›¯¹8/¯¹ëšAŠˆˆˆ(Z RDDtA.6LåmoìÁÞÍÝÐ'JøÚÁ£çÅ‘¸hIá=»7"¥«(8.FçÊ‘nAAˆ´¡!Õx"_RWë[ùð⊩šW~^i„)ÇPe›ÛÒÈ™:櫎ϗ–VZ­"ñû RDDDD‘a""¢‹RSæ—;“Ö¯¦æ«ÞyYölꆦÈ8>_Ä_>~ RŒ‡HmnMãî]HhêÒÓéšápD"©«hK°T¦&/­DsƒŽÀö|LWmŠ,a°=‹¬©a®êàðL ]]S×1ƒQ´¤ˆˆèuÙ»ïÀP[Òüܺ”uóË…©ÙŠwïÀ[6®ƒ*Ë86WÄßÓ+ á!ÊŽ‡ªç£êù(9déÒ S!€“AŠˆˆˆ(* RDD´¢\|Tîfr!ûçOÅúX®íiÇÛ×Ç6H@Íðƒ×÷vÀÔˆ €„Ðd’xAˆŠë¡âú¨yEǽdÃÔï1HE†AŠˆˆVœ+‚Ü“óý»'G?–2ÔT\ãúÞvܱu=4EA1¦AÊö„¸²» –ª`¡æàè\ ½ÙÚtE©‡) êù¨x5ÏGÑöp©u))"""¢è0HѪÉõ$•ûÚMãÃq S7õwà¶ÍÝKAjt®»yK¶/ CÂήXª‚ùšƒcsEª‚VË@ÊÐj @—¸Aˆªç£æú(»>Ê®wÉ\ RDDDDÑa""¢U74<’뵤/´&̦ -6¯ûæ Ø³©š"Çv…”ã h²ŒË:s0Aêø\ a}š,MEgÒDBWa( tE†,KK+¦ª®JcÆTÜ1HE‡AŠˆˆ"34|` ËLüQgÒxoÂÔ[ÖáÖM]Pe¹ñ”½Rìæ+¹~CU°­#CQ0Wup¦P9/¬-?oOšH6”¦È%ÀBÔ<¿>cÊ(Ønl¯?)"""¢è0HQä⦆6uá–uç©2b¶c®ÐTlmKCWÌÕœÉW^v¥—/B誌ö„¤®ÁT:LÕŸÊÏ0Å EDDD)""jw ?xe‹¡ý×u)ëæf S·mîÆÍ:¡ÈŠŽ‡Ñ¹2d9^çØ!R†ŠM-)hŠ‚ùªÓ…꫆5_„P IIM…¥ÕgLɧòÕ¼úù*ž’í!@<>[0HE‡AŠˆˆšÎÞ}†Ú’æçš-Lݾe=nêïh)£sÅØ 5÷ƒCÆ\ š"×·ìåËt/ S¦¦ÂPd(S5O æ×·óA“Æ`""""Šƒ5­f SoÛÚƒú; H@Ññp|®%fAJ!r–޾lª,c®jãt¾rQÇዊ,¡³1cÊT誥±•ÏöÅy3¦š5L1HE‡AŠˆˆšÞÞ¿xà]YSÿ{2V_”aêg{p]_d%×Ãñ¹2”˜mÙ ‚- 
=™TYÆlÅÆ™Båu…µÅ0Õž4j̘2T²$A4ÂTÕ¨y>òM¦¤ˆˆˆˆ¢Ã EDD±qÇþïMÚg£ S?»­×ô¶CF}…Ô‰ùRì¶ì!О0ж Èf+N*PßÀqø"„,)IMƒ¥)0^²bªæ T=ùšÛ43¦¤ˆˆˆˆ¢Ã EDD±³¦6µ¦úôU\¢ôÎí}¸º§ €¼íâô+<®™…!Б4±.e¾¤ò¨Ê?_„e4fL½¦TY‚ÔgLÙ¾@Ùõ0[q†X–Ÿûz1HE‡AŠˆˆbëßüí÷>7I~$e¨©Uùy—÷ãÊîV@¾æâL±)†ç­3i¢#eA‘€™ŠÓù*´e C¾!Iõ0•Ò5ªK“¡È2D æ¨y~=LU ¢0Å EDDD)""е¡á‘\OR¹¯Ý4>¼Òaê] ˜¯:/UcyκSZ“&d³U§òe¬ÄJ3_„€t$L¤ ¦ªÂTëOå Â5/€Ýx*ßtÅ^õ0Å EDDD)""º$¬F˜z÷Ž ØÕÕ‚ÀlÅÆd¹ËsÕ“N %a@jÇéBÚ n}ôEˆ!:“’º KSaiõáçAÂöØ^}+ßLÅ^µ­| RDDDDÑa""¢KÊÐð.3ñGIã½Ë9ø\ðîع.‡ÀLÙÆt%~A* ¾\9S€ÆSö–wËÞ+ñEý3ÇâSù,M¥)­|õáçŽÿb˜ ‚• S RDDDDÑa""¢KÒr‡)Y’ð ;7`ǺDL•j˜­Ú±;/A ä’ÈšúÒJ¯±bõ =eïb-nåkO¼¦LUªÔÔë ÔÎ~.‚pEƒQt¤ˆˆè’¶\aJ‘$ܽk—uf!‚¥ækNìÎG„ØØšFÆÐ" Rç}³ $4–ªÀÔêOå B,…©Šëc¦b/{˜b""""Šƒ­ {÷jKšŸ[—²n~=aJSdüÂÎlïÈ@!ΫÈÛnì΃Blj©À\ÅÆÙbŠíóeHÈZ:,U©Ç)MY~îúl_ ìú˜­ÚðÅò„))"""¢è0HÑšòzÔ¡*ø…0Øž„8S¨ äx±;þÅ •55ˆ°y‚Ô"Y’1êÛø’Zýž»•Ï*®‡éÊS RDDDDÑa""¢5ébÔ¥©x÷Ž ØÚž8µPFÅócwÜç© Ä\Õiª µH–$¤ ÉÆj©„¦BUdAWÔWL-nåóDðºÂƒQt¤ˆˆhM»cÿƒ÷¦ í³=«ïÕÂTR¯©Ímx"ÀÉ…j¾ˆÝñ¾\/V!7Y:WÖБÐëQÊÔTh²„0Äyaj¶jÃõ/.L1HE‡AŠˆˆ/†©M­©>]‘êÿž64ܵc6µ¦á‰Çç‹pE»ã|1HéðƒóU㥚¸GB9C¯¯–ÒUX0„€'8B ìø˜«Úp.0L1HE‡AŠˆˆèïùû‡ßß•²þ8e¨©sÿû¬©ã®ýhIÃGæŠAüþ†ž¤<`¡æ`¢Tƒ$ÅçÒ†KS‘ÔU$Ï S®àŠ·ò½V˜b""""ŠƒÑK äz’Ê}í¦ñáÅ0Õbéøù0KÁ/L)~Ç„ÀÆ–ÔRš«:˜*Ç+H-JéRzcÆ”®AWd„a}Æ”ã T=ÓåWS RDDDDÑa"""z‹a*­ª¿1ØžI¿kG?6äRp„ÀóSù¦~!ÎR³UÓ;Žm †@ÚP‘Ð5$ÃÏuEF,…©Šçc®â æ‰óƒQt¤ˆˆˆ^ÃÐðHnCZûè¾õŠÏôe“°}g&`¨rìŽ% –r¦WÌTÌVíØ¿GA"³8cJS‘ÒU¨rýýñ‚Ž ây˜¯º(9. Ua""""ŠƒÑ:™/ôe’÷Û¾¸ç©‰9Xš»cxiš®Ø˜«:—Ì{!Ò¦KUlléÓaXS®Pu}L•køoÜó௽m„W6Ñêc"""ºHe×xèØÄÚÆ/&õxE©ðœ-{Ž_Ró5ç’{‚ DÒÐÑ5˜šŒÔâŒ)Ô·òaˆƒc³{nÝØ5Â+šˆˆˆhõ1H½NCÃîìÿúUëÛ®K˜:7HÙB`ºdcÁv.Ù÷H!Rºúâv>]…¡Ô·ò9"ØcªÊ¯d"""¢ÕÇ EDDôíÝw`èÎí}_º®·}g³‡©06¶¦5tؾÀT¹†¼í^òïQ=LihMè°T†*C•å=Š,ð &"""Z} RDDDË$.aj %¬¡Áö&Ë5Ö@Z´¸b 5ô=ë3‰^¹DDDD«AŠˆˆh™íÝw`èöÁÞýoêïØØŒajqË^͘(ÕPtÜ5÷«xr|nϧÞzå¯X"""¢ÕÇ EDD´BîØÿà½C›º¾xË@gg3…©Å Uõ&JU”oͽ7óUÿyäé=½ïí#¼R‰ˆˆˆVƒÑ [ S·oéîTd)Ú?ü6¶¤‘15T]ñRewí©…š‹/|AŠˆˆˆ(²Ï¥ RDDD«ã³ß}ú‹·mîþ-KS´ÈþðØØšFÆÐPu}œ-UQqý5÷^lŸÿ.ƒQdŸK¤ˆˆˆVÏÐðHîžÝ½_lËÜE˜’ acK SCÅóq¶PEÕ[{Aªh{øÜwŸb""""ŠƒQ¢ S²$a WRe×ÇÙB5_¬¹ó_r<ü§ï0HE…AŠˆˆ(BCÃ#¹_ÚÑý×Û;rïXÁçŠ$aC.‰Œ©£ìø+V`¯Á Uv}|ö¡'¤ˆˆˆˆ" EDDÔ†† ܽ}ãþÝ]¹=+¦TYB¶±BÊñqºP+Ö^ª¸>þAŠˆˆˆ(2 RDDDMd¥Ã”&ËèË&‘15”\gòk3HÕ<Ï<øƒQD¤ˆˆˆšÐÐðŸÛÒ÷O×õ¶ï\®0†€¡*èÉZÈ:JއÓù ¼ Xsç×ö>Í EDDD)""¢&¶wß¡;·÷}i9ÂT–¦`}:¬©¡h{8S(à ÖÞgÇøÔ RDDDDQa"""ŠåS"‘ÐTôdÈŠŽ‡Ó øáÚ[!åúþàǤˆˆˆˆ" EDD#{ÿâw½us÷Woèì¼Ø0å!š‚¾li£¾BêT¾ ±? ¸"À|›AŠˆˆˆ(* RDDD1tÇþïÚÔõÅ‹ S~ÂRôçRH*Jއ ekð³€'|’AŠˆˆˆ(2 RDDD1¶¦nßÒÝ©ÈÒ«~­'BªŒM­i¤teÇÇèBiM)_„øÄ·c""""ŠƒÑ%à“=ùå;·ö¼ßÒ핾Æ4YÆ`{ MEÅó1:WB€5¤‚Ÿ8À EDDD)""¢KÄÐðHîžÝ½_lËÜõraÊñȰ£3KSQó}-!\ƒAJ!~ŸAŠˆˆˆ(2 RDDD—˜W SŽ/HØÕÕKU`û‡g kòaˆû¾Å EDDD)""¢KÔÐðHîîíÝßØÝ•Û“ÔUØž\ÑÝ SUPóŽ0HðJ!"""Z} RDDD—¸¡áwo߸ÿÊî–=óUoÝܽ¤Ï ­Ás„À}ß:È EDDD)""¢5bhøÀÀo^wùýo\¡(¨z>ŽÎ×ä¹Càã RDDDD‘a"""Zcªž?¤+ÊýÛ½õT¾¼&σQ´¤ˆˆˆÖ¨#³Å¡ÿõ“Ó|cÇUI]]sÇÿ{ßd""""Š ƒÑ·wß¡;·÷}éºÞök)L1HE‡AŠˆˆˆwìðÞ¡M]_¼e ³s-„))"""¢è0HÑyÖJ˜b""""Šƒ½¬Å0uû–îNE–.¹ãc""""Šƒ½¢¡á‘ÜͽÙÏß¹µçý–¦h—Ò±1HE‡AŠˆˆˆ^ÓÐðHîžÝ½_lËÜu©„))"""¢è0HÑ[ S[ZÓï‰û|))"""¢è0HÑE>0p÷öûwwåöÄ5L1HE‡AŠˆˆˆ^·8‡))"""¢è0HÑ64|`à®Áþ¯_µ¾íú¸„))"""¢è0HѲٻïÀÐÛû¾t]oûÎfS RDDDDÑa"""¢e‡0Å EDDD)"""Z1{÷º}°wÿ›ú;66[˜b""""Šƒ­¸;ö?xïЦ®/Þ2ÐÙÙ,aŠAŠˆˆˆ(: RDDD´jÃÔí[º;YŠôµ0HE‡AŠˆˆˆVÝùþsŸ¼yCçg,MÑ¢z RDDDDÑa"""¢H äîÙÝû•Á¶Ì]Q„))"""¢è0HQ¤¢ S RDDDDÑa"""¢¦04<’û¥Ý½½#÷ŽÕ|Î EDDD)"""j*CÃîÞ¾qÿî®Üž• S RDDDDÑa"""¢¦´ÒaŠAŠˆˆˆ(: RDDDÔÔ†† üÜ–¾º®·}çr†))"""¢è0HQ,ìÝw`èÎí}_Z®0Å EDDD)"""Š•å S RDDDDÑa"""¢XÚ»ïÀÐ[·ô|ý–ÎÎצ¤ˆˆˆˆ¢Ã EDDD±vÇþïÚÔõÅ‹ S RDDDDÑa"""¢KÂb˜º}Kw§"K¯ùõ RDDDDÑa"""¢KÊ'zòËwníy¿¥)Ú«}ƒQt¤ˆˆˆè’34<’»gwïWÛ2w½R˜b""""Šƒ]²^-L1HE‡AŠˆˆˆ.yCÃ#¹»·wcwWnÏâàs)"""¢è0HÑš14|`àîí÷ßÔß¾‡AŠˆˆˆ(: RDDÿ;vL 0𥠅±2† F¤ ¥.²p'!c€ß9ï瘙y¯õ©Ð3¤HR¤ )R†)C €”!@Ê eH2¤HR¤ )Rj%Øú^fÛhIEND®B`‚sparse-0.12.0/docs/operations.rst000066400000000000000000000210621402510130100167200ustar00rootroot00000000000000.. 
currentmodule:: sparse Operations on :obj:`COO` arrays =============================== .. _operations-operators: Operators --------- :obj:`COO` objects support a number of operations. They interact with scalars, :doc:`Numpy arrays `, other :obj:`COO` objects, and :obj:`scipy.sparse.spmatrix` objects, all following standard Python and Numpy conventions. For example, the following Numpy expression produces equivalent results for both Numpy arrays, COO arrays, or a mix of the two: .. code-block:: python np.log(X.dot(beta.T) + 1) However some operations are not supported, like operations that implicitly cause dense structures, or numpy functions that are not yet implemented for sparse arrays. .. code-block:: python np.svd(x) # sparse svd not implemented This page describes those valid operations, and their limitations. :obj:`elemwise` ~~~~~~~~~~~~~~~ This function allows you to apply any arbitrary broadcasting function to any number of arguments where the arguments can be :obj:`SparseArray` objects or :obj:`scipy.sparse.spmatrix` objects. For example, the following will add two arrays: .. code-block:: python sparse.elemwise(np.add, x, y) .. warning:: Previously, :obj:`elemwise` was a method of the :obj:`COO` class. Now, it has been moved to the :obj:`sparse` module. .. _operations-auto-densification: Auto-Densification ~~~~~~~~~~~~~~~~~~ Operations that would result in dense matrices, such as operations with :doc:`Numpy arrays ` raises a :obj:`ValueError`. For example, the following will raise a :obj:`ValueError` if :code:`x` is a :obj:`numpy.ndarray`: .. code-block:: python x + y However, all of the following are valid operations. .. code-block:: python x + 0 x != y x + y x == 5 5 * x x / 7.3 x != 0 x == 0 ~x x + 5 We also support operations with a nonzero fill value. These are operations that map zero values to nonzero values, such as :code:`x + 1` or :code:`~x`. In these cases, they will produce an output with a fill value of :code:`1` or :code:`True`, assuming the original array has a fill value of :code:`0` or :code:`False` respectively. If densification is needed, it must be explicit. In other words, you must call :obj:`COO.todense` on the :obj:`COO` object. If both operands are :obj:`COO`, both must be densified. Operations with NumPy arrays ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In certain situations, operations with NumPy arrays are also supported. For example, the following will work if :code:`x` is :obj:`COO` and :code:`y` is a NumPy array: .. code-block:: python x * y The following conditions must be met when performing element-wise operations with NumPy arrays: * The operation must produce a consistent fill-values. In other words, the resulting array must also be sparse. * Operating on the NumPy arrays must not increase the size when broadcasting the arrays. Operations with :obj:`scipy.sparse.spmatrix` -------------------------------------------- Certain operations with :obj:`scipy.sparse.spmatrix` are also supported. For example, the following are all allowed if :code:`y` is a :obj:`scipy.sparse.spmatrix`: .. code-block:: python x + y x - y x * y x > y x < y In general, if operating on a :code:`scipy.sparse.spmatrix` is the same as operating on :obj:`COO`, as long as it is to the right of the operator. .. note:: Results are not guaranteed if :code:`x` is a :obj:`scipy.sparse.spmatrix`. For this reason, we recommend that all Scipy sparse matrices should be explicitly converted to :obj:`COO` before any operations. Broadcasting ------------ All binary operators support :doc:`broadcasting `. 
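For instance, here is a minimal sketch (the shapes and values below are
chosen purely for illustration) of combining two differently-shaped sparse
operands:

.. code-block:: python

    import numpy as np
    import sparse

    a = sparse.COO(np.arange(4))           # shape (4,)
    b = sparse.COO(np.arange(5)[:, None])  # shape (5, 1)

    (a + b).shape                          # (5, 4)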
This means that (under certain conditions) you can perform binary operations on arrays with unequal shape. Namely, when the shape is missing a dimension, or when a dimension is :code:`1`. For example, performing a binary operation on two :obj:`COO` arrays with shapes :code:`(4,)` and :code:`(5, 1)` yields an object of shape :code:`(5, 4)`. The same happens with arrays of shape :code:`(1, 4)` and :code:`(5, 1)`. However, :code:`(4, 1)` and :code:`(5, 1)` will raise a :obj:`ValueError`. .. _operations-elemwise: Element-wise Operations ----------------------- :obj:`COO` arrays support a variety of element-wise operations. However, as with operators, operations that map zero to a nonzero value are not supported. To illustrate, the following are all possible, and will produce another :obj:`COO` array: .. code-block:: python np.abs(x) np.sin(x) np.sqrt(x) np.conj(x) np.expm1(x) np.log1p(x) np.exp(x) np.cos(x) np.log(x) As above, in the last three cases, an array with a nonzero fill value will be produced. Notice that you can apply any unary or binary :doc:`numpy.ufunc ` to :obj:`COO` arrays, and :obj:`numpy.ndarray` objects and scalars and it will work so long as the result is not dense. When applying to :obj:`numpy.ndarray` objects, we check that operating on the array with zero would always produce a zero. .. _operations-reductions: Reductions ---------- :obj:`COO` objects support a number of reductions. However, not all important reductions are currently implemented (help welcome!) All of the following currently work: .. code-block:: python x.sum(axis=1) np.max(x) np.min(x, axis=(0, 2)) x.prod() .. note:: If you are performing multiple reductions along the same axes, it may be beneficial to call :obj:`COO.enable_caching`. :obj:`COO.reduce` ~~~~~~~~~~~~~~~~~ This method can take an arbitrary :doc:`numpy.ufunc ` and performs a reduction using that method. For example, the following will perform a sum: .. code-block:: python x.reduce(np.add, axis=1) .. note:: This library currently performs reductions by grouping together all coordinates along the supplied axes and reducing those. Then, if the number in a group is deficient, it reduces an extra time with zero. As a result, if reductions can change by adding multiple zeros to it, this method won't be accurate. However, it works in most cases. Partial List of Supported Reductions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Although any binary :doc:`numpy.ufunc ` should work for reductions, when calling in the form :code:`x.reduction()`, the following reductions are supported: * :obj:`COO.sum` * :obj:`COO.max` * :obj:`COO.min` * :obj:`COO.prod` .. _operations-indexing: Indexing -------- :obj:`COO` arrays can be :obj:`indexed ` just like regular :obj:`numpy.ndarray` objects. They support integer, slice and boolean indexing. However, currently, numpy advanced indexing is not properly supported. This means that all of the following work like in Numpy, except that they will produce :obj:`COO` arrays rather than :obj:`numpy.ndarray` objects, and will produce scalars where expected. Assume that :code:`z.shape` is :code:`(5, 6, 7)` .. code-block:: python z[0] z[1, 3] z[1, 4, 3] z[:3, :2, 3] z[::-1, 1, 3] z[-1] All of the following will raise an :obj:`IndexError`, like in Numpy 1.13 and later. .. code-block:: python z[6] z[3, 6] z[1, 4, 8] z[-6] Advanced Indexing ~~~~~~~~~~~~~~~~~ Advanced indexing (indexing arrays with other arrays) is supported, but only for indexing with a *single array*. 
Indexing a single array with multiple arrays is not supported at this time. As above, if :code:`z.shape` is :code:`(5, 6, 7)`, all of the following will work like NumPy: .. code-block:: python z[[0, 1, 2]] z[1, [3]] z[1, 4, [3, 6]] z[:3, :2, [1, 5]] Package Configuration --------------------- By default, when performing something like ``np.array(COO)``, we allow the array to be converted into a dense one. To prevent this and raise a :obj:`RuntimeError` instead, set the environment variable ``SPARSE_AUTO_DENSIFY`` to ``0``. If it is desired to raise a warning if creating a sparse array that takes no less memory than an equivalent desne array, set the environment variable ``SPARSE_WARN_ON_TOO_DENSE`` to ``1``. .. _operations-other: Other Operations ---------------- :obj:`COO` arrays support a number of other common operations. Among them are :obj:`dot`, :obj:`tensordot`, :obj:`concatenate` and :obj:`stack`, :obj:`transpose ` and :obj:`reshape `. You can view the full list on the :doc:`API reference page `. .. note:: Some operations require zero fill-values (such as :obj:`nonzero `) and others (such as :obj:`concatenate`) require that all inputs have consistent fill-values. For details, check the API reference. sparse-0.12.0/docs/quickstart.rst000066400000000000000000000034371402510130100167350ustar00rootroot00000000000000.. currentmodule:: sparse Getting Started =============== Install ------- If you haven't already, install the ``sparse`` library .. code-block:: bash pip install sparse Create ------ To start, lets construct a sparse :obj:`COO` array from a :obj:`numpy.ndarray`: .. code-block:: python import numpy as np import sparse x = np.random.random((100, 100, 100)) x[x < 0.9] = 0 # fill most of the array with zeros s = sparse.COO(x) # convert to sparse array These store the same information and support many of the same operations, but the sparse version takes up less space in memory .. code-block:: python >>> x.nbytes 8000000 >>> s.nbytes 1102706 >>> s For more efficient ways to construct sparse arrays, see documentation on :doc:`Constructing Arrays `. Compute ------- Many of the normal Numpy operations work on :obj:`COO` objects just like on :obj:`numpy.ndarray` objects. This includes arithmetic, :doc:`numpy.ufunc ` operations, or functions like tensordot and transpose. .. code-block:: python >>> np.sin(s) + s.T * 1 However, operations which map zero elements to nonzero will usually change the fill-value instead of raising an error. .. code-block:: python >>> y = s + 5 However, if you're sure you want to convert a sparse array to a dense one, you can use the ``todense`` method (which will result in a :obj:`numpy.ndarray`): .. code-block:: python y = s.todense() + 5 For more operations see the :doc:`Operations documentation ` or the :doc:`API reference `. sparse-0.12.0/docs/roadmap.rst000066400000000000000000000076141402510130100161670ustar00rootroot00000000000000Roadmap ======= For a brochure version of this roadmap, see `this link `_. Background ---------- The aim of PyData/Sparse is to create sparse containers that implement the ndarray interface. Traditionally in the PyData ecosystem, sparse arrays have been provided by the ``scipy.sparse`` submodule. All containers there depend on and emulate the ``numpy.matrix`` interface. This means that they are limited to two dimensions and also don’t work well in places where ``numpy.ndarray`` would work. PyData/Sparse is well on its way to replacing ``scipy.sparse`` as the de-facto sparse array implementation in the PyData ecosystem. 
Topics ------ * More storage formats (the most important being CSF, a generalisation of CSR/CSC). * Better performance/algorithms * Covering more of the NumPy API * SciPy Integration * Dask integration for high scalability * CuPy integration for GPU-acceleration * Maintenance and General Improvements More Storage Formats -------------------- In the sparse domain, you have to make a choice of format when representing your array in memory, and different formats have different trade-offs. For example: * CSR/CSC are usually expected by external libraries, and have good space characteristics for most arrays * DOK allows in-place modification and writes * LIL has faster writes if written to in-order. * BSR allows block-writes and reads The most important formats are, of course, CSR and CSC, because they allow zero-copy interaction with a number of libraries including MKL, LAPACK and others. This will allow PyData/Sparse to quickly reach the functionality of ``scipy.sparse``, accelerating the path to its replacement. Better Performance/Algorithms ----------------------------- There are a few places in scipy.sparse where algorithms are sub-optimal, sometimes due to reliance on NumPy which doesn’t have these algorithms. We intend to both improve the algorithms in NumPy, giving the broader community a chance to use them; as well as in PyData/Sparse, to reach optimal efficiency in the broadest use-cases. Covering More of the NumPy API ------------------------------ Our eventual aim is to cover all areas of NumPy where algorithms exist that give sparse arrays an edge over dense arrays. Currently, PyData/Sparse supports reductions, element-wise functions and other common functions such as stacking, concatenating and tensor products. Common uses of sparse arrays include linear algebra and graph theoretic subroutines, so we plan on covering those first. SciPy Integration ----------------- PyData/Sparse aims to build containers and elementary operations on them, such as element-wise operations, reductions and so on. We plan on modifying the current graph theoretic subroutines in ``scipy.sparse.csgraph`` to support PyData/Sparse arrays. The same applies for linear algebra and ``scipy.sparse.linalg``. CuPy integration for GPU-acceleration ------------------------------------- CuPy is a project that implements a large portion of NumPy’s ndarray interface on GPUs. We plan to integrate with CuPy so that it’s possible to accelerate sparse arrays on GPUs. Completed Tasks =============== Dask Integration for High Scalability ------------------------------------- Dask is a project that takes ndarray style containers and then allows them to scale across multiple cores or clusters. We plan on tighter integration and cooperation with the Dask team to ensure the highest amount of Dask functionality works with sparse arrays. Currently, integration with Dask is supported via array protocols. When more of the NumPy API (e.g. array creation functions) becomes available through array protocols, it will be automatically be supported by Dask. (Partial) SciPy Integration --------------------------- Support for ``scipy.sparse.linalg`` has been completed. We hope to add support for ``scipy.sparse.csgraph`` in the future. 
sparse-0.12.0/pytest.ini000066400000000000000000000004241402510130100151030ustar00rootroot00000000000000[pytest] addopts = --black --cov-report term-missing --cov-report html --cov-report=xml --cov-report=term --cov sparse --cov-config .coveragerc --junitxml=junit/test-results.xml filterwarnings = ignore::PendingDeprecationWarning testpaths = sparse junit_family=xunit2 sparse-0.12.0/readthedocs.yml000066400000000000000000000001441402510130100160610ustar00rootroot00000000000000build: image: latest python: version: 3.7 pip_install: true extra_requirements: - docs sparse-0.12.0/release-procedure.md000066400000000000000000000012701402510130100170020ustar00rootroot00000000000000* Update changelog in docs/changelog.rst and commit. * Tag commit ```bash git tag -a x.x.x -m 'Version x.x.x' ``` * Push to github ```bash git push pydata master --tags ``` * Upload to PyPI ```bash git clean -xfd # remove all files in directory not in repository python setup.py sdist bdist_wheel --universal # make packages twine upload dist/* # upload packages ``` * Enable the newly-pushed tag for documentation: https://readthedocs.org/projects/sparse-nd/versions/ * Wait for conda-forge to realise that the build is too old and make a PR. * Edit and merge that PR. * Announce the release on: * numpy-discussion@python.org * python-announce-list@python.org sparse-0.12.0/requirements.txt000066400000000000000000000000441402510130100163340ustar00rootroot00000000000000numpy>=1.17 scipy>=0.19 numba>=0.49 sparse-0.12.0/requirements/000077500000000000000000000000001402510130100155755ustar00rootroot00000000000000sparse-0.12.0/requirements/all.txt000066400000000000000000000000261402510130100171040ustar00rootroot00000000000000-r tox.txt -r docs.txtsparse-0.12.0/requirements/docs.txt000066400000000000000000000000271402510130100172650ustar00rootroot00000000000000sphinx sphinx_rtd_themesparse-0.12.0/requirements/tests.txt000066400000000000000000000000601402510130100174740ustar00rootroot00000000000000dask[array] pytest>=3.5 pytest-black pytest-cov sparse-0.12.0/requirements/tox.txt000066400000000000000000000000201402510130100171400ustar00rootroot00000000000000-r tests.txt toxsparse-0.12.0/setup.cfg000066400000000000000000000007131402510130100146740ustar00rootroot00000000000000[flake8] # References: # https://flake8.readthedocs.io/en/latest/user/configuration.html # https://flake8.readthedocs.io/en/latest/user/error-codes.html # Note: there cannot be spaces after comma's here exclude = __init__.py .asv/ .tox/ max-line-length = 120 [versioneer] VCS = git style = pep440 versionfile_source = sparse/_version.py versionfile_build = sparse/_version.py tag_prefix = parentdir_prefix = sparse- [bdist_wheel] universal=1 sparse-0.12.0/setup.py000077500000000000000000000043241402510130100145720ustar00rootroot00000000000000#!/usr/bin/env python from setuptools import setup, find_packages import versioneer from pathlib import Path def open_reqs_file(file, reqs_path=Path(".")): with (reqs_path / file).open() as f: reqs = list(f.read().strip().split("\n")) i = 0 while i < len(reqs): if reqs[i].startswith("-r"): reqs[i : i + 1] = open_reqs_file(reqs[i][2:].strip(), reqs_path=reqs_path) else: i += 1 return reqs extras_require = {} reqs = [] def parse_requires(): reqs_path = Path("./requirements") reqs.extend(open_reqs_file("requirements.txt")) for f in reqs_path.iterdir(): extras_require[f.stem] = open_reqs_file(f.parts[-1], reqs_path=reqs_path) parse_requires() with open("README.rst") as f: long_desc = f.read() print(repr(reqs)) print(repr(reqs)) setup( 
name="sparse", version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), description="Sparse n-dimensional arrays", url="https://github.com/pydata/sparse/", maintainer="Hameer Abbasi", maintainer_email="hameerabbasi@yahoo.com", license="BSD 3-Clause License (Revised)", keywords="sparse,numpy,scipy,dask", packages=find_packages(include=["sparse", "sparse.*"]), long_description=long_desc, install_requires=reqs, extras_require=extras_require, zip_safe=False, classifiers=[ "Development Status :: 2 - Pre-Alpha", "Operating System :: OS Independent", "License :: OSI Approved :: BSD License", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3 :: Only", "Intended Audience :: Developers", "Intended Audience :: Science/Research", ], project_urls={ "Documentation": "https://sparse.pydata.org/", "Source": "https://github.com/pydata/sparse/", "Tracker": "https://github.com/pydata/sparse/issues", }, entry_points={ "numba_extensions": ["init = sparse._numba_extension:_init_extension"] }, python_requires=">=3.6, <4", ) sparse-0.12.0/sparse/000077500000000000000000000000001402510130100143475ustar00rootroot00000000000000sparse-0.12.0/sparse/__init__.py000066400000000000000000000004551402510130100164640ustar00rootroot00000000000000from ._coo import COO, as_coo from ._compressed import GCXS from ._dok import DOK from ._sparse_array import SparseArray from ._utils import random from ._io import save_npz, load_npz from ._common import * from ._version import get_versions __version__ = get_versions()["version"] del get_versions sparse-0.12.0/sparse/_common.py000066400000000000000000001376201402510130100163610ustar00rootroot00000000000000import numpy as np import numba import scipy.sparse from functools import wraps from itertools import chain from collections.abc import Iterable from ._sparse_array import SparseArray from ._utils import check_compressed_axes, normalize_axis, check_zero_fill_value from ._umath import elemwise from ._coo.common import ( clip, triu, tril, where, nansum, nanmean, nanprod, nanmin, nanmax, nanreduce, roll, kron, argwhere, isposinf, isneginf, result_type, diagonal, diagonalize, asCOO, linear_loc, ) def tensordot(a, b, axes=2, *, return_type=None): """ Perform the equivalent of :obj:`numpy.tensordot`. Parameters ---------- a, b : Union[COO, np.ndarray, scipy.sparse.spmatrix] The arrays to perform the :code:`tensordot` operation on. axes : tuple[Union[int, tuple[int], Union[int, tuple[int]], optional The axes to match when performing the sum. return_type : {None, COO, np.ndarray}, optional Type of returned array. Returns ------- Union[COO, numpy.ndarray] The result of the operation. Raises ------ ValueError If all arguments don't have zero fill-values. 
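Examples
--------
A minimal illustrative sketch; the operand values below are chosen only
for demonstration:

>>> import numpy as np
>>> import sparse
>>> a = sparse.COO(np.eye(3))
>>> b = sparse.COO(np.arange(9).reshape(3, 3))
>>> sparse.tensordot(a, b, axes=1).todense()  # doctest: +SKIP
array([[0., 1., 2.],
       [3., 4., 5.],
       [6., 7., 8.]])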
See Also -------- numpy.tensordot : NumPy equivalent function """ from ._compressed import GCXS # Much of this is stolen from numpy/core/numeric.py::tensordot # Please see license at https://github.com/numpy/numpy/blob/master/LICENSE.txt check_zero_fill_value(a, b) if scipy.sparse.issparse(a): a = GCXS.from_scipy_sparse(a) if scipy.sparse.issparse(b): b = GCXS.from_scipy_sparse(b) try: iter(axes) except TypeError: axes_a = list(range(-axes, 0)) axes_b = list(range(0, axes)) else: axes_a, axes_b = axes try: na = len(axes_a) axes_a = list(axes_a) except TypeError: axes_a = [axes_a] na = 1 try: nb = len(axes_b) axes_b = list(axes_b) except TypeError: axes_b = [axes_b] nb = 1 # a, b = asarray(a), asarray(b) # <--- modified as_ = a.shape nda = a.ndim bs = b.shape ndb = b.ndim equal = True if nda == 0 or ndb == 0: pos = int(nda != 0) raise ValueError("Input {} operand does not have enough dimensions".format(pos)) if na != nb: equal = False else: for k in range(na): if as_[axes_a[k]] != bs[axes_b[k]]: equal = False break if axes_a[k] < 0: axes_a[k] += nda if axes_b[k] < 0: axes_b[k] += ndb if not equal: raise ValueError("shape-mismatch for sum") # Move the axes to sum over to the end of "a" # and to the front of "b" notin = [k for k in range(nda) if k not in axes_a] newaxes_a = notin + axes_a N2 = 1 for axis in axes_a: N2 *= as_[axis] newshape_a = (-1, N2) olda = [as_[axis] for axis in notin] notin = [k for k in range(ndb) if k not in axes_b] newaxes_b = axes_b + notin N2 = 1 for axis in axes_b: N2 *= bs[axis] newshape_b = (N2, -1) oldb = [bs[axis] for axis in notin] if any(dim == 0 for dim in chain(newshape_a, newshape_b)): res = asCOO(np.empty(olda + oldb), check=False) if isinstance(a, np.ndarray) or isinstance(b, np.ndarray): res = res.todense() return res at = a.transpose(newaxes_a).reshape(newshape_a) bt = b.transpose(newaxes_b).reshape(newshape_b) res = _dot(at, bt, return_type) return res.reshape(olda + oldb) def matmul(a, b): """Perform the equivalent of :obj:`numpy.matmul` on two arrays. Parameters ---------- a, b : Union[COO, np.ndarray, scipy.sparse.spmatrix] The arrays to perform the :code:`matmul` operation on. Returns ------- Union[COO, numpy.ndarray] The result of the operation. Raises ------ ValueError If all arguments don't have zero fill-values, or the shape of the two arrays is not broadcastable. See Also -------- numpy.matmul : NumPy equivalent function. COO.__matmul__ : Equivalent function for COO objects. 
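Examples
--------
A small sketch with illustrative operand values:

>>> import numpy as np
>>> import sparse
>>> a = sparse.COO(np.eye(3, dtype=int))
>>> b = sparse.COO(np.arange(9).reshape(3, 3))
>>> sparse.matmul(a, b).todense()  # doctest: +SKIP
array([[0, 1, 2],
       [3, 4, 5],
       [6, 7, 8]])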
""" check_zero_fill_value(a, b) if not hasattr(a, "ndim") or not hasattr(b, "ndim"): raise TypeError( "Cannot perform dot product on types %s, %s" % (type(a), type(b)) ) # When b is 2-d, it is equivalent to dot if b.ndim <= 2: return dot(a, b) # when a is 2-d, we need to transpose result after dot if a.ndim <= 2: res = dot(a, b) axes = list(range(res.ndim)) axes.insert(-1, axes.pop(0)) return res.transpose(axes) # If a can be squeeze to a vector, use dot will be faster if a.ndim <= b.ndim and np.prod(a.shape[:-1]) == 1: res = dot(a.reshape(-1), b) shape = list(res.shape) shape.insert(-1, 1) return res.reshape(shape) # If b can be squeeze to a matrix, use dot will be faster if b.ndim <= a.ndim and np.prod(b.shape[:-2]) == 1: return dot(a, b.reshape(b.shape[-2:])) if a.ndim < b.ndim: a = a[(None,) * (b.ndim - a.ndim)] if a.ndim > b.ndim: b = b[(None,) * (a.ndim - b.ndim)] for i, j in zip(a.shape[:-2], b.shape[:-2]): if i != 1 and j != 1 and i != j: raise ValueError("shapes of a and b are not broadcastable") def _matmul_recurser(a, b): if a.ndim == 2: return dot(a, b) res = [] for i in range(max(a.shape[0], b.shape[0])): a_i = a[0] if a.shape[0] == 1 else a[i] b_i = b[0] if b.shape[0] == 1 else b[i] res.append(_matmul_recurser(a_i, b_i)) mask = [isinstance(x, SparseArray) for x in res] if all(mask): return stack(res) else: res = [x.todense() if isinstance(x, SparseArray) else x for x in res] return np.stack(res) return _matmul_recurser(a, b) def dot(a, b): """ Perform the equivalent of :obj:`numpy.dot` on two arrays. Parameters ---------- a, b : Union[COO, np.ndarray, scipy.sparse.spmatrix] The arrays to perform the :code:`dot` operation on. Returns ------- Union[COO, numpy.ndarray] The result of the operation. Raises ------ ValueError If all arguments don't have zero fill-values. See Also -------- numpy.dot : NumPy equivalent function. COO.dot : Equivalent function for COO objects. 
""" check_zero_fill_value(a, b) if not hasattr(a, "ndim") or not hasattr(b, "ndim"): raise TypeError( "Cannot perform dot product on types %s, %s" % (type(a), type(b)) ) if a.ndim == 1 and b.ndim == 1: if isinstance(a, SparseArray): a = asCOO(a) if isinstance(b, SparseArray): b = asCOO(b) return (a * b).sum() a_axis = -1 b_axis = -2 if b.ndim == 1: b_axis = -1 return tensordot(a, b, axes=(a_axis, b_axis)) def _dot(a, b, return_type=None): from ._coo import COO from ._compressed import GCXS from ._compressed.convert import uncompress_dimension from ._sparse_array import SparseArray out_shape = (a.shape[0], b.shape[1]) if all(isinstance(arr, SparseArray) for arr in [a, b]) and any( isinstance(arr, GCXS) for arr in [a, b] ): a = a.asformat("gcxs") b = b.asformat("gcxs", compressed_axes=a.compressed_axes) if isinstance(a, GCXS) and isinstance(b, GCXS): if a.nbytes > b.nbytes: b = b.change_compressed_axes(a.compressed_axes) else: a = a.change_compressed_axes(b.compressed_axes) if a.compressed_axes == (0,): # csr @ csr compressed_axes = (0,) data, indices, indptr = _dot_csr_csr_type(a.dtype, b.dtype)( out_shape, a.data, b.data, a.indices, b.indices, a.indptr, b.indptr ) elif a.compressed_axes == (1,): # csc @ csc # a @ b = (b.T @ a.T).T compressed_axes = (1,) data, indices, indptr = _dot_csr_csr_type(b.dtype, a.dtype)( out_shape[::-1], b.data, a.data, b.indices, a.indices, b.indptr, a.indptr, ) out = GCXS( (data, indices, indptr), shape=out_shape, compressed_axes=compressed_axes, prune=True, ) if return_type == np.ndarray: return out.todense() elif return_type == COO: return out.tocoo() return out if isinstance(a, GCXS) and isinstance(b, np.ndarray): if a.compressed_axes == (0,): # csr @ ndarray if return_type is None or return_type == np.ndarray: return _dot_csr_ndarray_type(a.dtype, b.dtype)( out_shape, a.data, a.indices, a.indptr, b ) data, indices, indptr = _dot_csr_ndarray_type_sparse(a.dtype, b.dtype)( out_shape, a.data, a.indices, a.indptr, b ) out = GCXS( (data, indices, indptr), shape=out_shape, compressed_axes=(0,), prune=True, ) if return_type == COO: return out.tocoo() return out if return_type is None or return_type == np.ndarray: # csc @ ndarray return _dot_csc_ndarray_type(a.dtype, b.dtype)( a.shape, b.shape, a.data, a.indices, a.indptr, b ) data, indices, indptr = _dot_csc_ndarray_type_sparse(a.dtype, b.dtype)( a.shape, b.shape, a.data, a.indices, a.indptr, b ) compressed_axes = (1,) out = GCXS( (data, indices, indptr), shape=out_shape, compressed_axes=compressed_axes, prune=True, ) if return_type == COO: return out.tocoo() return out if isinstance(a, np.ndarray) and isinstance(b, GCXS): at = a.view(type=np.ndarray).T bt = b.T # constant-time transpose if b.compressed_axes == (0,): if return_type is None or return_type == np.ndarray: out = _dot_csc_ndarray_type(bt.dtype, at.dtype)( bt.shape, at.shape, bt.data, bt.indices, bt.indptr, at ) return out.T data, indices, indptr = _dot_csc_ndarray_type_sparse(bt.dtype, at.dtype)( bt.shape, at.shape, bt.data, b.indices, b.indptr, at ) out = GCXS( (data, indices, indptr), shape=out_shape, compressed_axes=(0,), prune=True, ) if return_type == COO: return out.tocoo() return out # compressed_axes == (1,) if return_type is None or return_type == np.ndarray: return _dot_ndarray_csc_type(a.dtype, b.dtype)( out_shape, b.data, b.indices, b.indptr, a ) data, indices, indptr = _dot_csr_ndarray_type_sparse(bt.dtype, at.dtype)( out_shape[::-1], bt.data, bt.indices, bt.indptr, at ) out = GCXS( (data, indices, indptr), shape=out_shape, 
compressed_axes=(1,), prune=True ) if return_type == COO: return out.tocoo() return out if isinstance(a, COO) and isinstance(b, COO): # convert to csr a_indptr = np.empty(a.shape[0] + 1, dtype=np.intp) a_indptr[0] = 0 np.cumsum(np.bincount(a.coords[0], minlength=a.shape[0]), out=a_indptr[1:]) b_indptr = np.empty(b.shape[0] + 1, dtype=np.intp) b_indptr[0] = 0 np.cumsum(np.bincount(b.coords[0], minlength=b.shape[0]), out=b_indptr[1:]) coords, data = _dot_coo_coo_type(a.dtype, b.dtype)( out_shape, a.coords, b.coords, a.data, b.data, a_indptr, b_indptr ) out = COO( coords, data, shape=out_shape, has_duplicates=False, sorted=False, prune=True, ) if return_type == np.ndarray: return out.todense() elif return_type == GCXS: return out.asformat("gcxs") return out if isinstance(a, COO) and isinstance(b, np.ndarray): b = b.view(type=np.ndarray).T if return_type is None or return_type == np.ndarray: return _dot_coo_ndarray_type(a.dtype, b.dtype)( a.coords, a.data, b, out_shape ) coords, data = _dot_coo_ndarray_type_sparse(a.dtype, b.dtype)( a.coords, a.data, b, out_shape ) out = COO(coords, data, shape=out_shape, has_duplicates=False, sorted=True) if return_type == GCXS: return out.asformat("gcxs") return out if isinstance(a, np.ndarray) and isinstance(b, COO): b = b.T a = a.view(type=np.ndarray) if return_type is None or return_type == np.ndarray: return _dot_ndarray_coo_type(a.dtype, b.dtype)( a, b.coords, b.data, out_shape ) coords, data = _dot_ndarray_coo_type_sparse(a.dtype, b.dtype)( a, b.coords, b.data, out_shape ) out = COO( coords, data, shape=out_shape, has_duplicates=False, sorted=True, prune=True ) if return_type == GCXS: return out.asformat("gcxs") return out def _memoize_dtype(f): """ Memoizes a function taking in NumPy dtypes. Parameters ---------- f : Callable Returns ------- wrapped : Callable Examples -------- >>> def func(dt1): ... return object() >>> func = _memoize_dtype(func) >>> func(np.dtype('i8')) is func(np.dtype('int64')) True >>> func(np.dtype('i8')) is func(np.dtype('i4')) False """ cache = {} @wraps(f) def wrapped(*args): key = tuple(arg.name for arg in args) if key in cache: return cache[key] result = f(*args) cache[key] = result return result return wrapped @numba.jit(nopython=True, nogil=True) def _csr_csr_count_nnz( out_shape, a_indices, b_indices, a_indptr, b_indptr ): # pragma: no cover """ A function for computing the number of nonzero values in the resulting array from multiplying an array with compressed rows with an array with compressed rows: (a @ b).nnz. Parameters ---------- out_shape : tuple The shape of the output array. indptr : ndarray The empty index pointer array for the output. a_indices, a_indptr : np.ndarray The indices and index pointer array of ``a``. b_data, b_indices, b_indptr : np.ndarray The indices and index pointer array of ``b``. """ n_row, n_col = out_shape nnz = 0 mask = np.full(n_col, -1) for i in range(n_row): row_nnz = 0 for j in a_indices[a_indptr[i] : a_indptr[i + 1]]: for k in b_indices[b_indptr[j] : b_indptr[j + 1]]: if mask[k] != i: mask[k] = i row_nnz += 1 nnz += row_nnz return nnz @numba.jit(nopython=True, nogil=True) def _csr_ndarray_count_nnz( out_shape, indptr, a_indices, a_indptr, b ): # pragma: no cover """ A function for computing the number of nonzero values in the resulting array from multiplying an array with compressed rows with a dense numpy array: (a @ b).nnz. Parameters ---------- out_shape : tuple The shape of the output array. indptr : ndarray The empty index pointer array for the output. 
a_indices, a_indptr : np.ndarray The indices and index pointer array of ``a``. b : np.ndarray The second input array ``b``. """ nnz = 0 for i in range(out_shape[0]): cur_row = a_indices[a_indptr[i] : a_indptr[i + 1]] for j in range(out_shape[1]): for k in cur_row: if b[k, j] != 0: nnz += 1 break indptr[i + 1] = nnz return nnz @numba.jit(nopython=True, nogil=True) def _csc_ndarray_count_nnz( a_shape, b_shape, indptr, a_indices, a_indptr, b ): # pragma: no cover """ A function for computing the number of nonzero values in the resulting array from multiplying an array with compressed columns with a dense numpy array: (a @ b).nnz. Parameters ---------- a_shape, b_shape : tuple The shapes of the input arrays. indptr : ndarray The empty index pointer array for the output. a_indices, a_indptr : np.ndarray The indices and index pointer array of ``a``. b : np.ndarray The second input array ``b``. """ nnz = 0 mask = np.full(a_shape[0], -1) for i in range(b_shape[1]): col_nnz = 0 for j in range(b_shape[0]): for k in a_indices[a_indptr[j] : a_indptr[j + 1]]: if b[j, i] != 0 and mask[k] != i: mask[k] = i col_nnz += 1 nnz += col_nnz indptr[i + 1] = nnz return nnz def _dot_dtype(dt1, dt2): return (np.zeros((), dtype=dt1) * np.zeros((), dtype=dt1)).dtype @_memoize_dtype def _dot_csr_csr_type(dt1, dt2): dtr = _dot_dtype(dt1, dt2) @numba.jit( nopython=True, nogil=True, locals={"data_curr": numba.np.numpy_support.from_dtype(dtr)}, ) def _dot_csr_csr( out_shape, a_data, b_data, a_indices, b_indices, a_indptr, b_indptr ): # pragma: no cover """ Utility function taking in two ``GCXS`` objects and calculating their dot product: a @ b for a and b with compressed rows. Parameters ---------- out_shape : tuple The shape of the output array. a_data, a_indices, a_indptr : np.ndarray The data, indices, and index pointer arrays of ``a``. b_data, b_indices, b_indptr : np.ndarray The data, indices, and index pointer arrays of ``b``. """ # much of this is borrowed from: # https://github.com/scipy/scipy/blob/master/scipy/sparse/sparsetools/csr.h # calculate nnz before multiplying so we can use static arrays nnz = _csr_csr_count_nnz(out_shape, a_indices, b_indices, a_indptr, b_indptr) n_row, n_col = out_shape indptr = np.empty(n_row + 1, dtype=np.intp) indptr[0] = 0 indices = np.empty(nnz, dtype=np.intp) data = np.empty(nnz, dtype=dtr) next_ = np.full(n_col, -1) sums = np.zeros(n_col, dtype=dtr) nnz = 0 for i in range(n_row): head = -2 length = 0 next_[:] = -1 for j, av in zip( a_indices[a_indptr[i] : a_indptr[i + 1]], a_data[a_indptr[i] : a_indptr[i + 1]], ): for k, bv in zip( b_indices[b_indptr[j] : b_indptr[j + 1]], b_data[b_indptr[j] : b_indptr[j + 1]], ): sums[k] += av * bv if next_[k] == -1: next_[k] = head head = k length += 1 for _ in range(length): if next_[head] != -1: indices[nnz] = head data[nnz] = sums[head] nnz += 1 temp = head head = next_[head] next_[temp] = -1 sums[temp] = 0 indptr[i + 1] = nnz return data, indices, indptr return _dot_csr_csr @_memoize_dtype def _dot_csr_ndarray_type(dt1, dt2): dtr = _dot_dtype(dt1, dt2) @numba.jit( nopython=True, nogil=True, locals={"data_curr": numba.np.numpy_support.from_dtype(dtr)}, ) def _dot_csr_ndarray(out_shape, a_data, a_indices, a_indptr, b): # pragma: no cover """ Utility function taking in one `GCXS` and one ``ndarray`` and calculating their dot product: a @ b for a with compressed rows. Returns a dense result. Parameters ---------- a_data, a_indices, a_indptr : np.ndarray The data, indices, and index pointers of ``a``. 
b : np.ndarray The second input array ``b``. out_shape : Tuple[int] The shape of the output array. """ out = np.empty(out_shape, dtype=dtr) for i in range(out_shape[0]): for j in range(out_shape[1]): val = 0 for k in range(a_indptr[i], a_indptr[i + 1]): ind = a_indices[k] v = a_data[k] val += v * b[ind, j] out[i, j] = val return out return _dot_csr_ndarray @_memoize_dtype def _dot_csr_ndarray_type_sparse(dt1, dt2): dtr = _dot_dtype(dt1, dt2) @numba.jit( nopython=True, nogil=True, locals={"data_curr": numba.np.numpy_support.from_dtype(dtr)}, ) def _dot_csr_ndarray_sparse( out_shape, a_data, a_indices, a_indptr, b ): # pragma: no cover """ Utility function taking in one `GCXS` and one ``ndarray`` and calculating their dot product: a @ b for a with compressed rows. Returns a sparse result. Parameters ---------- a_data, a_indices, a_indptr : np.ndarray The data, indices, and index pointers of ``a``. b : np.ndarray The second input array ``b``. out_shape : Tuple[int] The shape of the output array. """ indptr = np.empty(out_shape[0] + 1, dtype=np.intp) indptr[0] = 0 nnz = _csr_ndarray_count_nnz(out_shape, indptr, a_indices, a_indptr, b) indices = np.empty(nnz, dtype=np.intp) data = np.empty(nnz, dtype=dtr) current = 0 for i in range(out_shape[0]): for j in range(out_shape[1]): val = 0 nonzero = False for k in range(a_indptr[i], a_indptr[i + 1]): ind = a_indices[k] v = a_data[k] val += v * b[ind, j] if b[ind, j] != 0: nonzero = True if nonzero: data[current] = val indices[current] = j current += 1 return data, indices, indptr return _dot_csr_ndarray_sparse @_memoize_dtype def _dot_csc_ndarray_type_sparse(dt1, dt2): dtr = _dot_dtype(dt1, dt2) @numba.jit( nopython=True, nogil=True, locals={"data_curr": numba.np.numpy_support.from_dtype(dtr)}, ) def _dot_csc_ndarray_sparse( a_shape, b_shape, a_data, a_indices, a_indptr, b ): # pragma: no cover """ Utility function taking in one `GCXS` and one ``ndarray`` and calculating their dot product: a @ b for a with compressed columns. Returns a sparse result. Parameters ---------- a_data, a_indices, a_indptr : np.ndarray The data, indices, and index pointers of ``a``. b : np.ndarray The second input array ``b``. a_shape, b_shape : Tuple[int] The shapes of the input arrays. """ indptr = np.empty(b_shape[1] + 1, dtype=np.intp) nnz = _csc_ndarray_count_nnz(a_shape, b_shape, indptr, a_indices, a_indptr, b) indices = np.empty(nnz, dtype=np.intp) data = np.empty(nnz, dtype=dtr) sums = np.zeros(a_shape[0]) mask = np.full(a_shape[0], -1) nnz = 0 for i in range(b_shape[1]): head = -2 length = 0 for j in range(b_shape[0]): u = b[j, i] if u != 0: for k in range(a_indptr[j], a_indptr[j + 1]): ind = a_indices[k] v = a_data[k] sums[ind] += u * v if mask[ind] == -1: mask[ind] = head head = ind length += 1 start = nnz for _ in range(length): if sums[head] != 0: indices[nnz] = head data[nnz] = sums[head] nnz += 1 temp = head head = mask[head] mask[temp] = -1 sums[temp] = 0 return data, indices, indptr return _dot_csc_ndarray_sparse @_memoize_dtype def _dot_csc_ndarray_type(dt1, dt2): dtr = _dot_dtype(dt1, dt2) @numba.jit( nopython=True, nogil=True, locals={"data_curr": numba.np.numpy_support.from_dtype(dtr)}, ) def _dot_csc_ndarray( a_shape, b_shape, a_data, a_indices, a_indptr, b ): # pragma: no cover """ Utility function taking in one `GCXS` and one ``ndarray`` and calculating their dot product: a @ b for a with compressed columns. Returns a dense result. Parameters ---------- a_data, a_indices, a_indptr : np.ndarray The data, indices, and index pointers of ``a``. 
b : np.ndarray The second input array ``b``. a_shape, b_shape : Tuple[int] The shapes of the input arrays. """ out = np.zeros((a_shape[0], b_shape[1]), dtype=dtr) for j in range(b_shape[1]): for i in range(b_shape[0]): for k in range(a_indptr[i], a_indptr[i + 1]): out[a_indices[k], j] += a_data[k] * b[i, j] return out return _dot_csc_ndarray @_memoize_dtype def _dot_ndarray_csc_type(dt1, dt2): dtr = _dot_dtype(dt1, dt2) @numba.jit( nopython=True, nogil=True, locals={"data_curr": numba.np.numpy_support.from_dtype(dtr)}, ) def _dot_ndarray_csc(out_shape, b_data, b_indices, b_indptr, a): # pragma: no cover """ Utility function taking in one `ndarray` and one ``GCXS`` and calculating their dot product: a @ b for b with compressed columns. Parameters ---------- a : np.ndarray The input array ``a``. b_data, b_indices, b_indptr : np.ndarray The data, indices, and index pointers of ``b``. out_shape : Tuple[int] The shape of the output array. """ out = np.empty(out_shape, dtype=dtr) for i in range(out_shape[0]): for j in range(out_shape[1]): total = 0 for k in range(b_indptr[j], b_indptr[j + 1]): total += a[i, b_indices[k]] * b_data[k] out[i, j] = total return out return _dot_ndarray_csc @_memoize_dtype def _dot_coo_coo_type(dt1, dt2): dtr = _dot_dtype(dt1, dt2) @numba.jit( nopython=True, nogil=True, locals={"data_curr": numba.np.numpy_support.from_dtype(dtr)}, ) def _dot_coo_coo( out_shape, a_coords, b_coords, a_data, b_data, a_indptr, b_indptr ): # pragma: no cover """ Utility function taking in two ``COO`` objects and calculating their dot product: a @ b. Parameters ---------- a_shape, b_shape : tuple The shapes of the input arrays. a_data, a_coords : np.ndarray The data and coordinates of ``a``. b_data, b_coords : np.ndarray The data and coordinates of ``b``. """ # much of this is borrowed from: # https://github.com/scipy/scipy/blob/master/scipy/sparse/sparsetools/csr.h n_row, n_col = out_shape # calculate nnz before multiplying so we can use static arrays nnz = _csr_csr_count_nnz( out_shape, a_coords[1], b_coords[1], a_indptr, b_indptr ) coords = np.empty((2, nnz), dtype=np.intp) data = np.empty(nnz, dtype=dtr) next_ = np.full(n_col, -1) sums = np.zeros(n_col, dtype=dtr) nnz = 0 for i in range(n_row): head = -2 length = 0 next_[:] = -1 for j, av in zip( a_coords[1, a_indptr[i] : a_indptr[i + 1]], a_data[a_indptr[i] : a_indptr[i + 1]], ): for k, bv in zip( b_coords[1, b_indptr[j] : b_indptr[j + 1]], b_data[b_indptr[j] : b_indptr[j + 1]], ): sums[k] += av * bv if next_[k] == -1: next_[k] = head head = k length += 1 start = nnz for _ in range(length): if next_[head] != -1: coords[0, nnz] = i coords[1, nnz] = head data[nnz] = sums[head] nnz += 1 temp = head head = next_[head] next_[temp] = -1 sums[temp] = 0 return coords, data return _dot_coo_coo @_memoize_dtype def _dot_coo_ndarray_type(dt1, dt2): dtr = _dot_dtype(dt1, dt2) @numba.jit(nopython=True, nogil=True) def _dot_coo_ndarray(coords1, data1, array2, out_shape): # pragma: no cover """ Utility function taking in one `COO` and one ``ndarray`` and calculating a "sense" of their dot product. Acually computes ``s1 @ x2.T``. Parameters ---------- data1, coords1 : np.ndarray The data and coordinates of ``s1``. array2 : np.ndarray The second input array ``x2``. out_shape : Tuple[int] The output shape. 
""" out = np.zeros(out_shape, dtype=dtr) didx1 = 0 while didx1 < len(data1): oidx1 = coords1[0, didx1] didx1_curr = didx1 for oidx2 in range(out_shape[1]): didx1 = didx1_curr while didx1 < len(data1) and coords1[0, didx1] == oidx1: out[oidx1, oidx2] += data1[didx1] * array2[oidx2, coords1[1, didx1]] didx1 += 1 return out return _dot_coo_ndarray @_memoize_dtype def _dot_coo_ndarray_type_sparse(dt1, dt2): dtr = _dot_dtype(dt1, dt2) @numba.jit( nopython=True, nogil=True, locals={"data_curr": numba.np.numpy_support.from_dtype(dtr)}, ) def _dot_coo_ndarray(coords1, data1, array2, out_shape): # pragma: no cover """ Utility function taking in one `COO` and one ``ndarray`` and calculating a "sense" of their dot product. Acually computes ``s1 @ x2.T``. Parameters ---------- data1, coords1 : np.ndarray The data and coordinates of ``s1``. array2 : np.ndarray The second input array ``x2``. out_shape : Tuple[int] The output shape. """ out_data = [] out_coords = [] # coords1.shape = (2, len(data1)) # coords1[0, :] = rows, sorted # coords1[1, :] = columns didx1 = 0 while didx1 < len(data1): current_row = coords1[0, didx1] cur_didx1 = didx1 oidx2 = 0 while oidx2 < out_shape[1]: cur_didx1 = didx1 data_curr = 0 while cur_didx1 < len(data1) and coords1[0, cur_didx1] == current_row: data_curr += data1[cur_didx1] * array2[oidx2, coords1[1, cur_didx1]] cur_didx1 += 1 if data_curr != 0: out_data.append(data_curr) out_coords.append((current_row, oidx2)) oidx2 += 1 didx1 = cur_didx1 if len(out_data) == 0: return np.empty((2, 0), dtype=np.intp), np.empty((0,), dtype=dtr) return np.array(out_coords).T, np.array(out_data) return _dot_coo_ndarray @_memoize_dtype def _dot_ndarray_coo_type(dt1, dt2): dtr = _dot_dtype(dt1, dt2) @numba.jit(nopython=True, nogil=True) def _dot_ndarray_coo(array1, coords2, data2, out_shape): # pragma: no cover """ Utility function taking in two one ``ndarray`` and one ``COO`` and calculating a "sense" of their dot product. Acually computes ``x1 @ s2.T``. Parameters ---------- array1 : np.ndarray The input array ``x1``. data2, coords2 : np.ndarray The data and coordinates of ``s2``. out_shape : Tuple[int] The output shape. """ out = np.zeros(out_shape, dtype=dtr) for oidx1 in range(out_shape[0]): for didx2 in range(len(data2)): oidx2 = coords2[0, didx2] out[oidx1, oidx2] += array1[oidx1, coords2[1, didx2]] * data2[didx2] return out return _dot_ndarray_coo @_memoize_dtype def _dot_ndarray_coo_type_sparse(dt1, dt2): dtr = _dot_dtype(dt1, dt2) @numba.jit( nopython=True, nogil=True, locals={"data_curr": numba.np.numpy_support.from_dtype(dtr)}, ) def _dot_ndarray_coo(array1, coords2, data2, out_shape): # pragma: no cover """ Utility function taking in two one ``ndarray`` and one ``COO`` and calculating a "sense" of their dot product. Acually computes ``x1 @ s2.T``. Parameters ---------- array1 : np.ndarray The input array ``x1``. data2, coords2 : np.ndarray The data and coordinates of ``s2``. out_shape : Tuple[int] The output shape. 
""" out_data = [] out_coords = [] # coords2.shape = (2, len(data2)) # coords2[0, :] = columns, sorted # coords2[1, :] = rows for oidx1 in range(out_shape[0]): data_curr = 0 current_col = 0 for didx2 in range(len(data2)): if coords2[0, didx2] != current_col: if data_curr != 0: out_data.append(data_curr) out_coords.append([oidx1, current_col]) data_curr = 0 current_col = coords2[0, didx2] data_curr += array1[oidx1, coords2[1, didx2]] * data2[didx2] if data_curr != 0: out_data.append(data_curr) out_coords.append([oidx1, current_col]) if len(out_data) == 0: return np.empty((2, 0), dtype=np.intp), np.empty((0,), dtype=dtr) return np.array(out_coords).T, np.array(out_data) return _dot_ndarray_coo def stack(arrays, axis=0, compressed_axes=None): """ Stack the input arrays along the given dimension. Parameters ---------- arrays : Iterable[SparseArray] The input arrays to stack. axis : int, optional The axis along which to stack the input arrays. compressed_axes : iterable, optional The axes to compress if returning a GCXS array. Returns ------- SparseArray The output stacked array. Raises ------ ValueError If all elements of :code:`arrays` don't have the same fill-value. See Also -------- numpy.stack : NumPy equivalent function """ from ._coo import COO if any(isinstance(arr, COO) for arr in arrays): from ._coo import stack as coo_stack return coo_stack(arrays, axis) else: from ._compressed import stack as gcxs_stack return gcxs_stack(arrays, axis, compressed_axes) def concatenate(arrays, axis=0, compressed_axes=None): """ Concatenate the input arrays along the given dimension. Parameters ---------- arrays : Iterable[SparseArray] The input arrays to concatenate. axis : int, optional The axis along which to concatenate the input arrays. The default is zero. compressed_axes : iterable, optional The axes to compress if returning a GCXS array. Returns ------- SparseArray The output concatenated array. Raises ------ ValueError If all elements of :code:`arrays` don't have the same fill-value. See Also -------- numpy.concatenate : NumPy equivalent function """ from ._coo import COO if any(isinstance(arr, COO) for arr in arrays): from ._coo import concatenate as coo_concat return coo_concat(arrays, axis) else: from ._compressed import concatenate as gcxs_concat return gcxs_concat(arrays, axis, compressed_axes) def eye(N, M=None, k=0, dtype=float, format="coo", compressed_axes=None): """Return a 2-D array in the specified format with ones on the diagonal and zeros elsewhere. Parameters ---------- N : int Number of rows in the output. M : int, optional Number of columns in the output. If None, defaults to `N`. k : int, optional Index of the diagonal: 0 (the default) refers to the main diagonal, a positive value refers to an upper diagonal, and a negative value to a lower diagonal. dtype : data-type, optional Data-type of the returned array. format : str, optional A format string. compressed_axes : iterable, optional The axes to compress if returning a GCXS array. Returns ------- I : SparseArray of shape (N, M) An array where all elements are equal to zero, except for the `k`-th diagonal, whose values are equal to one. 
Examples -------- >>> eye(2, dtype=int).todense() # doctest: +NORMALIZE_WHITESPACE array([[1, 0], [0, 1]]) >>> eye(3, k=1).todense() # doctest: +SKIP array([[0., 1., 0.], [0., 0., 1.], [0., 0., 0.]]) """ from sparse import COO if M is None: M = N N = int(N) M = int(M) k = int(k) data_length = min(N, M) if k > 0: data_length = max(min(data_length, M - k), 0) n_coords = np.arange(data_length, dtype=np.intp) m_coords = n_coords + k elif k < 0: data_length = max(min(data_length, N + k), 0) m_coords = np.arange(data_length, dtype=np.intp) n_coords = m_coords - k else: n_coords = m_coords = np.arange(data_length, dtype=np.intp) coords = np.stack([n_coords, m_coords]) data = np.array(1, dtype=dtype) return COO( coords, data=data, shape=(N, M), has_duplicates=False, sorted=True ).asformat(format, compressed_axes=compressed_axes) def full(shape, fill_value, dtype=None, format="coo", compressed_axes=None): """Return a SparseArray of given shape and type, filled with `fill_value`. Parameters ---------- shape : int or tuple of ints Shape of the new array, e.g., ``(2, 3)`` or ``2``. fill_value : scalar Fill value. dtype : data-type, optional The desired data-type for the array. The default, `None`, means `np.array(fill_value).dtype`. format : str, optional A format string. compressed_axes : iterable, optional The axes to compress if returning a GCXS array. Returns ------- out : SparseArray Array of `fill_value` with the given shape and dtype. Examples -------- >>> full(5, 9).todense() # doctest: +NORMALIZE_WHITESPACE array([9, 9, 9, 9, 9]) >>> full((2, 2), 9, dtype=float).todense() # doctest: +SKIP array([[9., 9.], [9., 9.]]) """ from sparse import COO if dtype is None: dtype = np.array(fill_value).dtype if not isinstance(shape, tuple): shape = (shape,) if compressed_axes is not None: check_compressed_axes(shape, compressed_axes) data = np.empty(0, dtype=dtype) coords = np.empty((len(shape), 0), dtype=np.intp) return COO( coords, data=data, shape=shape, fill_value=fill_value, has_duplicates=False, sorted=True, ).asformat(format, compressed_axes=compressed_axes) def full_like(a, fill_value, dtype=None, shape=None, format=None, compressed_axes=None): """Return a full array with the same shape and type as a given array. Parameters ---------- a : array_like The shape and data-type of the result will match those of `a`. dtype : data-type, optional Overrides the data type of the result. format : str, optional A format string. compressed_axes : iterable, optional The axes to compress if returning a GCXS array. Returns ------- out : SparseArray Array of `fill_value` with the same shape and type as `a`. Examples -------- >>> x = np.ones((2, 3), dtype='i8') >>> full_like(x, 9.0).todense() # doctest: +NORMALIZE_WHITESPACE array([[9, 9, 9], [9, 9, 9]]) """ if format is None and not isinstance(a, np.ndarray): format = type(a).__name__.lower() elif format is None: format = "coo" if hasattr(a, "compressed_axes") and compressed_axes is None: compressed_axes = a.compressed_axes return full( a.shape if shape is None else shape, fill_value, dtype=(a.dtype if dtype is None else dtype), format=format, compressed_axes=compressed_axes, ) def zeros(shape, dtype=float, format="coo", compressed_axes=None): """Return a SparseArray of given shape and type, filled with zeros. Parameters ---------- shape : int or tuple of ints Shape of the new array, e.g., ``(2, 3)`` or ``2``. dtype : data-type, optional The desired data-type for the array, e.g., `numpy.int8`. Default is `numpy.float64`. format : str, optional A format string. 
compressed_axes : iterable, optional The axes to compress if returning a GCXS array. Returns ------- out : SparseArray Array of zeros with the given shape and dtype. Examples -------- >>> zeros(5).todense() # doctest: +SKIP array([0., 0., 0., 0., 0.]) >>> zeros((2, 2), dtype=int).todense() # doctest: +NORMALIZE_WHITESPACE array([[0, 0], [0, 0]]) """ if compressed_axes is not None: check_compressed_axes(shape, compressed_axes) return full(shape, 0, np.dtype(dtype)).asformat( format, compressed_axes=compressed_axes ) def zeros_like(a, dtype=None, shape=None, format=None, compressed_axes=None): """Return a SparseArray of zeros with the same shape and type as ``a``. Parameters ---------- a : array_like The shape and data-type of the result will match those of `a`. dtype : data-type, optional Overrides the data type of the result. format : str, optional A format string. compressed_axes : iterable, optional The axes to compress if returning a GCXS array. Returns ------- out : SparseArray Array of zeros with the same shape and type as `a`. Examples -------- >>> x = np.ones((2, 3), dtype='i8') >>> zeros_like(x).todense() # doctest: +NORMALIZE_WHITESPACE array([[0, 0, 0], [0, 0, 0]]) """ return full_like( a, 0, dtype=dtype, shape=shape, format=format, compressed_axes=compressed_axes ) def ones(shape, dtype=float, format="coo", compressed_axes=None): """Return a SparseArray of given shape and type, filled with ones. Parameters ---------- shape : int or tuple of ints Shape of the new array, e.g., ``(2, 3)`` or ``2``. dtype : data-type, optional The desired data-type for the array, e.g., `numpy.int8`. Default is `numpy.float64`. format : str, optional A format string. compressed_axes : iterable, optional The axes to compress if returning a GCXS array. Returns ------- out : SparseArray Array of ones with the given shape and dtype. Examples -------- >>> ones(5).todense() # doctest: +SKIP array([1., 1., 1., 1., 1.]) >>> ones((2, 2), dtype=int).todense() # doctest: +NORMALIZE_WHITESPACE array([[1, 1], [1, 1]]) """ if compressed_axes is not None: check_compressed_axes(shape, compressed_axes) return full(shape, 1, np.dtype(dtype)).asformat( format, compressed_axes=compressed_axes ) def ones_like(a, dtype=None, shape=None, format=None, compressed_axes=None): """Return a SparseArray of ones with the same shape and type as ``a``. Parameters ---------- a : array_like The shape and data-type of the result will match those of `a`. dtype : data-type, optional Overrides the data type of the result. format : str, optional A format string. compressed_axes : iterable, optional The axes to compress if returning a GCXS array. Returns ------- out : SparseArray Array of ones with the same shape and type as `a`. Examples -------- >>> x = np.ones((2, 3), dtype='i8') >>> ones_like(x).todense() # doctest: +NORMALIZE_WHITESPACE array([[1, 1, 1], [1, 1, 1]]) """ return full_like( a, 1, dtype=dtype, shape=shape, format=format, compressed_axes=compressed_axes ) def outer(a, b, out=None): """ Return outer product of two sparse arrays. Parameters ---------- a, b : sparse.SparseArray The input arrays. out : sparse.SparseArray The output array. 
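Returns
-------
out : SparseArray
    The outer product of the flattened inputs, with shape ``(a.size, b.size)``.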
Examples -------- >>> import numpy as np >>> import sparse >>> a = sparse.COO(np.arange(4)) >>> o = sparse.outer(a, a) >>> o.todense() array([[0, 0, 0, 0], [0, 1, 2, 3], [0, 2, 4, 6], [0, 3, 6, 9]]) """ from sparse import SparseArray, COO if isinstance(a, SparseArray): a = COO(a) if isinstance(b, SparseArray): b = COO(b) return np.multiply.outer(a.flatten(), b.flatten(), out=out) def asnumpy(a, dtype=None, order=None): """Returns a dense numpy array from an arbitrary source array. Args: a: Arbitrary object that can be converted to :class:`numpy.ndarray`. order ({'C', 'F', 'A'}): The desired memory layout of the output array. When ``order`` is 'A', it uses 'F' if ``a`` is fortran-contiguous and 'C' otherwise. Returns: numpy.ndarray: Converted array on the host memory. """ from ._sparse_array import SparseArray if isinstance(a, SparseArray): a = a.todense() return np.array(a, dtype=dtype, copy=False, order=order) # this code was taken from numpy.moveaxis # (cf. numpy/core/numeric.py, lines 1340-1409, v1.18.4) # https://github.com/numpy/numpy/blob/v1.18.4/numpy/core/numeric.py#L1340-L1409 def moveaxis(a, source, destination): """ Move axes of an array to new positions. Other axes remain in their original order. Parameters ---------- a : COO The array whose axes should be reordered. source : int or List[int] Original positions of the axes to move. These must be unique. destination : int or List[int] Destination positions for each of the original axes. These must also be unique. Returns ------- COO Array with moved axes. Examples -------- >>> import numpy as np >>> import sparse >>> x = sparse.COO.from_numpy(np.ones((2, 3, 4, 5))) >>> sparse.moveaxis(x, (0, 1), (2, 3)) """ if not isinstance(source, Iterable): source = (source,) if not isinstance(destination, Iterable): destination = (destination,) source = normalize_axis(source, a.ndim) destination = normalize_axis(destination, a.ndim) if len(source) != len(destination): raise ValueError( "`source` and `destination` arguments must have " "the same number of elements" ) order = [n for n in range(a.ndim) if n not in source] for dest, src in sorted(zip(destination, source)): order.insert(dest, src) result = a.transpose(order) return result sparse-0.12.0/sparse/_compressed/000077500000000000000000000000001402510130100166525ustar00rootroot00000000000000sparse-0.12.0/sparse/_compressed/__init__.py000066400000000000000000000001161402510130100207610ustar00rootroot00000000000000from .compressed import GCXS, CSC, CSR from .common import stack, concatenate sparse-0.12.0/sparse/_compressed/common.py000066400000000000000000000074621402510130100205250ustar00rootroot00000000000000import numpy as np from .._utils import check_consistent_fill_value, normalize_axis, can_store def concatenate(arrays, axis=0, compressed_axes=None): from .compressed import GCXS check_consistent_fill_value(arrays) arrays = [ arr if isinstance(arr, GCXS) else GCXS(arr, compressed_axes=(axis,)) for arr in arrays ] axis = normalize_axis(axis, arrays[0].ndim) dim = sum(x.shape[axis] for x in arrays) shape = list(arrays[0].shape) shape[axis] = dim assert all( x.shape[ax] == arrays[0].shape[ax] for x in arrays for ax in set(range(arrays[0].ndim)) - {axis} ) if compressed_axes is None: compressed_axes = (axis,) if arrays[0].ndim == 1: from .._coo.common import concatenate as coo_concat arrays = [arr.tocoo() for arr in arrays] return coo_concat(arrays, axis=axis) # arrays may have different compressed_axes # concatenating becomes easy when compressed_axes are the same arrays = 
[arr.change_compressed_axes((axis,)) for arr in arrays] ptr_list = [] for i, arr in enumerate(arrays): if i == 0: ptr_list.append(arr.indptr) continue ptr_list.append(arr.indptr[1:]) indptr = np.concatenate(ptr_list) indices = np.concatenate([arr.indices for arr in arrays]) data = np.concatenate([arr.data for arr in arrays]) ptr_len = arrays[0].indptr.shape[0] nnz = arrays[0].nnz total_nnz = sum(int(arr.nnz) for arr in arrays) if not can_store(indptr.dtype, total_nnz): indptr = indptr.astype(np.min_scalar_type(total_nnz)) for i in range(1, len(arrays)): indptr[ptr_len:] += nnz nnz = arrays[i].nnz ptr_len += arrays[i].indptr.shape[0] - 1 return GCXS( (data, indices, indptr), shape=tuple(shape), compressed_axes=arrays[0].compressed_axes, fill_value=arrays[0].fill_value, ).change_compressed_axes(compressed_axes) def stack(arrays, axis=0, compressed_axes=None): from .compressed import GCXS check_consistent_fill_value(arrays) arrays = [ arr if isinstance(arr, GCXS) else GCXS(arr, compressed_axes=(axis,)) for arr in arrays ] axis = normalize_axis(axis, arrays[0].ndim + 1) assert all( x.shape[ax] == arrays[0].shape[ax] for x in arrays for ax in set(range(arrays[0].ndim)) - {axis} ) if compressed_axes is None: compressed_axes = (axis,) if arrays[0].ndim == 1: from .._coo.common import stack as coo_stack arrays = [arr.tocoo() for arr in arrays] return coo_stack(arrays, axis=axis) # arrays may have different compressed_axes # stacking becomes easy when compressed_axes are the same ptr_list = [] for i in range(len(arrays)): shape = list(arrays[i].shape) shape.insert(axis, 1) arrays[i] = arrays[i].reshape(shape).change_compressed_axes((axis,)) if i == 0: ptr_list.append(arrays[i].indptr) continue ptr_list.append(arrays[i].indptr[1:]) shape[axis] = len(arrays) indptr = np.concatenate(ptr_list) indices = np.concatenate([arr.indices for arr in arrays]) data = np.concatenate([arr.data for arr in arrays]) ptr_len = arrays[0].indptr.shape[0] nnz = arrays[0].nnz total_nnz = sum(int(arr.nnz) for arr in arrays) if not can_store(indptr.dtype, total_nnz): indptr = indptr.astype(np.min_scalar_type(total_nnz)) for i in range(1, len(arrays)): indptr[ptr_len:] += nnz nnz = arrays[i].nnz ptr_len += arrays[i].indptr.shape[0] - 1 return GCXS( (data, indices, indptr), shape=tuple(shape), compressed_axes=arrays[0].compressed_axes, fill_value=arrays[0].fill_value, ).change_compressed_axes(compressed_axes) sparse-0.12.0/sparse/_compressed/compressed.py000066400000000000000000000661211402510130100213760ustar00rootroot00000000000000import copy as _copy import numpy as np import operator from numpy.lib.mixins import NDArrayOperatorsMixin from functools import reduce from collections.abc import Iterable import scipy.sparse as ss from scipy.sparse import compressed from typing import Tuple from .._sparse_array import SparseArray, _reduce_super_ufunc from .._coo.common import linear_loc from .._common import dot, matmul from .._utils import ( normalize_axis, can_store, check_zero_fill_value, check_compressed_axes, equivalent, ) from .._coo.core import COO from .convert import uncompress_dimension, _transpose, _1d_reshape from .indexing import getitem def _from_coo(x, compressed_axes=None, idx_dtype=None): if x.ndim == 0: if compressed_axes is not None: raise ValueError("no axes to compress for 0d array") return ((x.data, x.coords, []), x.shape, None, x.fill_value) if x.ndim == 1: if compressed_axes is not None: raise ValueError("no axes to compress for 1d array") return ((x.data, x.coords[0], ()), x.shape, None, x.fill_value) 
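# General n-d path (continued below): default compressed_axes to the shortest
# dimension, move the compressed axes to the front, linearize the COO
# coordinates in that order, and split each linear index into a CSR-like
# (row, column) pair from which ``indptr`` is built with a cumulative bincount.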
compressed_axes = normalize_axis(compressed_axes, x.ndim) if compressed_axes is None: # defaults to best compression ratio compressed_axes = (np.argmin(x.shape),) check_compressed_axes(x.shape, compressed_axes) axis_order = list(compressed_axes) # array location where the uncompressed dimensions start axisptr = len(compressed_axes) axis_order.extend(np.setdiff1d(np.arange(len(x.shape)), compressed_axes)) reordered_shape = tuple(x.shape[i] for i in axis_order) row_size = np.prod(reordered_shape[:axisptr]) col_size = np.prod(reordered_shape[axisptr:]) compressed_shape = (row_size, col_size) shape = x.shape if idx_dtype and not can_store(idx_dtype, max(max(compressed_shape), x.nnz)): raise ValueError( "cannot store array with the compressed shape {} and nnz {} with dtype {}.".format( compressed_shape, x.nnz, idx_dtype, ) ) if not idx_dtype: idx_dtype = x.coords.dtype if not can_store(idx_dtype, max(max(compressed_shape), x.nnz)): idx_dtype = np.min_scalar_type(max(max(compressed_shape), x.nnz)) # transpose axes, linearize, reshape, and compress linear = linear_loc(x.coords[axis_order], reordered_shape) order = np.argsort(linear) linear = linear[order] coords = np.empty((2, x.nnz), dtype=idx_dtype) strides = 1 for i, d in enumerate(compressed_shape[::-1]): coords[-(i + 1), :] = (linear // strides) % d strides *= d indptr = np.empty(row_size + 1, dtype=idx_dtype) indptr[0] = 0 np.cumsum(np.bincount(coords[0], minlength=row_size), out=indptr[1:]) indices = coords[1] data = x.data[order] return ((data, indices, indptr), shape, compressed_axes, x.fill_value) class GCXS(SparseArray, NDArrayOperatorsMixin): """ A sparse multidimensional array. This is stored in GCXS format, a generalization of the GCRS/GCCS formats from 'Efficient storage scheme for n-dimensional sparse array: GCRS/GCCS': https://ieeexplore.ieee.org/document/7237032. GCXS generalizes the csr/csc sparse matrix formats. For arrays with ndim == 2, GCXS is the same csr/csc. For arrays with ndim >2, any combination of axes can be compressed, significantly reducing storage. Parameters ---------- arg : tuple (data, indices, indptr) A tuple of arrays holding the data, indices, and index pointers for the nonzero values of the array. shape : tuple[int] (COO.ndim,) The shape of the array. compressed_axes : Iterable[int] The axes to compress. prune : bool, optional A flag indicating whether or not we should prune any fill-values present in the data array. fill_value: scalar, optional The fill value for this array. Attributes ---------- data : numpy.ndarray (nnz,) An array holding the nonzero values corresponding to :obj:`GCXS.indices`. indices : numpy.ndarray (nnz,) An array holding the coordinates of every nonzero element along uncompressed dimensions. indptr : numpy.ndarray An array holding the cumulative sums of the nonzeros along the compressed dimensions. shape : tuple[int] (ndim,) The dimensions of this array. See Also -------- DOK : A mostly write-only sparse array. 
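Examples
--------
A small construction sketch; exact output checks are skipped since only the
general shape of the result matters here.

>>> import numpy as np
>>> import sparse
>>> x = np.eye(4, dtype=np.int64)
>>> s = sparse.GCXS.from_numpy(x, compressed_axes=(0,))
>>> s.nnz  # doctest: +SKIP
4
>>> s.todense()  # doctest: +SKIP
array([[1, 0, 0, 0],
       [0, 1, 0, 0],
       [0, 0, 1, 0],
       [0, 0, 0, 1]])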
""" __array_priority__ = 12 def __init__( self, arg, shape=None, compressed_axes=None, prune=False, fill_value=0, idx_dtype=None, ): if isinstance(arg, ss.spmatrix): arg = self.from_scipy_sparse(arg) if isinstance(arg, np.ndarray): (arg, shape, compressed_axes, fill_value) = _from_coo( COO(arg), compressed_axes ) elif isinstance(arg, COO): (arg, shape, compressed_axes, fill_value) = _from_coo( arg, compressed_axes, idx_dtype ) elif isinstance(arg, GCXS): if compressed_axes is not None and arg.compressed_axes != compressed_axes: arg = arg.change_compressed_axes(compressed_axes) (arg, shape, compressed_axes, fill_value) = ( (arg.data, arg.indices, arg.indptr), arg.shape, arg.compressed_axes, arg.fill_value, ) if shape is None: raise ValueError("missing `shape` argument") check_compressed_axes(len(shape), compressed_axes) if len(shape) == 1: compressed_axes = None self.data, self.indices, self.indptr = arg if self.data.ndim != 1: raise ValueError("data must be a scalar or 1-dimensional.") self.shape = shape self._compressed_axes = ( tuple(compressed_axes) if isinstance(compressed_axes, Iterable) else None ) self.fill_value = fill_value if prune: self._prune() def copy(self, deep=True): """Return a copy of the array. Parameters ---------- deep : boolean, optional If True (default), the internal coords and data arrays are also copied. Set to ``False`` to only make a shallow copy. """ return _copy.deepcopy(self) if deep else _copy.copy(self) @classmethod def from_numpy(cls, x, compressed_axes=None, fill_value=0, idx_dtype=None): coo = COO(x, fill_value=fill_value, idx_dtype=idx_dtype) return cls.from_coo(coo, compressed_axes, idx_dtype) @classmethod def from_coo(cls, x, compressed_axes=None, idx_dtype=None): (arg, shape, compressed_axes, fill_value) = _from_coo( x, compressed_axes, idx_dtype ) return cls( arg, shape=shape, compressed_axes=compressed_axes, fill_value=fill_value ) @classmethod def from_scipy_sparse(cls, x): if x.format == "csc": return cls( (x.data, x.indices, x.indptr), shape=x.shape, compressed_axes=(1,) ) else: x = x.asformat("csr") return cls( (x.data, x.indices, x.indptr), shape=x.shape, compressed_axes=(0,) ) @classmethod def from_iter( cls, x, shape=None, compressed_axes=None, fill_value=None, idx_dtype=None ): return cls.from_coo( COO.from_iter(x, shape, fill_value), compressed_axes, idx_dtype, ) @property def dtype(self): """ The datatype of this array. Returns ------- numpy.dtype The datatype of this array. See Also -------- numpy.ndarray.dtype : Numpy equivalent property. scipy.sparse.csr_matrix.dtype : Scipy equivalent property. """ return self.data.dtype @property def nnz(self): """ The number of nonzero elements in this array. Returns ------- int The number of nonzero elements in this array. See Also -------- COO.nnz : Equivalent :obj:`COO` array property. DOK.nnz : Equivalent :obj:`DOK` array property. numpy.count_nonzero : A similar Numpy function. scipy.sparse.csr_matrix.nnz : The Scipy equivalent property. """ return self.data.shape[0] @property def nbytes(self): """ The number of bytes taken up by this object. Note that for small arrays, this may undercount the number of bytes due to the large constant overhead. Returns ------- int The approximate bytes of memory taken by this object. See Also -------- numpy.ndarray.nbytes : The equivalent Numpy property. 
""" nbytes = self.data.nbytes + self.indices.nbytes + self.indptr.nbytes return nbytes @property def _axis_order(self): axis_order = list(self.compressed_axes) axis_order.extend( np.setdiff1d(np.arange(len(self.shape)), self.compressed_axes) ) return axis_order @property def _axisptr(self): # array location where the uncompressed dimensions start return len(self.compressed_axes) @property def _compressed_shape(self): row_size = np.prod(self._reordered_shape[: self._axisptr]) col_size = np.prod(self._reordered_shape[self._axisptr :]) return (row_size, col_size) @property def _reordered_shape(self): return tuple(self.shape[i] for i in self._axis_order) @property def T(self): return self.transpose() def __str__(self): return "".format( self.shape, self.dtype, self.nnz, self.fill_value, self.compressed_axes ) __repr__ = __str__ __getitem__ = getitem def _reduce_calc(self, method, axis, keepdims=False, **kwargs): if axis[0] is None or np.array_equal(axis, np.arange(self.ndim, dtype=np.intp)): x = self.flatten().tocoo() out = x.reduce(method, axis=None, keepdims=keepdims, **kwargs) if keepdims: return (out.reshape(np.ones(self.ndim, dtype=np.intp)),) return (out,) r = np.arange(self.ndim, dtype=np.intp) compressed_axes = [a for a in r if a not in set(axis)] x = self.change_compressed_axes(compressed_axes) idx = np.diff(x.indptr) != 0 indptr = x.indptr[:-1][idx] indices = (np.arange(x._compressed_shape[0], dtype=self.indptr.dtype))[idx] data = method.reduceat(x.data, indptr, **kwargs) counts = x.indptr[1:][idx] - x.indptr[:-1][idx] arr_attrs = (x, compressed_axes, indices) n_cols = x._compressed_shape[1] return (data, counts, axis, n_cols, arr_attrs) def _reduce_return(self, data, arr_attrs, result_fill_value): x, compressed_axes, indices = arr_attrs # prune data mask = ~equivalent(data, result_fill_value) data = data[mask] indices = indices[mask] out = GCXS( (data, indices, []), shape=(x._compressed_shape[0],), fill_value=result_fill_value, compressed_axes=None, ) return out.reshape(tuple(self.shape[d] for d in compressed_axes)) def change_compressed_axes(self, new_compressed_axes): """ Returns a new array with specified compressed axes. This operation is similar to converting a scipy.sparse.csc_matrix to a scipy.sparse.csr_matrix. Returns ------- GCXS A new instance of the input array with compression along the specified dimensions. """ if self.ndim == 1: raise NotImplementedError("no axes to compress for 1d array") new_compressed_axes = tuple( normalize_axis(new_compressed_axes[i], self.ndim) for i in range(len(new_compressed_axes)) ) if new_compressed_axes == self.compressed_axes: return self if len(new_compressed_axes) >= len(self.shape): raise ValueError("cannot compress all axes") if len(set(new_compressed_axes)) != len(new_compressed_axes): raise ValueError("repeated axis in compressed_axes") arg = _transpose(self, self.shape, np.arange(self.ndim), new_compressed_axes) return GCXS( arg, shape=self.shape, compressed_axes=new_compressed_axes, fill_value=self.fill_value, ) def tocoo(self): """ Convert this :obj:`GCXS` array to a :obj:`COO`. Returns ------- sparse.COO The converted COO array. 
""" if self.ndim == 0: return COO( np.array([])[None], self.data, shape=self.shape, fill_value=self.fill_value, ) if self.ndim == 1: return COO( self.indices[None, :], self.data, shape=self.shape, fill_value=self.fill_value, ) uncompressed = uncompress_dimension(self.indptr) coords = np.vstack((uncompressed, self.indices)) order = np.argsort(self._axis_order) return ( COO( coords, self.data, shape=self._compressed_shape, fill_value=self.fill_value, ) .reshape(self._reordered_shape) .transpose(order) ) def todense(self): """ Convert this :obj:`GCXS` array to a dense :obj:`numpy.ndarray`. Note that this may take a large amount of memory if the :obj:`GCXS` object's :code:`shape` is large. Returns ------- numpy.ndarray The converted dense array. See Also -------- DOK.todense : Equivalent :obj:`DOK` array method. COO.todense : Equivalent :obj:`COO` array method. scipy.sparse.coo_matrix.todense : Equivalent Scipy method. """ if self.compressed_axes is None: out = np.full(self.shape, self.fill_value, self.dtype) if len(self.indices) != 0: out[self.indices] = self.data else: if len(self.data) != 0: out[()] = self.data[0] return out return self.tocoo().todense() def todok(self): from .. import DOK return DOK.from_coo(self.tocoo()) # probably a temporary solution def to_scipy_sparse(self): """ Converts this :obj:`GCXS` object into a :obj:`scipy.sparse.csr_matrix` or `scipy.sparse.csc_matrix`. Returns ------- :obj:`scipy.sparse.csr_matrix` or `scipy.sparse.csc_matrix` The converted Scipy sparse matrix. Raises ------ ValueError If the array is not two-dimensional. ValueError If all the array doesn't zero fill-values. """ check_zero_fill_value(self) if self.ndim != 2: raise ValueError( "Can only convert a 2-dimensional array to a Scipy sparse matrix." ) if 0 in self.compressed_axes: return ss.csr_matrix( (self.data, self.indices, self.indptr), shape=self.shape ) else: return ss.csc_matrix( (self.data, self.indices, self.indptr), shape=self.shape ) def asformat(self, format, compressed_axes=None): """ Convert this sparse array to a given format. Parameters ---------- format : str A format string. Returns ------- out : SparseArray The converted array. Raises ------ NotImplementedError If the format isn't supported. """ if format == "coo": return self.tocoo() elif format == "dok": return self.todok() elif format == "csr": return CSR(self) elif format == "csc": return CSC(self) elif format == "gcxs": if compressed_axes is None: compressed_axes = self.compressed_axes return self.change_compressed_axes(compressed_axes) raise NotImplementedError("The given format is not supported.") def maybe_densify(self, max_size=1000, min_density=0.25): """ Converts this :obj:`GCXS` array to a :obj:`numpy.ndarray` if not too costly. Parameters ---------- max_size : int Maximum number of elements in output min_density : float Minimum density of output Returns ------- numpy.ndarray The dense array. See Also -------- sparse.GCXS.todense: Converts to Numpy function without checking the cost. sparse.COO.maybe_densify: The equivalent COO function. Raises ------- ValueError If the returned array would be too large. """ if self.size <= max_size or self.density >= min_density: return self.todense() else: raise ValueError( "Operation would require converting " "large sparse array to dense" ) def flatten(self, order="C"): """ Returns a new :obj:`GCXS` array that is a flattened version of this array. Returns ------- GCXS The flattened output array. 
Notes ----- The :code:`order` parameter is provided just for compatibility with Numpy and isn't actually supported. """ if order not in {"C", None}: raise NotImplementedError("The `order` parameter is not" "supported.") return self.reshape(-1) def reshape(self, shape, order="C", compressed_axes=None): """ Returns a new :obj:`GCXS` array that is a reshaped version of this array. Parameters ---------- shape : tuple[int] The desired shape of the output array. compressed_axes : Iterable[int], optional The axes to compress to store the array. Finds the most efficient storage by default. Returns ------- GCXS The reshaped output array. See Also -------- numpy.ndarray.reshape : The equivalent Numpy function. sparse.COO.reshape: The equivalent COO function. Notes ----- The :code:`order` parameter is provided just for compatibility with Numpy and isn't actually supported. """ if isinstance(shape, Iterable): shape = tuple(shape) else: shape = (shape,) if order not in {"C", None}: raise NotImplementedError("The 'order' parameter is not supported") if any(d == -1 for d in shape): extra = int(self.size / np.prod([d for d in shape if d != -1])) shape = tuple([d if d != -1 else extra for d in shape]) if self.shape == shape: return self if self.size != reduce(operator.mul, shape, 1): raise ValueError( "cannot reshape array of size {} into shape {}".format(self.size, shape) ) if len(shape) == 0: return self.tocoo().reshape(shape).asformat("gcxs") if compressed_axes is None: if len(shape) == self.ndim: compressed_axes = self.compressed_axes elif len(shape) == 1: compressed_axes = None else: compressed_axes = (np.argmin(shape),) if self.ndim == 1: arg = _1d_reshape(self, shape, compressed_axes) else: arg = _transpose(self, shape, np.arange(self.ndim), compressed_axes) return GCXS( arg, shape=tuple(shape), compressed_axes=compressed_axes, fill_value=self.fill_value, ) @property def compressed_axes(self): return self._compressed_axes def transpose(self, axes=None, compressed_axes=None): """ Returns a new array which has the order of the axes switched. Parameters ---------- axes : Iterable[int], optional The new order of the axes compared to the previous one. Reverses the axes by default. compressed_axes : Iterable[int], optional The axes to compress to store the array. Finds the most efficient storage by default. Returns ------- GCXS The new array with the axes in the desired order. See Also -------- :obj:`GCXS.T` : A quick property to reverse the order of the axes. numpy.ndarray.transpose : Numpy equivalent function. """ if axes is None: axes = list(reversed(range(self.ndim))) # Normalize all axes indices to positive values axes = normalize_axis(axes, self.ndim) if len(np.unique(axes)) < len(axes): raise ValueError("repeated axis in transpose") if not len(axes) == self.ndim: raise ValueError("axes don't match array") axes = tuple(axes) if axes == tuple(range(self.ndim)): return self if self.ndim == 2: return self._2d_transpose() shape = tuple(self.shape[ax] for ax in axes) if compressed_axes is None: compressed_axes = (np.argmin(shape),) arg = _transpose(self, shape, axes, compressed_axes, transpose=True) return GCXS( arg, shape=shape, compressed_axes=compressed_axes, fill_value=self.fill_value, ) def _2d_transpose(self): """ A function for performing constant-time transposes on 2d GCXS arrays. Returns ------- GCXS The new transposed array with the opposite compressed axes as the input. See Also -------- scipy.sparse.csr_matrix.transpose : Scipy equivalent function. 
scipy.sparse.csc_matrix.transpose : Scipy equivalent function. numpy.ndarray.transpose : Numpy equivalent function. """ if self.ndim != 2: raise ValueError( "cannot perform 2d transpose on array with dimension {}".format( self.ndim ) ) compressed_axes = [(self.compressed_axes[0] + 1) % 2] shape = self.shape[::-1] return GCXS( (self.data, self.indices, self.indptr), shape=shape, compressed_axes=compressed_axes, fill_value=self.fill_value, ) def dot(self, other): """ Performs the equivalent of :code:`x.dot(y)` for :obj:`GCXS`. Parameters ---------- other : Union[GCXS, COO, numpy.ndarray, scipy.sparse.spmatrix] The second operand of the dot product operation. Returns ------- {GCXS, numpy.ndarray} The result of the dot product. If the result turns out to be dense, then a dense array is returned, otherwise, a sparse array. Raises ------ ValueError If all arguments don't have zero fill-values. See Also -------- dot : Equivalent function for two arguments. :obj:`numpy.dot` : Numpy equivalent function. scipy.sparse.csr_matrix.dot : Scipy equivalent function. """ return dot(self, other) def __matmul__(self, other): try: return matmul(self, other) except NotImplementedError: return NotImplemented def __rmatmul__(self, other): try: return matmul(other, self) except NotImplementedError: return NotImplemented def _prune(self): """ Prunes data so that if any fill-values are present, they are removed from both indices and data. Examples -------- >>> coords = np.array([[0, 1, 2, 3]]) >>> data = np.array([1, 0, 1, 2]) >>> s = COO(coords, data).asformat('gcxs') >>> s._prune() >>> s.nnz 3 """ mask = ~equivalent(self.data, self.fill_value) self.data = self.data[mask] if len(self.indptr): coords = np.stack((uncompress_dimension(self.indptr), self.indices)) coords = coords[:, mask] self.indices = coords[1] row_size = self._compressed_shape[0] indptr = np.empty(row_size + 1, dtype=self.indptr.dtype) indptr[0] = 0 np.cumsum(np.bincount(coords[0], minlength=row_size), out=indptr[1:]) self.indptr = indptr else: self.indices = self.indices[mask] class _Compressed2d(GCXS): def __init__( self, arg, shape=None, compressed_axes=None, prune=False, fill_value=0 ): if not hasattr(arg, "shape") and shape is None: raise ValueError("missing `shape` argument") if shape is not None and hasattr(arg, "shape"): raise NotImplementedError("Cannot change shape in constructor") nd = len(shape if shape is not None else arg.shape) if nd != 2: raise ValueError(f"{type(self).__name__} must be 2-d, passed {nd}-d shape.") super().__init__( arg, shape=shape, compressed_axes=compressed_axes, prune=prune, fill_value=fill_value, ) def __str__(self): return "<{}: shape={}, dtype={}, nnz={}, fill_value={}>".format( type(self).__name__, self.shape, self.dtype, self.nnz, self.fill_value, ) __repr__ = __str__ @property def ndim(self) -> int: return 2 class CSR(_Compressed2d): def __init__(self, arg, shape=None, prune=False, fill_value=0): super().__init__(arg, shape=shape, compressed_axes=(0,), fill_value=fill_value) @classmethod def from_scipy_sparse(cls, x): x = x.asformat("csr", copy=False) return cls((x.data, x.indices, x.indptr), shape=x.shape) def transpose(self, axes: None = None, copy: bool = False) -> "CSC": if axes is not None: raise ValueError() if copy: self = self.copy() return CSC((self.data, self.indices, self.indptr), self.shape[::-1]) class CSC(_Compressed2d): def __init__(self, arg, shape=None, prune=False, fill_value=0): super().__init__(arg, shape=shape, compressed_axes=(1,), fill_value=fill_value) @classmethod def 
from_scipy_sparse(cls, x): x = x.asformat("csc", copy=False) return cls((x.data, x.indices, x.indptr), shape=x.shape) def transpose(self, axes: None = None, copy: bool = False) -> CSR: if axes is not None: raise ValueError() if copy: self = self.copy() return CSR((self.data, self.indices, self.indptr), self.shape[::-1]) sparse-0.12.0/sparse/_compressed/convert.py000066400000000000000000000250141402510130100207060ustar00rootroot00000000000000import numpy as np import numba import operator from .._utils import check_compressed_axes, get_out_dtype from .._coo.common import linear_loc from functools import reduce from numba.typed import List def convert_to_flat(inds, shape, dtype): """ Converts the indices of either the compressed or uncompressed axes into a linearized form. Prepares the inputs for compute_flat. """ inds = [np.array(ind) for ind in inds] if any(ind.ndim > 1 for ind in inds): raise IndexError("Only one-dimensional iterable indices supported.") cols = np.empty(np.prod([ind.size for ind in inds]), dtype=dtype) shape_bins = transform_shape(np.asarray(shape)) increments = List() for i in range(len(inds)): increments.append((inds[i] * shape_bins[i]).astype(dtype)) operations = np.prod([ind.shape[0] for ind in increments[:-1]]) return compute_flat(increments, cols, operations) @numba.jit(nopython=True, nogil=True) def compute_flat(increments, cols, operations): # pragma: no cover """ Iterates through indices and calculates the linearized indices. """ start = 0 end = increments[-1].shape[0] positions = np.zeros(len(increments) - 1, dtype=np.intp) pos = len(increments) - 2 for i in range(operations): if i != 0 and positions[pos] == increments[pos].shape[0]: positions[pos] = 0 pos -= 1 positions[pos] += 1 pos += 1 to_add = np.array( [increments[i][positions[i]] for i in range(len(increments) - 1)] ).sum() cols[start:end] = increments[-1] + to_add positions[pos] += 1 start += increments[-1].shape[0] end += increments[-1].shape[0] return cols @numba.jit(nopython=True, nogil=True) def transform_shape(shape): # pragma: no cover """ turns a shape into the linearized increments that it represents. For example, given (5,5,5), it returns np.array([25,5,1]). """ shape_bins = np.empty(len(shape), dtype=np.intp) shape_bins[-1] = 1 for i in range(len(shape) - 2, -1, -1): shape_bins[i] = np.prod(shape[i + 1 :]) return shape_bins @numba.jit(nopython=True, nogil=True) def uncompress_dimension(indptr): # pragma: no cover """converts an index pointer array into an array of coordinates""" uncompressed = np.empty(indptr[-1], dtype=indptr.dtype) for i in range(len(indptr) - 1): uncompressed[indptr[i] : indptr[i + 1]] = i return uncompressed @numba.jit(nopython=True, nogil=True) def is_sorted(arr): # pragma: no cover """ function to check if an indexing array is sorted without repeats. If it is, we can use the faster slicing algorithm. 
""" for i in range(len(arr) - 1): if arr[i + 1] <= arr[i]: return False return True @numba.jit(nopython=True, nogil=True) def _linearize( x_indices, shape, new_axis_order, new_reordered_shape, new_compressed_shape, new_linear, new_coords, ): # pragma: no cover for i, n in enumerate(x_indices): current = unravel_index(n, shape) current_t = current[new_axis_order] new_linear[i] = ravel_multi_index(current_t, new_reordered_shape) new_coords[:, i] = unravel_index(new_linear[i], new_compressed_shape) def _1d_reshape(x, shape, compressed_axes): check_compressed_axes(shape, compressed_axes) new_size = np.prod(shape) end_idx = np.searchsorted(x.indices, new_size, side="left") # for resizeing in one dimension if len(shape) == 1: return (x.data[:end_idx], x.indices[:end_idx], []) new_axis_order = list(compressed_axes) new_axis_order.extend(np.setdiff1d(np.arange(len(shape)), compressed_axes)) new_axis_order = np.asarray(new_axis_order) new_reordered_shape = np.array(shape)[new_axis_order] axisptr = len(compressed_axes) row_size = np.prod(new_reordered_shape[:axisptr]) col_size = np.prod(new_reordered_shape[axisptr:]) new_compressed_shape = np.array((row_size, col_size)) x_indices = x.indices[:end_idx] new_nnz = x_indices.size new_linear = np.empty(new_nnz, dtype=np.intp) coords_dtype = get_out_dtype(x.indices, max(max(new_compressed_shape), x.nnz)) new_coords = np.empty((2, new_nnz), dtype=coords_dtype) _linearize( x_indices, np.array(shape), new_axis_order, new_reordered_shape, new_compressed_shape, new_linear, new_coords, ) order = np.argsort(new_linear) new_coords = new_coords[:, order] indptr = np.empty(row_size + 1, dtype=coords_dtype) indptr[0] = 0 np.cumsum(np.bincount(new_coords[0], minlength=row_size), out=indptr[1:]) indices = new_coords[1] data = x.data[:end_idx][order] return (data, indices, indptr) def _resize(x, shape, compressed_axes): from .compressed import GCXS check_compressed_axes(shape, compressed_axes) size = reduce(operator.mul, shape, 1) if x.ndim == 1: end_idx = np.searchsorted(x.indices, size, side="left") indices = x.indices[:end_idx] data = x.data[:end_idx] out = GCXS((data, indices, []), shape=(size,), fill_value=x.fill_value) return _1d_reshape(out, shape, compressed_axes) uncompressed = uncompress_dimension(x.indptr) coords = np.stack((uncompressed, x.indices)) linear = linear_loc(coords, x._compressed_shape) sorted_axis_order = np.argsort(x._axis_order) linear_dtype = get_out_dtype(x.indices, np.prod(shape)) c_linear = np.empty(x.nnz, dtype=linear_dtype) _c_ordering( linear, c_linear, np.asarray(x._reordered_shape), np.asarray(sorted_axis_order), np.asarray(x.shape), ) order = np.argsort(c_linear, kind="mergesort") data = x.data[order] indices = c_linear[order] end_idx = np.searchsorted(indices, size, side="left") indices = indices[:end_idx] data = data[:end_idx] out = GCXS((data, indices, []), shape=(size,), fill_value=x.fill_value) return _1d_reshape(out, shape, compressed_axes) @numba.jit(nopython=True, nogil=True) def _c_ordering( linear, c_linear, reordered_shape, sorted_axis_order, shape ): # pragma: no cover for i, n in enumerate(linear): # c ordering current_coords = unravel_index(n, reordered_shape)[sorted_axis_order] c_linear[i] = ravel_multi_index(current_coords, shape) def _transpose(x, shape, axes, compressed_axes, transpose=False): """ An algorithm for reshaping, resizing, changing compressed axes, and transposing. 
""" check_compressed_axes(shape, compressed_axes) uncompressed = uncompress_dimension(x.indptr) coords = np.stack((uncompressed, x.indices)) linear = linear_loc(coords, x._compressed_shape) sorted_axis_order = np.argsort(x._axis_order) if len(shape) == 1: dtype = get_out_dtype(x.indices, shape[0]) c_linear = np.empty(x.nnz, dtype=dtype) _c_ordering( linear, c_linear, np.asarray(x._reordered_shape), np.asarray(sorted_axis_order), np.asarray(x.shape), ) order = np.argsort(c_linear, kind="mergesort") data = x.data[order] indices = c_linear[order] return (data, indices, []) new_axis_order = list(compressed_axes) new_axis_order.extend(np.setdiff1d(np.arange(len(shape)), compressed_axes)) new_linear = np.empty(x.nnz, dtype=np.intp) new_reordered_shape = np.array(shape)[new_axis_order] axisptr = len(compressed_axes) row_size = np.prod(new_reordered_shape[:axisptr]) col_size = np.prod(new_reordered_shape[axisptr:]) new_compressed_shape = np.array((row_size, col_size)) coords_dtype = get_out_dtype(x.indices, max(max(new_compressed_shape), x.nnz)) new_coords = np.empty((2, x.nnz), dtype=coords_dtype) _convert_coords( linear, np.asarray(x.shape), np.asarray(x._reordered_shape), sorted_axis_order, np.asarray(axes), np.asarray(shape), np.asarray(new_axis_order), new_reordered_shape, new_linear, new_coords, new_compressed_shape, transpose, ) order = np.argsort(new_linear, kind="mergesort") new_coords = new_coords[:, order] if len(shape) == 1: indptr = [] indices = coords[0, :] else: indptr = np.empty(row_size + 1, dtype=coords_dtype) indptr[0] = 0 np.cumsum(np.bincount(new_coords[0], minlength=row_size), out=indptr[1:]) indices = new_coords[1] data = x.data[order] return (data, indices, indptr) @numba.jit(nopython=True, nogil=True) def unravel_index(n, shape): # pragma: no cover """ implements a subset of the functionality of np.unravel_index. """ out = np.zeros(len(shape), dtype=np.intp) i = 1 while i < len(shape) and n > 0: cur = np.prod(shape[i:]) out[i - 1] = n // cur n -= out[i - 1] * cur i += 1 out[-1] = n return out @numba.jit(nopython=True, nogil=True) def ravel_multi_index(arr, shape): # pragma: no cover """ implements a subset of the functionality of np.ravel_multi_index. 
""" total = 0 for i, a in enumerate(arr[:-1], 1): total += a * np.prod(shape[i:]) total += arr[-1] return total @numba.jit(nopython=True, nogil=True) def _convert_coords( linear, old_shape, reordered_shape, sorted_axis_order, axes, shape, new_axis_order, new_reordered_shape, new_linear, new_coords, new_compressed_shape, transpose, ): # pragma: no cover if transpose == True: for i, n in enumerate(linear): # c ordering current_coords = unravel_index(n, reordered_shape)[sorted_axis_order] # transpose current_coords_t = current_coords[axes][new_axis_order] new_linear[i] = ravel_multi_index(current_coords_t, new_reordered_shape) # reshape new_coords[:, i] = unravel_index(new_linear[i], new_compressed_shape) else: for i, n in enumerate(linear): # c ordering current_coords = unravel_index(n, reordered_shape)[sorted_axis_order] # linearize c_current = ravel_multi_index(current_coords, old_shape) # compress c_compressed = unravel_index(c_current, shape) c_compressed = c_compressed[new_axis_order] new_linear[i] = ravel_multi_index(c_compressed, new_reordered_shape) # reshape new_coords[:, i] = unravel_index(new_linear[i], new_compressed_shape) sparse-0.12.0/sparse/_compressed/indexing.py000066400000000000000000000236471402510130100210450ustar00rootroot00000000000000import numpy as np import numba from numbers import Integral from itertools import zip_longest from collections.abc import Iterable from .._slicing import normalize_index from .convert import convert_to_flat, uncompress_dimension, is_sorted def getitem(x, key): """ GCXS arrays are stored by transposing and reshaping them into csr matrices. For indexing, we first convert the n-dimensional key to its corresponding 2-dimensional key and then iterate through each of the relevent rows and columns. """ from .compressed import GCXS if x.ndim == 1: coo = x.tocoo()[key] return GCXS.from_coo(coo) key = list(normalize_index(key, x.shape)) # zip_longest so things like x[..., None] are picked up. 
if len(key) != 0 and all( isinstance(k, slice) and k == slice(0, dim, 1) for k, dim in zip_longest(key, x.shape) ): return x # return a single element if all(isinstance(k, int) for k in key): return get_single_element(x, key) shape = [] compressed_inds = np.zeros(len(x.shape), dtype=np.bool) uncompressed_inds = np.zeros(len(x.shape), dtype=np.bool) # which axes will be compressed in the resulting array shape_key = np.zeros(len(x.shape), dtype=np.intp) # remove Nones from key, evaluate them at the end Nones_removed = [k for k in key if k is not None] count = 0 for i, ind in enumerate(Nones_removed): if isinstance(ind, Integral): continue elif isinstance(ind, slice): shape_key[i] = count shape.append(len(range(ind.start, ind.stop, ind.step))) if i in x.compressed_axes: compressed_inds[i] = True else: uncompressed_inds[i] = True elif isinstance(ind, Iterable): shape_key[i] = count shape.append(len(ind)) if i in x.compressed_axes: compressed_inds[i] = True else: uncompressed_inds[i] = True count += 1 # reorder the key according to the axis_order of the array reordered_key = [Nones_removed[i] for i in x._axis_order] # if all slices have a positive step and all # iterables are sorted without repeats, we can # use the quicker slicing algorithm pos_slice = True for ind in reordered_key[x._axisptr :]: if isinstance(ind, slice): if ind.step < 0: pos_slice = False elif isinstance(ind, Iterable): if not is_sorted(ind): pos_slice = False # convert all ints and slices to iterables before flattening for i, ind in enumerate(reordered_key): if isinstance(ind, Integral): reordered_key[i] = [ind] elif isinstance(ind, slice): reordered_key[i] = np.arange(ind.start, ind.stop, ind.step) shape = np.array(shape) # convert all indices of compressed axes to a single array index # this tells us which 'rows' of the underlying csr matrix to iterate through rows = convert_to_flat( reordered_key[: x._axisptr], x._reordered_shape[: x._axisptr], x.indices.dtype, ) # convert all indices of uncompressed axes to a single array index # this tells us which 'columns' of the underlying csr matrix to iterate through cols = convert_to_flat( reordered_key[x._axisptr :], x._reordered_shape[x._axisptr :], x.indices.dtype, ) starts = x.indptr[:-1][rows] # find the start and end of each of the rows ends = x.indptr[1:][rows] if np.any(compressed_inds): compressed_axes = shape_key[compressed_inds] if len(compressed_axes) == 1: row_size = shape[compressed_axes] else: row_size = np.prod(shape[compressed_axes]) # if only indexing through uncompressed axes else: compressed_axes = (0,) # defaults to 0 row_size = 1 # this doesn't matter if not np.any(uncompressed_inds): # only indexing compressed axes compressed_axes = (0,) # defaults to 0 row_size = starts.size indptr = np.empty(row_size + 1, dtype=x.indptr.dtype) indptr[0] = 0 if pos_slice: arg = get_slicing_selection(x.data, x.indices, indptr, starts, ends, cols) else: arg = get_array_selection(x.data, x.indices, indptr, starts, ends, cols) data, indices, indptr = arg size = np.prod(shape[1:]) if not np.any(uncompressed_inds): # only indexing compressed axes uncompressed = uncompress_dimension(indptr) if len(shape) == 1: indices = uncompressed indptr = None else: indices = uncompressed % size indptr = np.empty(shape[0] + 1, dtype=x.indptr.dtype) indptr[0] = 0 np.cumsum( np.bincount(uncompressed // size, minlength=shape[0]), out=indptr[1:] ) if not np.any(compressed_inds): if len(shape) == 1: indptr = None else: uncompressed = indices // size indptr = np.empty(shape[0] + 1, 
dtype=x.indptr.dtype) indptr[0] = 0 np.cumsum(np.bincount(uncompressed, minlength=shape[0]), out=indptr[1:]) indices = indices % size arg = (data, indices, indptr) # if there were Nones in the key, we insert them back here compressed_axes = np.array(compressed_axes) shape = shape.tolist() for i in range(len(key)): if key[i] is None: shape.insert(i, 1) compressed_axes[compressed_axes >= i] += 1 compressed_axes = tuple(compressed_axes) shape = tuple(shape) if len(shape) == 1: compressed_axes = None return GCXS( arg, shape=shape, compressed_axes=compressed_axes, fill_value=x.fill_value ) @numba.jit(nopython=True, nogil=True) def get_slicing_selection( arr_data, arr_indices, indptr, starts, ends, col ): # pragma: no cover """ When the requested elements come in a strictly ascending order, as is the case with acsending slices, we can iteratively reduce the search space, leading to better performance. We loop through the starts and ends, each time evaluating whether to use a linear filtering procedure or a binary-search-based method. """ indices = [] ind_list = [] for i, (start, end) in enumerate(zip(starts, ends)): inds = [] current_row = arr_indices[start:end] if current_row.size < col.size: # linear filtering count = 0 col_count = 0 nnz = 0 while col_count < col.size and count < current_row.size: if current_row[-1] < col[col_count] or current_row[count] > col[-1]: break if current_row[count] == col[col_count]: nnz += 1 ind_list.append(count + start) indices.append(col_count) count += 1 col_count += 1 elif current_row[count] < col[col_count]: count += 1 else: col_count += 1 indptr[i + 1] = indptr[i] + nnz else: # binary searches prev = 0 size = 0 col_count = 0 while col_count < col.size: while ( col[col_count] < current_row[size] and col_count < col.size ): # skip needless searches col_count += 1 if col_count >= col.size: # check again because of previous loop break if current_row[-1] < col[col_count] or current_row[size] > col[-1]: break s = np.searchsorted(current_row[size:], col[col_count]) size += s s += prev if not (s >= current_row.size or current_row[s] != col[col_count]): s += start inds.append(s) indices.append(col_count) size += 1 prev = size col_count += 1 ind_list.extend(inds) indptr[i + 1] = indptr[i] + len(inds) ind_list = np.array(ind_list, dtype=np.int64) indices = np.array(indices, dtype=indptr.dtype) data = arr_data[ind_list] return (data, indices, indptr) @numba.jit(nopython=True, nogil=True) def get_array_selection( arr_data, arr_indices, indptr, starts, ends, col ): # pragma: no cover """ This is a very general algorithm to be used when more optimized methods don't apply. It performs a binary search for each of the requested elements. Consequently it roughly scales by O(n log avg(nnz)). """ indices = [] ind_list = [] for i, (start, end) in enumerate(zip(starts, ends)): inds = [] current_row = arr_indices[start:end] if len(current_row) == 0: indptr[i + 1] = indptr[i] continue for c in range(len(col)): s = np.searchsorted(current_row, col[c]) if not (s >= current_row.size or current_row[s] != col[c]): s += start inds.append(s) indices.append(c) ind_list.extend(inds) indptr[i + 1] = indptr[i] + len(inds) ind_list = np.array(ind_list, dtype=np.int64) indices = np.array(indices, dtype=indptr.dtype) data = arr_data[ind_list] return (data, indices, indptr) def get_single_element(x, key): """ A convience function for indexing when returning a single element. 
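The key is reordered to match the array's axis order, ravelled into the
two-dimensional compressed shape, and the target column is then located with a
binary search over that row's indices; the fill value is returned when the
element is not stored explicitly.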
""" key = np.array(key)[x._axis_order] # reordering the input ind = np.ravel_multi_index(key, x._reordered_shape) row, col = np.unravel_index(ind, x._compressed_shape) current_row = x.indices[x.indptr[row] : x.indptr[row + 1]] item = np.searchsorted(current_row, col) if not (item >= current_row.size or current_row[item] != col): item += x.indptr[row] return x.data[item] return x.fill_value sparse-0.12.0/sparse/_coo/000077500000000000000000000000001402510130100152665ustar00rootroot00000000000000sparse-0.12.0/sparse/_coo/__init__.py000066400000000000000000000012061402510130100173760ustar00rootroot00000000000000from .core import COO, as_coo from .common import ( concatenate, clip, stack, triu, tril, where, nansum, nanmean, nanprod, nanmin, nanmax, nanreduce, roll, kron, argwhere, isposinf, isneginf, result_type, diagonal, diagonalize, ) __all__ = [ "COO", "as_coo", "concatenate", "clip", "stack", "triu", "tril", "where", "nansum", "nanmean", "nanprod", "nanmin", "nanmax", "nanreduce", "roll", "kron", "argwhere", "isposinf", "isneginf", "result_type", "diagonal", "diagonalize", ] sparse-0.12.0/sparse/_coo/common.py000066400000000000000000000656141402510130100171440ustar00rootroot00000000000000from functools import reduce import operator import warnings from collections.abc import Iterable import numpy as np import scipy.sparse import numba from .._sparse_array import SparseArray from .._utils import ( isscalar, is_unsigned_dtype, normalize_axis, check_zero_fill_value, check_consistent_fill_value, can_store, ) def asCOO(x, name="asCOO", check=True): """ Convert the input to :obj:`COO`. Passes through :obj:`COO` objects as-is. Parameters ---------- x : Union[SparseArray, scipy.sparse.spmatrix, numpy.ndarray] The input array to convert. name : str, optional The name of the operation to use in the exception. check : bool, optional Whether to check for a dense input. Returns ------- COO The converted :obj:`COO` array. Raises ------ ValueError If ``check`` is true and a dense input is supplied. """ from .core import COO if check and not isinstance(x, (SparseArray, scipy.sparse.spmatrix)): raise ValueError( "Performing this operation would produce a dense result: %s" % name ) if not isinstance(x, COO): x = COO(x) return x def linear_loc(coords, shape): if shape == () and len(coords) == 0: # `np.ravel_multi_index` is not aware of arrays, so cannot produce a # sensible result here (https://github.com/numpy/numpy/issues/15690). # Since `coords` is an array and not a sequence, we know the correct # dimensions. return np.zeros(coords.shape[1:], dtype=np.intp) else: return np.ravel_multi_index(coords, shape) def kron(a, b): """Kronecker product of 2 sparse arrays. Parameters ---------- a, b : SparseArray, scipy.sparse.spmatrix, or np.ndarray The arrays over which to compute the Kronecker product. Returns ------- res : COO The kronecker product Raises ------ ValueError If all arguments are dense or arguments have nonzero fill-values. 
Examples -------- >>> from sparse import eye >>> a = eye(3, dtype='i8') >>> b = np.array([1, 2, 3], dtype='i8') >>> res = kron(a, b) >>> res.todense() # doctest: +SKIP array([[1, 2, 3, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 2, 3, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 2, 3]], dtype=int64) """ from .core import COO from .._umath import _cartesian_product check_zero_fill_value(a, b) a_sparse = isinstance(a, (SparseArray, scipy.sparse.spmatrix)) b_sparse = isinstance(b, (SparseArray, scipy.sparse.spmatrix)) a_ndim = np.ndim(a) b_ndim = np.ndim(b) if not (a_sparse or b_sparse): raise ValueError( "Performing this operation would produce a dense " "result: kron" ) if a_ndim == 0 or b_ndim == 0: return a * b a = asCOO(a, check=False) b = asCOO(b, check=False) # Match dimensions max_dim = max(a.ndim, b.ndim) a = a.reshape((1,) * (max_dim - a.ndim) + a.shape) b = b.reshape((1,) * (max_dim - b.ndim) + b.shape) a_idx, b_idx = _cartesian_product(np.arange(a.nnz), np.arange(b.nnz)) a_expanded_coords = a.coords[:, a_idx] b_expanded_coords = b.coords[:, b_idx] o_coords = a_expanded_coords * np.asarray(b.shape)[:, None] + b_expanded_coords o_data = a.data[a_idx] * b.data[b_idx] o_shape = tuple(i * j for i, j in zip(a.shape, b.shape)) return COO(o_coords, o_data, shape=o_shape, has_duplicates=False) def concatenate(arrays, axis=0): """ Concatenate the input arrays along the given dimension. Parameters ---------- arrays : Iterable[SparseArray] The input arrays to concatenate. axis : int, optional The axis along which to concatenate the input arrays. The default is zero. Returns ------- COO The output concatenated array. Raises ------ ValueError If all elements of :code:`arrays` don't have the same fill-value. See Also -------- numpy.concatenate : NumPy equivalent function """ from .core import COO check_consistent_fill_value(arrays) arrays = [x if isinstance(x, COO) else COO(x) for x in arrays] axis = normalize_axis(axis, arrays[0].ndim) assert all( x.shape[ax] == arrays[0].shape[ax] for x in arrays for ax in set(range(arrays[0].ndim)) - {axis} ) nnz = 0 dim = sum(x.shape[axis] for x in arrays) shape = list(arrays[0].shape) shape[axis] = dim data = np.concatenate([x.data for x in arrays]) coords = np.concatenate([x.coords for x in arrays], axis=1) if not can_store(coords.dtype, max(shape)): coords = coords.astype(np.min_scalar_type(max(shape))) dim = 0 for x in arrays: if dim: coords[axis, nnz : x.nnz + nnz] += dim dim += x.shape[axis] nnz += x.nnz return COO( coords, data, shape=shape, has_duplicates=False, sorted=(axis == 0), fill_value=arrays[0].fill_value, ) def stack(arrays, axis=0): """ Stack the input arrays along the given dimension. Parameters ---------- arrays : Iterable[SparseArray] The input arrays to stack. axis : int, optional The axis along which to stack the input arrays. Returns ------- COO The output stacked array. Raises ------ ValueError If all elements of :code:`arrays` don't have the same fill-value. 
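Examples
--------
A rough usage sketch; the top-level :obj:`sparse.stack` dispatches here for
COO inputs.

>>> import numpy as np
>>> import sparse
>>> x = sparse.COO.from_numpy(np.eye(2))
>>> sparse.stack([x, x], axis=0).shape
(2, 2, 2)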
See Also -------- numpy.stack : NumPy equivalent function """ from .core import COO check_consistent_fill_value(arrays) assert len({x.shape for x in arrays}) == 1 arrays = [x if isinstance(x, COO) else COO(x) for x in arrays] axis = normalize_axis(axis, arrays[0].ndim + 1) data = np.concatenate([x.data for x in arrays]) coords = np.concatenate([x.coords for x in arrays], axis=1) shape = list(arrays[0].shape) shape.insert(axis, len(arrays)) nnz = 0 dim = 0 new = np.empty(shape=(coords.shape[1],), dtype=np.intp) for x in arrays: new[nnz : x.nnz + nnz] = dim dim += 1 nnz += x.nnz coords = [coords[i] for i in range(coords.shape[0])] coords.insert(axis, new) coords = np.stack(coords, axis=0) return COO( coords, data, shape=shape, has_duplicates=False, sorted=(axis == 0), fill_value=arrays[0].fill_value, ) def triu(x, k=0): """ Returns an array with all elements below the k-th diagonal set to zero. Parameters ---------- x : COO The input array. k : int, optional The diagonal below which elements are set to zero. The default is zero, which corresponds to the main diagonal. Returns ------- COO The output upper-triangular matrix. Raises ------ ValueError If :code:`x` doesn't have zero fill-values. See Also -------- numpy.triu : NumPy equivalent function """ from .core import COO check_zero_fill_value(x) if not x.ndim >= 2: raise NotImplementedError( "sparse.triu is not implemented for scalars or 1-D arrays." ) mask = x.coords[-2] + k <= x.coords[-1] coords = x.coords[:, mask] data = x.data[mask] return COO(coords, data, shape=x.shape, has_duplicates=False, sorted=True) def tril(x, k=0): """ Returns an array with all elements above the k-th diagonal set to zero. Parameters ---------- x : COO The input array. k : int, optional The diagonal above which elements are set to zero. The default is zero, which corresponds to the main diagonal. Returns ------- COO The output lower-triangular matrix. Raises ------ ValueError If :code:`x` doesn't have zero fill-values. See Also -------- numpy.tril : NumPy equivalent function """ from .core import COO check_zero_fill_value(x) if not x.ndim >= 2: raise NotImplementedError( "sparse.tril is not implemented for scalars or 1-D arrays." ) mask = x.coords[-2] + k >= x.coords[-1] coords = x.coords[:, mask] data = x.data[mask] return COO(coords, data, shape=x.shape, has_duplicates=False, sorted=True) def nansum(x, axis=None, keepdims=False, dtype=None, out=None): """ Performs a ``NaN`` skipping sum operation along the given axes. Uses all axes by default. Parameters ---------- x : SparseArray The array to perform the reduction on. axis : Union[int, Iterable[int]], optional The axes along which to sum. Uses all axes by default. keepdims : bool, optional Whether or not to keep the dimensions of the original array. dtype: numpy.dtype The data type of the output array. Returns ------- COO The reduced output sparse array. See Also -------- :obj:`COO.sum` : Function without ``NaN`` skipping. numpy.nansum : Equivalent Numpy function. """ assert out is None x = asCOO(x, name="nansum") return nanreduce(x, np.add, axis=axis, keepdims=keepdims, dtype=dtype) def nanmean(x, axis=None, keepdims=False, dtype=None, out=None): """ Performs a ``NaN`` skipping mean operation along the given axes. Uses all axes by default. Parameters ---------- x : SparseArray The array to perform the reduction on. axis : Union[int, Iterable[int]], optional The axes along which to compute the mean. Uses all axes by default. 
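# A minimal sketch, assuming the public sparse.triu / sparse.tril API above,
# which mirrors numpy.triu / numpy.tril for zero-fill sparse matrices.
import numpy as np
import sparse

d = np.arange(1, 10).reshape(3, 3)
s = sparse.COO.from_numpy(d)
assert np.array_equal(sparse.triu(s, k=1).todense(), np.triu(d, k=1))
assert np.array_equal(sparse.tril(s).todense(), np.tril(d))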
keepdims : bool, optional Whether or not to keep the dimensions of the original array. dtype: numpy.dtype The data type of the output array. Returns ------- COO The reduced output sparse array. See Also -------- :obj:`COO.mean` : Function without ``NaN`` skipping. numpy.nanmean : Equivalent Numpy function. """ assert out is None x = asCOO(x, name="nanmean") if not np.issubdtype(x.dtype, np.floating): return x.mean(axis=axis, keepdims=keepdims, dtype=dtype) mask = np.isnan(x) x2 = where(mask, 0, x) # Count the number non-nan elements along axis nancount = mask.sum(axis=axis, dtype="i8", keepdims=keepdims) if axis is None: axis = tuple(range(x.ndim)) elif not isinstance(axis, tuple): axis = (axis,) den = reduce(operator.mul, (x.shape[i] for i in axis), 1) den -= nancount if (den == 0).any(): warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=2) num = np.sum(x2, axis=axis, dtype=dtype, keepdims=keepdims) with np.errstate(invalid="ignore", divide="ignore"): if num.ndim: return np.true_divide(num, den, casting="unsafe") return (num / den).astype(dtype) def nanmax(x, axis=None, keepdims=False, dtype=None, out=None): """ Maximize along the given axes, skipping ``NaN`` values. Uses all axes by default. Parameters ---------- x : SparseArray The array to perform the reduction on. axis : Union[int, Iterable[int]], optional The axes along which to maximize. Uses all axes by default. keepdims : bool, optional Whether or not to keep the dimensions of the original array. dtype: numpy.dtype The data type of the output array. Returns ------- COO The reduced output sparse array. See Also -------- :obj:`COO.max` : Function without ``NaN`` skipping. numpy.nanmax : Equivalent Numpy function. """ assert out is None x = asCOO(x, name="nanmax") ar = x.reduce(np.fmax, axis=axis, keepdims=keepdims, dtype=dtype) if (isscalar(ar) and np.isnan(ar)) or np.isnan(ar.data).any(): warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=2) return ar def nanmin(x, axis=None, keepdims=False, dtype=None, out=None): """ Minimize along the given axes, skipping ``NaN`` values. Uses all axes by default. Parameters ---------- x : SparseArray The array to perform the reduction on. axis : Union[int, Iterable[int]], optional The axes along which to minimize. Uses all axes by default. keepdims : bool, optional Whether or not to keep the dimensions of the original array. dtype: numpy.dtype The data type of the output array. Returns ------- COO The reduced output sparse array. See Also -------- :obj:`COO.min` : Function without ``NaN`` skipping. numpy.nanmin : Equivalent Numpy function. """ assert out is None x = asCOO(x, name="nanmin") ar = x.reduce(np.fmin, axis=axis, keepdims=keepdims, dtype=dtype) if (isscalar(ar) and np.isnan(ar)) or np.isnan(ar.data).any(): warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=2) return ar def nanprod(x, axis=None, keepdims=False, dtype=None, out=None): """ Performs a product operation along the given axes, skipping ``NaN`` values. Uses all axes by default. Parameters ---------- x : SparseArray The array to perform the reduction on. axis : Union[int, Iterable[int]], optional The axes along which to multiply. Uses all axes by default. keepdims : bool, optional Whether or not to keep the dimensions of the original array. dtype: numpy.dtype The data type of the output array. Returns ------- COO The reduced output sparse array. See Also -------- :obj:`COO.prod` : Function without ``NaN`` skipping. numpy.nanprod : Equivalent Numpy function. 
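# A minimal sketch, assuming the NaN-skipping reductions defined here:
# NaN entries are treated as missing instead of propagating.
import numpy as np
import sparse

x = sparse.COO.from_numpy(np.array([[1.0, np.nan], [2.0, 4.0]]))
assert np.array_equal(sparse.nansum(x, axis=0).todense(), [3.0, 4.0])
assert np.array_equal(sparse.nanmax(x, axis=0).todense(), [2.0, 4.0])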
""" assert out is None x = asCOO(x) return nanreduce(x, np.multiply, axis=axis, keepdims=keepdims, dtype=dtype) def where(condition, x=None, y=None): """ Select values from either ``x`` or ``y`` depending on ``condition``. If ``x`` and ``y`` are not given, returns indices where ``condition`` is nonzero. Performs the equivalent of :obj:`numpy.where`. Parameters ---------- condition : SparseArray The condition based on which to select values from either ``x`` or ``y``. x : SparseArray, optional The array to select values from if ``condition`` is nonzero. y : SparseArray, optional The array to select values from if ``condition`` is zero. Returns ------- COO The output array with selected values if ``x`` and ``y`` are given; else where the array is nonzero. Raises ------ ValueError If the operation would produce a dense result; or exactly one of ``x`` and ``y`` are given. See Also -------- numpy.where : Equivalent Numpy function. """ from .._umath import elemwise x_given = x is not None y_given = y is not None if not (x_given or y_given): condition = asCOO(condition, name=str(np.where)) return tuple(condition.coords) if x_given != y_given: raise ValueError("either both or neither of x and y should be given") return elemwise(np.where, condition, x, y) def argwhere(a): """ Find the indices of array elements that are non-zero, grouped by element. Parameters ---------- a: array_like Input data. Returns ------- index_array: numpy.ndarray See Also -------- :obj:`where`, :obj:`COO.nonzero` Examples -------- >>> import sparse >>> x = sparse.COO(np.arange(6).reshape((2, 3))) >>> sparse.argwhere(x > 1) array([[0, 2], [1, 0], [1, 1], [1, 2]]) """ return np.transpose(a.nonzero()) def _replace_nan(array, value): """ Replaces ``NaN``s in ``array`` with ``value``. Parameters ---------- array : COO The input array. value : numpy.number The values to replace ``NaN`` with. Returns ------- COO A copy of ``array`` with the ``NaN``s replaced. """ if not np.issubdtype(array.dtype, np.floating): return array return where(np.isnan(array), value, array) def nanreduce(x, method, identity=None, axis=None, keepdims=False, **kwargs): """ Performs an ``NaN`` skipping reduction on this array. See the documentation on :obj:`COO.reduce` for examples. Parameters ---------- x : COO The array to reduce. method : numpy.ufunc The method to use for performing the reduction. identity : numpy.number The identity value for this reduction. Inferred from ``method`` if not given. Note that some ``ufunc`` objects don't have this, so it may be necessary to give it. axis : Union[int, Iterable[int]], optional The axes along which to perform the reduction. Uses all axes by default. keepdims : bool, optional Whether or not to keep the dimensions of the original array. kwargs : dict Any extra arguments to pass to the reduction operation. Returns ------- COO The result of the reduction operation. Raises ------ ValueError If reducing an all-zero axis would produce a nonzero result. See Also -------- COO.reduce : Similar method without ``NaN`` skipping functionality. """ arr = _replace_nan(x, method.identity if identity is None else identity) return arr.reduce(method, axis, keepdims, **kwargs) def roll(a, shift, axis=None): """ Shifts elements of an array along specified axis. Elements that roll beyond the last position are circulated and re-introduced at the first. Parameters ---------- x : COO Input array shift : int or tuple of ints Number of index positions that elements are shifted. 
If a tuple is provided, then axis must be a tuple of the same size, and each of the given axes is shifted by the corresponding number. If an int while axis is a tuple of ints, then broadcasting is used so the same shift is applied to all axes. axis : int or tuple of ints, optional Axis or tuple specifying multiple axes. By default, the array is flattened before shifting, after which the original shape is restored. Returns ------- res : ndarray Output array, with the same shape as a. """ from .core import COO, as_coo from numpy.core._exceptions import UFuncTypeError a = as_coo(a) # roll flattened array if axis is None: return roll(a.reshape((-1,)), shift, 0).reshape(a.shape) # roll across specified axis else: # parse axis input, wrap in tuple axis = normalize_axis(axis, a.ndim) if not isinstance(axis, tuple): axis = (axis,) # make shift iterable if not isinstance(shift, Iterable): shift = (shift,) elif np.ndim(shift) > 1: raise ValueError("'shift' and 'axis' must be integers or 1D sequences.") # handle broadcasting if len(shift) == 1: shift = np.full(len(axis), shift) # check if dimensions are consistent if len(axis) != len(shift): raise ValueError( "If 'shift' is a 1D sequence, " "'axis' must have equal length." ) if not can_store(a.coords.dtype, max(a.shape + shift)): raise ValueError( "cannot roll with coords.dtype {} and shift {}. Try casting coords to a larger dtype.".format( a.coords.dtype, shift, ) ) # shift elements coords, data = np.copy(a.coords), np.copy(a.data) try: for sh, ax in zip(shift, axis): coords[ax] += sh coords[ax] %= a.shape[ax] except UFuncTypeError: if is_unsigned_dtype(coords.dtype): raise ValueError( "rolling with coords.dtype as {} is not safe. Try using a signed dtype.".format( coords.dtype ) ) return COO( coords, data=data, shape=a.shape, has_duplicates=False, fill_value=a.fill_value, ) def diagonal(a, offset=0, axis1=0, axis2=1): """ Extract diagonal from a COO array. The equivalent of :obj:`numpy.diagonal`. Parameters ---------- a: COO The array to perform the operation on. offset: int, optional Offset of the diagonal from the main diagonal. Defaults to main diagonal (0). axis1: int, optional First axis from which the diagonals should be taken. Defaults to first axis (0). axis2 : int, optional Second axis from which the diagonals should be taken. Defaults to second axis (1). Examples -------- >>> import sparse >>> x = sparse.as_coo(np.arange(9).reshape(3,3)) >>> sparse.diagonal(x).todense() array([0, 4, 8]) >>> sparse.diagonal(x,offset=1).todense() array([1, 5]) >>> x = sparse.as_coo(np.arange(12).reshape((2,3,2))) >>> x_diag = sparse.diagonal(x, axis1=0, axis2=2) >>> x_diag.shape (3, 2) >>> x_diag.todense() array([[ 0, 7], [ 2, 9], [ 4, 11]]) Returns ------- out: COO The result of the operation. Raises ------ ValueError If a.shape[axis1] != a.shape[axis2] See Also -------- :obj:`numpy.diagonal`: NumPy equivalent function """ from .core import COO if a.shape[axis1] != a.shape[axis2]: raise ValueError("a.shape[axis1] != a.shape[axis2]") diag_axes = [ axis for axis in range(len(a.shape)) if axis != axis1 and axis != axis2 ] + [axis1] diag_shape = [a.shape[axis] for axis in diag_axes] diag_shape[-1] -= abs(offset) diag_idx = _diagonal_idx(a.coords, axis1, axis2, offset) diag_coords = [a.coords[axis][diag_idx] for axis in diag_axes] diag_data = a.data[diag_idx] return COO(diag_coords, diag_data, diag_shape) def diagonalize(a, axis=0): """ Diagonalize a COO array. The new dimension is appended at the end. .. 
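# A minimal sketch, assuming the public sparse.roll API implemented above:
# stored coordinates are shifted and wrapped modulo the axis length.
import numpy as np
import sparse

x = sparse.COO.from_numpy(np.array([1, 0, 0, 2]))
assert np.array_equal(sparse.roll(x, 1).todense(), [2, 1, 0, 0])
assert np.array_equal(sparse.roll(x, -1).todense(), [0, 0, 2, 1])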
WARNING:: :obj:`diagonalize` is not :obj:`numpy` compatible as there is no direct :obj:`numpy` equivalent. The API may change in the future. Parameters ---------- a: Union[COO, np.ndarray, scipy.sparse.spmatrix] The array to diagonalize. axis: int, optional The axis to diagonalize. Defaults to first axis (0). Examples -------- >>> import sparse >>> x = sparse.as_coo(np.arange(1,4)) >>> sparse.diagonalize(x).todense() array([[1, 0, 0], [0, 2, 0], [0, 0, 3]]) >>> x = sparse.as_coo(np.arange(24).reshape((2,3,4))) >>> x_diag = sparse.diagonalize(x, axis=1) >>> x_diag.shape (2, 3, 4, 3) :obj:`diagonalize` is the inverse of :obj:`diagonal` >>> a = sparse.random((3,3,3,3,3), density=0.3) >>> a_diag = sparse.diagonalize(a, axis=2) >>> (sparse.diagonal(a_diag, axis1=2, axis2=5) == a.transpose([0,1,3,4,2])).all() True Returns ------- out: COO The result of the operation. See Also -------- :obj:`numpy.diag`: NumPy equivalent for 1D array """ from .core import COO, as_coo a = as_coo(a) diag_shape = a.shape + (a.shape[axis],) diag_coords = np.vstack([a.coords, a.coords[axis]]) return COO(diag_coords, a.data, diag_shape) def isposinf(x, out=None): """ Test element-wise for positive infinity, return result as sparse ``bool`` array. Parameters ---------- x Input out, optional Output array Examples -------- >>> import sparse >>> x = sparse.as_coo(np.array([np.inf])) >>> sparse.isposinf(x).todense() array([ True]) See Also -------- numpy.isposinf : The NumPy equivalent """ from .core import elemwise return elemwise(lambda x, out=None, dtype=None: np.isposinf(x, out=out), x, out=out) def isneginf(x, out=None): """ Test element-wise for negative infinity, return result as sparse ``bool`` array. Parameters ---------- x Input out, optional Output array Examples -------- >>> import sparse >>> x = sparse.as_coo(np.array([-np.inf])) >>> sparse.isneginf(x).todense() array([ True]) See Also -------- numpy.isneginf : The NumPy equivalent """ from .core import elemwise return elemwise(lambda x, out=None, dtype=None: np.isneginf(x, out=out), x, out=out) def result_type(*arrays_and_dtypes): """Returns the type that results from applying the NumPy type promotion rules to the arguments. See Also -------- numpy.result_type : The NumPy equivalent """ return np.result_type(*(_as_result_type_arg(x) for x in arrays_and_dtypes)) def _as_result_type_arg(x): if not isinstance(x, SparseArray): return x if x.ndim > 0: return x.dtype # 0-dimensional arrays give different result_type outputs than their dtypes return x.todense() @numba.jit(nopython=True, nogil=True) def _diagonal_idx(coordlist, axis1, axis2, offset): """ Utility function that returns all indices that correspond to a diagonal element. Parameters ---------- coordlist : list of lists Coordinate indices. axis1, axis2 : int The axes of the diagonal. offset : int Offset of the diagonal from the main diagonal. Defaults to main diagonal (0). """ return np.array( [ i for i in range(len(coordlist[axis1])) if coordlist[axis1][i] + offset == coordlist[axis2][i] ] ) def clip(a, a_min=None, a_max=None, out=None): """ Clip (limit) the values in the array. Return an array whose values are limited to ``[min, max]``. One of min or max must be given. Parameters ---------- a: a_min : scalar or `SparseArray` or `None` Minimum value. If `None`, clipping is not performed on lower interval edge. a_max : scalar or `SparseArray` or `None` Maximum value. If `None`, clipping is not performed on upper interval edge. 
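# A minimal sketch, assuming the public sparse.result_type API above: sparse
# arrays contribute their dtype to the usual NumPy promotion rules.
import numpy as np
import sparse

a = sparse.COO.from_numpy(np.array([1, 2], dtype=np.int32))
assert sparse.result_type(a, np.float64) == np.float64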
out : SparseArray, optional If provided, the results will be placed in this array. It may be the input array for in-place clipping. `out` must be of the right shape to hold the output. Its type is preserved. Returns ------- clipped_array : SparseArray An array with the elements of `self`, but where values < `min` are replaced with `min`, and those > `max` with `max`. Examples -------- >>> import sparse >>> x = sparse.COO.from_numpy([0, 0, 0, 1, 2, 3]) >>> sparse.clip(x, a_min=1).todense() # doctest: +NORMALIZE_WHITESPACE array([1, 1, 1, 1, 2, 3]) >>> sparse.clip(x, a_max=1).todense() # doctest: +NORMALIZE_WHITESPACE array([0, 0, 0, 1, 1, 1]) >>> sparse.clip(x, a_min=1, a_max=2).todense() # doctest: +NORMALIZE_WHITESPACE array([1, 1, 1, 1, 2, 2]) See also -------- numpy.clip : Equivalent NumPy function """ a = asCOO(a, name="clip") return a.clip(a_min, a_max) sparse-0.12.0/sparse/_coo/core.py000066400000000000000000001350601402510130100165750ustar00rootroot00000000000000import copy as _copy import operator from collections.abc import Iterable, Iterator, Sized from collections import defaultdict, deque from functools import reduce import warnings import numpy as np import numba import scipy.sparse from numpy.lib.mixins import NDArrayOperatorsMixin from .._common import dot, matmul from .indexing import getitem from .._umath import elemwise, broadcast_to from .._sparse_array import SparseArray, _reduce_super_ufunc from .._utils import ( normalize_axis, equivalent, check_zero_fill_value, _zero_of_dtype, can_store, ) class COO(SparseArray, NDArrayOperatorsMixin): # lgtm [py/missing-equals] """ A sparse multidimensional array. This is stored in COO format. It depends on NumPy and Scipy.sparse for computation, but supports arrays of arbitrary dimension. Parameters ---------- coords : numpy.ndarray (COO.ndim, COO.nnz) An array holding the index locations of every value Should have shape (number of dimensions, number of non-zeros). data : numpy.ndarray (COO.nnz,) An array of Values. A scalar can also be supplied if the data is the same across all coordinates. If not given, defers to :obj:`as_coo`. shape : tuple[int] (COO.ndim,) The shape of the array. has_duplicates : bool, optional A value indicating whether the supplied value for :code:`coords` has duplicates. Note that setting this to `False` when :code:`coords` does have duplicates may result in undefined behaviour. See :obj:`COO.sum_duplicates` sorted : bool, optional A value indicating whether the values in `coords` are sorted. Note that setting this to `True` when :code:`coords` isn't sorted may result in undefined behaviour. See :obj:`COO.sort_indices`. prune : bool, optional A flag indicating whether or not we should prune any fill-values present in ``data``. cache : bool, optional Whether to enable cacheing for various operations. See :obj:`COO.enable_caching` fill_value: scalar, optional The fill value for this array. Attributes ---------- coords : numpy.ndarray (ndim, nnz) An array holding the coordinates of every nonzero element. data : numpy.ndarray (nnz,) An array holding the values corresponding to :obj:`COO.coords`. shape : tuple[int] (ndim,) The dimensions of this array. See Also -------- DOK : A mostly write-only sparse array. as_coo : Convert any given format to :obj:`COO`. Examples -------- You can create :obj:`COO` objects from Numpy arrays. 
>>> x = np.eye(4, dtype=np.uint8) >>> x[2, 3] = 5 >>> s = COO.from_numpy(x) >>> s >>> s.data # doctest: +NORMALIZE_WHITESPACE array([1, 1, 1, 5, 1], dtype=uint8) >>> s.coords # doctest: +NORMALIZE_WHITESPACE array([[0, 1, 2, 2, 3], [0, 1, 2, 3, 3]]) :obj:`COO` objects support basic arithmetic and binary operations. >>> x2 = np.eye(4, dtype=np.uint8) >>> x2[3, 2] = 5 >>> s2 = COO.from_numpy(x2) >>> (s + s2).todense() # doctest: +NORMALIZE_WHITESPACE array([[2, 0, 0, 0], [0, 2, 0, 0], [0, 0, 2, 5], [0, 0, 5, 2]], dtype=uint8) >>> (s * s2).todense() # doctest: +NORMALIZE_WHITESPACE array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], dtype=uint8) Binary operations support broadcasting. >>> x3 = np.zeros((4, 1), dtype=np.uint8) >>> x3[2, 0] = 1 >>> s3 = COO.from_numpy(x3) >>> (s * s3).todense() # doctest: +NORMALIZE_WHITESPACE array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 5], [0, 0, 0, 0]], dtype=uint8) :obj:`COO` objects also support dot products and reductions. >>> s.dot(s.T).sum(axis=0).todense() # doctest: +NORMALIZE_WHITESPACE array([ 1, 1, 31, 6], dtype=uint64) You can use Numpy :code:`ufunc` operations on :obj:`COO` arrays as well. >>> np.sum(s, axis=1).todense() # doctest: +NORMALIZE_WHITESPACE array([1, 1, 6, 1], dtype=uint64) >>> np.round(np.sqrt(s, dtype=np.float64), decimals=1).todense() # doctest: +SKIP array([[ 1. , 0. , 0. , 0. ], [ 0. , 1. , 0. , 0. ], [ 0. , 0. , 1. , 2.2], [ 0. , 0. , 0. , 1. ]]) Operations that will result in a dense array will usually result in a different fill value, such as the following. >>> np.exp(s) You can also create :obj:`COO` arrays from coordinates and data. >>> coords = [[0, 0, 0, 1, 1], ... [0, 1, 2, 0, 3], ... [0, 3, 2, 0, 1]] >>> data = [1, 2, 3, 4, 5] >>> s4 = COO(coords, data, shape=(3, 4, 5)) >>> s4 If the data is same across all coordinates, you can also specify a scalar. >>> coords = [[0, 0, 0, 1, 1], ... [0, 1, 2, 0, 3], ... [0, 3, 2, 0, 1]] >>> data = 1 >>> s5 = COO(coords, data, shape=(3, 4, 5)) >>> s5 Following scipy.sparse conventions you can also pass these as a tuple with rows and columns >>> rows = [0, 1, 2, 3, 4] >>> cols = [0, 0, 0, 1, 1] >>> data = [10, 20, 30, 40, 50] >>> z = COO((data, (rows, cols))) >>> z.todense() # doctest: +NORMALIZE_WHITESPACE array([[10, 0], [20, 0], [30, 0], [ 0, 40], [ 0, 50]]) You can also pass a dictionary or iterable of index/value pairs. Repeated indices imply summation: >>> d = {(0, 0, 0): 1, (1, 2, 3): 2, (1, 1, 0): 3} >>> COO(d) >>> L = [((0, 0), 1), ... ((1, 1), 2), ... ((0, 0), 3)] >>> COO(L).todense() # doctest: +NORMALIZE_WHITESPACE array([[4, 0], [0, 2]]) You can convert :obj:`DOK` arrays to :obj:`COO` arrays. 
>>> from sparse import DOK >>> s6 = DOK((5, 5), dtype=np.int64) >>> s6[1:3, 1:3] = [[4, 5], [6, 7]] >>> s6 >>> s7 = s6.asformat('coo') >>> s7 >>> s7.todense() # doctest: +NORMALIZE_WHITESPACE array([[0, 0, 0, 0, 0], [0, 4, 5, 0, 0], [0, 6, 7, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]) """ __array_priority__ = 12 def __init__( self, coords, data=None, shape=None, has_duplicates=True, sorted=False, prune=False, cache=False, fill_value=None, idx_dtype=None, ): self._cache = None if cache: self.enable_caching() if data is None: arr = as_coo( coords, shape=shape, fill_value=fill_value, idx_dtype=idx_dtype ) self._make_shallow_copy_of(arr) if cache: self.enable_caching() return self.data = np.asarray(data) self.coords = np.asarray(coords) if self.coords.ndim == 1: if self.coords.size == 0 and shape is not None: self.coords = self.coords.reshape((len(shape), len(data))) else: self.coords = self.coords[None, :] if self.data.ndim == 0: self.data = np.broadcast_to(self.data, self.coords.shape[1]) if self.data.ndim != 1: raise ValueError("data must be a scalar or 1-dimensional.") if shape and not self.coords.size: self.coords = np.zeros( (len(shape) if isinstance(shape, Iterable) else 1, 0), dtype=np.intp ) if shape is None: if self.coords.nbytes: shape = tuple((self.coords.max(axis=1) + 1)) else: shape = () if not isinstance(shape, Iterable): shape = (shape,) super().__init__(shape, fill_value=fill_value) if idx_dtype: if not can_store(idx_dtype, max(shape)): raise ValueError( "cannot cast array with shape {} to dtype {}.".format( shape, idx_dtype ) ) self.coords = self.coords.astype(idx_dtype) if self.shape: if len(self.data) != self.coords.shape[1]: msg = ( "The data length does not match the coordinates " "given.\nlen(data) = {}, but {} coords specified." ) raise ValueError(msg.format(len(data), self.coords.shape[1])) if len(self.shape) != self.coords.shape[0]: msg = ( "Shape specified by `shape` doesn't match the " "shape of `coords`; len(shape)={} != coords.shape[0]={}" "(and coords.shape={})" ) raise ValueError( msg.format(len(shape), self.coords.shape[0], self.coords.shape) ) from .._settings import WARN_ON_TOO_DENSE if WARN_ON_TOO_DENSE and self.nbytes >= self.size * self.data.itemsize: warnings.warn( "Attempting to create a sparse array that takes no less " "memory than than an equivalent dense array. You may want to " "use a dense array here instead.", RuntimeWarning, ) if not sorted: self._sort_indices() if has_duplicates: self._sum_duplicates() if prune: self._prune() def __getstate__(self): return (self.coords, self.data, self.shape, self.fill_value) def __setstate__(self, state): self.coords, self.data, self.shape, self.fill_value = state self._cache = None def __dask_tokenize__(self): "Produce a deterministic, content-based hash for dask." from dask.base import normalize_token return normalize_token( (type(self), self.coords, self.data, self.shape, self.fill_value) ) def copy(self, deep=True): """Return a copy of the array. Parameters ---------- deep : boolean, optional If True (default), the internal coords and data arrays are also copied. Set to ``False`` to only make a shallow copy. """ return _copy.deepcopy(self) if deep else _copy.copy(self) def enable_caching(self): """Enable caching of reshape, transpose, and tocsr/csc operations This enables efficient iterative workflows that make heavy use of csr/csc operations, such as tensordot. 
This maintains a cache of recent results of reshape and transpose so that operations like tensordot (which uses both internally) store efficiently stored representations for repeated use. This can significantly cut down on computational costs in common numeric algorithms. However, this also assumes that neither this object, nor the downstream objects will have their data mutated. Examples -------- >>> s.enable_caching() # doctest: +SKIP >>> csr1 = s.transpose((2, 0, 1)).reshape((100, 120)).tocsr() # doctest: +SKIP >>> csr2 = s.transpose((2, 0, 1)).reshape((100, 120)).tocsr() # doctest: +SKIP >>> csr1 is csr2 # doctest: +SKIP True """ self._cache = defaultdict(lambda: deque(maxlen=3)) @classmethod def from_numpy(cls, x, fill_value=None, idx_dtype=None): """ Convert the given :obj:`numpy.ndarray` to a :obj:`COO` object. Parameters ---------- x : np.ndarray The dense array to convert. fill_value : scalar The fill value of the constructed :obj:`COO` array. Zero if unspecified. Returns ------- COO The converted COO array. Examples -------- >>> x = np.eye(5) >>> s = COO.from_numpy(x) >>> s >>> x[x == 0] = np.nan >>> COO.from_numpy(x, fill_value=np.nan) """ x = np.asanyarray(x).view(type=np.ndarray) if fill_value is None: fill_value = _zero_of_dtype(x.dtype) if x.shape: coords = np.where(~equivalent(x, fill_value)) data = x[coords] coords = np.vstack(coords) else: coords = np.empty((0, 1), dtype=np.uint8) data = np.array(x, ndmin=1) return cls( coords, data, shape=x.shape, has_duplicates=False, sorted=True, fill_value=fill_value, idx_dtype=idx_dtype, ) def todense(self): """ Convert this :obj:`COO` array to a dense :obj:`numpy.ndarray`. Note that this may take a large amount of memory if the :obj:`COO` object's :code:`shape` is large. Returns ------- numpy.ndarray The converted dense array. See Also -------- DOK.todense : Equivalent :obj:`DOK` array method. scipy.sparse.coo_matrix.todense : Equivalent Scipy method. Examples -------- >>> x = np.random.randint(100, size=(7, 3)) >>> s = COO.from_numpy(x) >>> x2 = s.todense() >>> np.array_equal(x, x2) True """ x = np.full(self.shape, self.fill_value, self.dtype) coords = tuple([self.coords[i, :] for i in range(self.ndim)]) data = self.data if coords != (): x[coords] = data else: if len(data) != 0: x[coords] = data return x @classmethod def from_scipy_sparse(cls, x): """ Construct a :obj:`COO` array from a :obj:`scipy.sparse.spmatrix` Parameters ---------- x : scipy.sparse.spmatrix The sparse matrix to construct the array from. Returns ------- COO The converted :obj:`COO` object. Examples -------- >>> x = scipy.sparse.rand(6, 3, density=0.2) >>> s = COO.from_scipy_sparse(x) >>> np.array_equal(x.todense(), s.todense()) True """ x = x.asformat("coo") coords = np.empty((2, x.nnz), dtype=x.row.dtype) coords[0, :] = x.row coords[1, :] = x.col return COO( coords, x.data, shape=x.shape, has_duplicates=not x.has_canonical_format, sorted=x.has_canonical_format, ) @classmethod def from_iter(cls, x, shape=None, fill_value=None, dtype=None): """ Converts an iterable in certain formats to a :obj:`COO` array. See examples for details. Parameters ---------- x : Iterable or Iterator The iterable to convert to :obj:`COO`. shape : tuple[int], optional The shape of the array. fill_value : scalar The fill value for this array. dtype : numpy.dtype The dtype of the input array. Inferred from the input if not given. Returns ------- out : COO The output :obj:`COO` array. 
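# A minimal sketch, assuming the COO.from_numpy behaviour above: with a
# non-zero fill value, only entries differing from that value are stored.
import numpy as np
import sparse

x = np.ones((2, 3))
x[0, 1] = 5.0
s = sparse.COO.from_numpy(x, fill_value=1.0)
assert s.nnz == 1                      # only the 5.0 is stored explicitly
assert np.array_equal(s.todense(), x)  # the ones come from the fill value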
Examples -------- You can convert items of the format ``[((i, j, k), value), ((i, j, k), value)]`` to :obj:`COO`. Here, the first part represents the coordinate and the second part represents the value. >>> x = [((0, 0), 1), ((1, 1), 1)] >>> s = COO.from_iter(x) >>> s.todense() array([[1, 0], [0, 1]]) You can also have a similar format with a dictionary. >>> x = {(0, 0): 1, (1, 1): 1} >>> s = COO.from_iter(x) >>> s.todense() array([[1, 0], [0, 1]]) The third supported format is ``(data, (..., row, col))``. >>> x = ([1, 1], ([0, 1], [0, 1])) >>> s = COO.from_iter(x) >>> s.todense() array([[1, 0], [0, 1]]) You can also pass in a :obj:`collections.Iterator` object. >>> x = [((0, 0), 1), ((1, 1), 1)].__iter__() >>> s = COO.from_iter(x) >>> s.todense() array([[1, 0], [0, 1]]) """ if isinstance(x, dict): x = list(x.items()) if not isinstance(x, Sized): x = list(x) if len(x) != 2 and not all(len(item) == 2 for item in x): raise ValueError("Invalid iterable to convert to COO.") if not x: ndim = 0 if shape is None else len(shape) coords = np.empty((ndim, 0), dtype=np.uint8) data = np.empty((0,), dtype=dtype) shape = () if shape is None else shape elif not isinstance(x[0][0], Iterable): coords = np.stack(x[1], axis=0) data = np.asarray(x[0], dtype=dtype) else: coords = np.array([item[0] for item in x]).T data = np.array([item[1] for item in x], dtype=dtype) if not ( coords.ndim == 2 and data.ndim == 1 and np.issubdtype(coords.dtype, np.integer) and np.all(coords >= 0) ): raise ValueError("Invalid iterable to convert to COO.") return COO(coords, data, shape=shape, fill_value=fill_value) @property def dtype(self): """ The datatype of this array. Returns ------- numpy.dtype The datatype of this array. See Also -------- numpy.ndarray.dtype : Numpy equivalent property. scipy.sparse.coo_matrix.dtype : Scipy equivalent property. Examples -------- >>> x = (200 * np.random.rand(5, 4)).astype(np.int32) >>> s = COO.from_numpy(x) >>> s.dtype dtype('int32') >>> x.dtype == s.dtype True """ return self.data.dtype @property def nnz(self): """ The number of nonzero elements in this array. Note that any duplicates in :code:`coords` are counted multiple times. To avoid this, call :obj:`COO.sum_duplicates`. Returns ------- int The number of nonzero elements in this array. See Also -------- DOK.nnz : Equivalent :obj:`DOK` array property. numpy.count_nonzero : A similar Numpy function. scipy.sparse.coo_matrix.nnz : The Scipy equivalent property. Examples -------- >>> x = np.array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3, 0, 0]) >>> np.count_nonzero(x) 6 >>> s = COO.from_numpy(x) >>> s.nnz 6 >>> np.count_nonzero(x) == s.nnz True """ return self.coords.shape[1] @property def nbytes(self): """ The number of bytes taken up by this object. Note that for small arrays, this may undercount the number of bytes due to the large constant overhead. Returns ------- int The approximate bytes of memory taken by this object. See Also -------- numpy.ndarray.nbytes : The equivalent Numpy property. Examples -------- >>> data = np.arange(6, dtype=np.uint8) >>> coords = np.random.randint(1000, size=(3, 6), dtype=np.uint16) >>> s = COO(coords, data, shape=(1000, 1000, 1000)) >>> s.nbytes 42 """ return self.data.nbytes + self.coords.nbytes def __len__(self): """ Get "length" of array, which is by definition the size of the first dimension. Returns ------- int The size of the first dimension. See Also -------- numpy.ndarray.__len__ : Numpy equivalent property. 
Examples -------- >>> x = np.zeros((10, 10)) >>> s = COO.from_numpy(x) >>> len(s) 10 """ return self.shape[0] def __sizeof__(self): return self.nbytes __getitem__ = getitem def __str__(self): return "".format( self.shape, self.dtype, self.nnz, self.fill_value ) __repr__ = __str__ def _reduce_calc(self, method, axis, keepdims=False, **kwargs): if axis[0] is None: axis = tuple(range(self.ndim)) axis = tuple(a if a >= 0 else a + self.ndim for a in axis) neg_axis = tuple(ax for ax in range(self.ndim) if ax not in set(axis)) a = self.transpose(neg_axis + axis) a = a.reshape( ( np.prod([self.shape[d] for d in neg_axis], dtype=np.intp), np.prod([self.shape[d] for d in axis], dtype=np.intp), ) ) data, inv_idx, counts = _grouped_reduce(a.data, a.coords[0], method, **kwargs) n_cols = a.shape[1] arr_attrs = (a, neg_axis, inv_idx) return (data, counts, axis, n_cols, arr_attrs) def _reduce_return(self, data, arr_attrs, result_fill_value): a, neg_axis, inv_idx = arr_attrs coords = a.coords[0:1, inv_idx] out = COO( coords, data, shape=(a.shape[0],), has_duplicates=False, sorted=True, prune=True, fill_value=result_fill_value, ) return out.reshape(tuple(self.shape[d] for d in neg_axis)) def transpose(self, axes=None): """ Returns a new array which has the order of the axes switched. Parameters ---------- axes : Iterable[int], optional The new order of the axes compared to the previous one. Reverses the axes by default. Returns ------- COO The new array with the axes in the desired order. See Also -------- :obj:`COO.T` : A quick property to reverse the order of the axes. numpy.ndarray.transpose : Numpy equivalent function. Examples -------- We can change the order of the dimensions of any :obj:`COO` array with this function. >>> x = np.add.outer(np.arange(5), np.arange(5)[::-1]) >>> x # doctest: +NORMALIZE_WHITESPACE array([[4, 3, 2, 1, 0], [5, 4, 3, 2, 1], [6, 5, 4, 3, 2], [7, 6, 5, 4, 3], [8, 7, 6, 5, 4]]) >>> s = COO.from_numpy(x) >>> s.transpose((1, 0)).todense() # doctest: +NORMALIZE_WHITESPACE array([[4, 5, 6, 7, 8], [3, 4, 5, 6, 7], [2, 3, 4, 5, 6], [1, 2, 3, 4, 5], [0, 1, 2, 3, 4]]) Note that by default, this reverses the order of the axes rather than switching the last and second-to-last axes as required by some linear algebra operations. >>> x = np.random.rand(2, 3, 4) >>> s = COO.from_numpy(x) >>> s.transpose().shape (4, 3, 2) """ if axes is None: axes = list(reversed(range(self.ndim))) # Normalize all axes indices to positive values axes = normalize_axis(axes, self.ndim) if len(np.unique(axes)) < len(axes): raise ValueError("repeated axis in transpose") if not len(axes) == self.ndim: raise ValueError("axes don't match array") axes = tuple(axes) if axes == tuple(range(self.ndim)): return self if self._cache is not None: for ax, value in self._cache["transpose"]: if ax == axes: return value shape = tuple(self.shape[ax] for ax in axes) result = COO( self.coords[axes, :], self.data, shape, has_duplicates=False, cache=self._cache is not None, fill_value=self.fill_value, ) if self._cache is not None: self._cache["transpose"].append((axes, result)) return result @property def T(self): """ Returns a new array which has the order of the axes reversed. Returns ------- COO The new array with the axes in the desired order. See Also -------- :obj:`COO.transpose` : A method where you can specify the order of the axes. numpy.ndarray.T : Numpy equivalent property. Examples -------- We can change the order of the dimensions of any :obj:`COO` array with this function. 
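# A plain-NumPy analogue of the reduction strategy in _reduce_calc above
# (illustrative only): move the kept axes to the front, collapse the reduced
# axes into one trailing axis, then reduce within each group.
import numpy as np

x = np.arange(24).reshape(2, 3, 4)
axis, keep = (0, 2), (1,)
flat = x.transpose(keep + axis).reshape(x.shape[1], -1)
assert np.array_equal(flat.sum(axis=1), x.sum(axis=axis))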
>>> x = np.add.outer(np.arange(5), np.arange(5)[::-1]) >>> x # doctest: +NORMALIZE_WHITESPACE array([[4, 3, 2, 1, 0], [5, 4, 3, 2, 1], [6, 5, 4, 3, 2], [7, 6, 5, 4, 3], [8, 7, 6, 5, 4]]) >>> s = COO.from_numpy(x) >>> s.T.todense() # doctest: +NORMALIZE_WHITESPACE array([[4, 5, 6, 7, 8], [3, 4, 5, 6, 7], [2, 3, 4, 5, 6], [1, 2, 3, 4, 5], [0, 1, 2, 3, 4]]) Note that by default, this reverses the order of the axes rather than switching the last and second-to-last axes as required by some linear algebra operations. >>> x = np.random.rand(2, 3, 4) >>> s = COO.from_numpy(x) >>> s.T.shape (4, 3, 2) """ return self.transpose(tuple(range(self.ndim))[::-1]) def swapaxes(self, axis1, axis2): """Returns array that has axes axis1 and axis2 swapped. Parameters ---------- axis1 : int first axis to swap axis2: int second axis to swap Returns ------- COO The new array with the axes axis1 and axis2 swapped. Examples -------- >>> x = COO.from_numpy(np.ones((2, 3, 4))) >>> x.swapaxes(0, 2) """ # Normalize all axis1, axis2 to positive values axis1, axis2 = normalize_axis( (axis1, axis2), self.ndim ) # checks if axis1,2 are in range + raises ValueError axes = list(range(self.ndim)) axes[axis1], axes[axis2] = axes[axis2], axes[axis1] return self.transpose(axes) def dot(self, other): """ Performs the equivalent of :code:`x.dot(y)` for :obj:`COO`. Parameters ---------- other : Union[COO, numpy.ndarray, scipy.sparse.spmatrix] The second operand of the dot product operation. Returns ------- {COO, numpy.ndarray} The result of the dot product. If the result turns out to be dense, then a dense array is returned, otherwise, a sparse array. Raises ------ ValueError If all arguments don't have zero fill-values. See Also -------- dot : Equivalent function for two arguments. :obj:`numpy.dot` : Numpy equivalent function. scipy.sparse.coo_matrix.dot : Scipy equivalent function. Examples -------- >>> x = np.arange(4).reshape((2, 2)) >>> s = COO.from_numpy(x) >>> s.dot(s) # doctest: +SKIP array([[ 2, 3], [ 6, 11]], dtype=int64) """ return dot(self, other) def __matmul__(self, other): try: return matmul(self, other) except NotImplementedError: return NotImplemented def __rmatmul__(self, other): try: return matmul(other, self) except NotImplementedError: return NotImplemented def linear_loc(self): """ The nonzero coordinates of a flattened version of this array. Note that the coordinates may be out of order. Parameters ---------- signed : bool, optional Whether to use a signed datatype for the output array. :code:`False` by default. Returns ------- numpy.ndarray The flattened coordinates. See Also -------- :obj:`numpy.flatnonzero` : Equivalent Numpy function. Examples -------- >>> x = np.eye(5) >>> s = COO.from_numpy(x) >>> s.linear_loc() # doctest: +NORMALIZE_WHITESPACE array([ 0, 6, 12, 18, 24]) >>> np.array_equal(np.flatnonzero(x), s.linear_loc()) True """ from .common import linear_loc return linear_loc(self.coords, self.shape) def flatten(self, order="C"): """ Returns a new :obj:`COO` array that is a flattened version of this array. Returns ------- COO The flattened output array. Notes ----- The :code:`order` parameter is provided just for compatibility with Numpy and isn't actually supported. 
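# A minimal sketch, assuming the __matmul__/__rmatmul__ hooks above: the @
# operator delegates to sparse's matmul.
import numpy as np
import sparse

a = sparse.COO.from_numpy(np.eye(3))
b = sparse.COO.from_numpy(np.arange(9.0).reshape(3, 3))
assert np.array_equal((a @ b).todense(), np.arange(9.0).reshape(3, 3))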
Examples -------- >>> s = COO.from_numpy(np.arange(10)) >>> s2 = s.reshape((2, 5)).flatten() >>> s2.todense() array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) """ if order not in {"C", None}: raise NotImplementedError("The `order` parameter is not" "supported.") return self.reshape(-1) def reshape(self, shape, order="C"): """ Returns a new :obj:`COO` array that is a reshaped version of this array. Parameters ---------- shape : tuple[int] The desired shape of the output array. Returns ------- COO The reshaped output array. See Also -------- numpy.ndarray.reshape : The equivalent Numpy function. Notes ----- The :code:`order` parameter is provided just for compatibility with Numpy and isn't actually supported. Examples -------- >>> s = COO.from_numpy(np.arange(25)) >>> s2 = s.reshape((5, 5)) >>> s2.todense() # doctest: +NORMALIZE_WHITESPACE array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) """ if isinstance(shape, Iterable): shape = tuple(shape) else: shape = (shape,) if order not in {"C", None}: raise NotImplementedError("The `order` parameter is not supported") if self.shape == shape: return self if any(d == -1 for d in shape): extra = int(self.size / np.prod([d for d in shape if d != -1])) shape = tuple([d if d != -1 else extra for d in shape]) if self.size != reduce(operator.mul, shape, 1): raise ValueError( "cannot reshape array of size {} into shape {}".format(self.size, shape) ) if self._cache is not None: for sh, value in self._cache["reshape"]: if sh == shape: return value # TODO: this self.size enforces a 2**64 limit to array size linear_loc = self.linear_loc() idx_dtype = self.coords.dtype if shape != () and not can_store(idx_dtype, max(shape)): idx_dtype = np.min_scalar_type(max(shape)) coords = np.empty((len(shape), self.nnz), dtype=idx_dtype) strides = 1 for i, d in enumerate(shape[::-1]): coords[-(i + 1), :] = (linear_loc // strides) % d strides *= d result = COO( coords, self.data, shape, has_duplicates=False, sorted=True, cache=self._cache is not None, fill_value=self.fill_value, ) if self._cache is not None: self._cache["reshape"].append((shape, result)) return result def resize(self, *args, refcheck=True, coords_dtype=np.intp): """ This method changes the shape and size of an array in-place. Parameters ---------- args : tuple, or series of integers The desired shape of the output array. See Also -------- numpy.ndarray.resize : The equivalent Numpy function. 
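# A minimal sketch, assuming the reshape code above: a single -1 in the
# requested shape is inferred from the total size, as in NumPy.
import numpy as np
import sparse

s = sparse.COO.from_numpy(np.arange(12))
assert s.reshape((3, -1)).shape == (3, 4)
assert np.array_equal(s.reshape((3, -1)).todense(), np.arange(12).reshape(3, 4))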
""" warnings.warn( "resize is deprecated on all SpraseArray objects.", DeprecationWarning ) if len(args) == 1 and isinstance(args[0], tuple): shape = args[0] elif all(isinstance(arg, int) for arg in args): shape = tuple(args) else: raise ValueError("Invalid input") if any(d < 0 for d in shape): raise ValueError("negative dimensions not allowed") new_size = reduce(operator.mul, shape, 1) # TODO: this self.size enforces a 2**64 limit to array size linear_loc = self.linear_loc() end_idx = np.searchsorted(linear_loc, new_size, side="left") linear_loc = linear_loc[:end_idx] idx_dtype = self.coords.dtype if shape != () and not can_store(idx_dtype, max(shape)): idx_dtype = np.min_scalar_type(max(shape)) coords = np.empty((len(shape), len(linear_loc)), dtype=idx_dtype) strides = 1 for i, d in enumerate(shape[::-1]): coords[-(i + 1), :] = (linear_loc // strides) % d strides *= d self.shape = shape self.coords = coords if len(self.data) != len(linear_loc): self.data = self.data[:end_idx].copy() def to_scipy_sparse(self): """ Converts this :obj:`COO` object into a :obj:`scipy.sparse.coo_matrix`. Returns ------- :obj:`scipy.sparse.coo_matrix` The converted Scipy sparse matrix. Raises ------ ValueError If the array is not two-dimensional. ValueError If all the array doesn't zero fill-values. See Also -------- COO.tocsr : Convert to a :obj:`scipy.sparse.csr_matrix`. COO.tocsc : Convert to a :obj:`scipy.sparse.csc_matrix`. """ check_zero_fill_value(self) if self.ndim != 2: raise ValueError( "Can only convert a 2-dimensional array to a Scipy sparse matrix." ) result = scipy.sparse.coo_matrix( (self.data, (self.coords[0], self.coords[1])), shape=self.shape ) result.has_canonical_format = True return result def _tocsr(self): if self.ndim != 2: raise ValueError( "This array must be two-dimensional for this conversion " "to work." ) row, col = self.coords # Pass 3: count nonzeros in each row indptr = np.zeros(self.shape[0] + 1, dtype=np.int64) np.cumsum(np.bincount(row, minlength=self.shape[0]), out=indptr[1:]) return scipy.sparse.csr_matrix((self.data, col, indptr), shape=self.shape) def tocsr(self): """ Converts this array to a :obj:`scipy.sparse.csr_matrix`. Returns ------- scipy.sparse.csr_matrix The result of the conversion. Raises ------ ValueError If the array is not two-dimensional. ValueError If all the array doesn't have zero fill-values. See Also -------- COO.tocsc : Convert to a :obj:`scipy.sparse.csc_matrix`. COO.to_scipy_sparse : Convert to a :obj:`scipy.sparse.coo_matrix`. scipy.sparse.coo_matrix.tocsr : Equivalent Scipy function. """ check_zero_fill_value(self) if self._cache is not None: try: return self._csr except AttributeError: pass try: self._csr = self._csc.tocsr() return self._csr except AttributeError: pass self._csr = csr = self._tocsr() else: csr = self._tocsr() return csr def tocsc(self): """ Converts this array to a :obj:`scipy.sparse.csc_matrix`. Returns ------- scipy.sparse.csc_matrix The result of the conversion. Raises ------ ValueError If the array is not two-dimensional. ValueError If the array doesn't have zero fill-values. See Also -------- COO.tocsr : Convert to a :obj:`scipy.sparse.csr_matrix`. COO.to_scipy_sparse : Convert to a :obj:`scipy.sparse.coo_matrix`. scipy.sparse.coo_matrix.tocsc : Equivalent Scipy function. 
""" check_zero_fill_value(self) if self._cache is not None: try: return self._csc except AttributeError: pass try: self._csc = self._csr.tocsc() return self._csc except AttributeError: pass self._csc = csc = self.tocsr().tocsc() else: csc = self.tocsr().tocsc() return csc def _sort_indices(self): """ Sorts the :obj:`COO.coords` attribute. Also sorts the data in :obj:`COO.data` to match. Examples -------- >>> coords = np.array([[1, 2, 0]], dtype=np.uint8) >>> data = np.array([4, 1, 3], dtype=np.uint8) >>> s = COO(coords, data) >>> s._sort_indices() >>> s.coords # doctest: +NORMALIZE_WHITESPACE array([[0, 1, 2]], dtype=uint8) >>> s.data # doctest: +NORMALIZE_WHITESPACE array([3, 4, 1], dtype=uint8) """ linear = self.linear_loc() if (np.diff(linear) >= 0).all(): # already sorted return order = np.argsort(linear, kind="mergesort") self.coords = self.coords[:, order] self.data = self.data[order] def _sum_duplicates(self): """ Sums data corresponding to duplicates in :obj:`COO.coords`. See Also -------- scipy.sparse.coo_matrix.sum_duplicates : Equivalent Scipy function. Examples -------- >>> coords = np.array([[0, 1, 1, 2]], dtype=np.uint8) >>> data = np.array([6, 5, 2, 2], dtype=np.uint8) >>> s = COO(coords, data) >>> s._sum_duplicates() >>> s.coords # doctest: +NORMALIZE_WHITESPACE array([[0, 1, 2]], dtype=uint8) >>> s.data # doctest: +NORMALIZE_WHITESPACE array([6, 7, 2], dtype=uint8) """ # Inspired by scipy/sparse/coo.py::sum_duplicates # See https://github.com/scipy/scipy/blob/master/LICENSE.txt linear = self.linear_loc() unique_mask = np.diff(linear) != 0 if unique_mask.sum() == len(unique_mask): # already unique return unique_mask = np.append(True, unique_mask) coords = self.coords[:, unique_mask] (unique_inds,) = np.nonzero(unique_mask) data = np.add.reduceat(self.data, unique_inds, dtype=self.data.dtype) self.data = data self.coords = coords def _prune(self): """ Prunes data so that if any fill-values are present, they are removed from both coordinates and data. Examples -------- >>> coords = np.array([[0, 1, 2, 3]]) >>> data = np.array([1, 0, 1, 2]) >>> s = COO(coords, data) >>> s._prune() >>> s.nnz 3 """ mask = ~equivalent(self.data, self.fill_value) self.coords = self.coords[:, mask] self.data = self.data[mask] def broadcast_to(self, shape): """ Performs the equivalent of :obj:`numpy.broadcast_to` for :obj:`COO`. Note that this function returns a new array instead of a view. Parameters ---------- shape : tuple[int] The shape to broadcast the data to. Returns ------- COO The broadcasted sparse array. Raises ------ ValueError If the operand cannot be broadcast to the given shape. See also -------- :obj:`numpy.broadcast_to` : NumPy equivalent function """ return broadcast_to(self, shape) def maybe_densify(self, max_size=1000, min_density=0.25): """ Converts this :obj:`COO` array to a :obj:`numpy.ndarray` if not too costly. Parameters ---------- max_size : int Maximum number of elements in output min_density : float Minimum density of output Returns ------- numpy.ndarray The dense array. Raises ------- ValueError If the returned array would be too large. Examples -------- Convert a small sparse array to a dense array. >>> s = COO.from_numpy(np.random.rand(2, 3, 4)) >>> x = s.maybe_densify() >>> np.allclose(x, s.todense()) True You can also specify the minimum allowed density or the maximum number of output elements. If both conditions are unmet, this method will throw an error. 
>>> x = np.zeros((5, 5), dtype=np.uint8) >>> x[2, 2] = 1 >>> s = COO.from_numpy(x) >>> s.maybe_densify(max_size=5, min_density=0.25) Traceback (most recent call last): ... ValueError: Operation would require converting large sparse array to dense """ if self.size <= max_size or self.density >= min_density: return self.todense() else: raise ValueError( "Operation would require converting " "large sparse array to dense" ) def nonzero(self): """ Get the indices where this array is nonzero. Returns ------- idx : tuple[numpy.ndarray] The indices where this array is nonzero. See Also -------- :obj:`numpy.ndarray.nonzero` : NumPy equivalent function Raises ------ ValueError If the array doesn't have zero fill-values. Examples -------- >>> s = COO.from_numpy(np.eye(5)) >>> s.nonzero() (array([0, 1, 2, 3, 4]), array([0, 1, 2, 3, 4])) """ check_zero_fill_value(self) return tuple(self.coords) def asformat(self, format, compressed_axes=None): """ Convert this sparse array to a given format. Parameters ---------- format : str A format string. Returns ------- out : SparseArray The converted array. Raises ------ NotImplementedError If the format isn't supported. """ from .._compressed import GCXS if format == "gcxs" or format is GCXS: return GCXS.from_coo(self, compressed_axes=compressed_axes) elif compressed_axes is not None: raise ValueError( "compressed_axes is not supported for {} format".format(format) ) if format == "coo" or format is COO: return self from .._dok import DOK if format == "dok" or format is DOK: return DOK.from_coo(self) raise NotImplementedError("The given format is not supported.") def as_coo(x, shape=None, fill_value=None, idx_dtype=None): """ Converts any given format to :obj:`COO`. See the "See Also" section for details. Parameters ---------- x : SparseArray or numpy.ndarray or scipy.sparse.spmatrix or Iterable. The item to convert. shape : tuple[int], optional The shape of the output array. Can only be used in case of Iterable. Returns ------- out : COO The converted :obj:`COO` array. See Also -------- SparseArray.asformat : A utility function to convert between formats in this library. COO.from_numpy : Convert a Numpy array to :obj:`COO`. COO.from_scipy_sparse : Convert a SciPy sparse matrix to :obj:`COO`. COO.from_iter : Convert an iterable to :obj:`COO`. """ if hasattr(x, "shape") and shape is not None: raise ValueError( "Cannot provide a shape in combination with something " "that already has a shape." ) if hasattr(x, "fill_value") and fill_value is not None: raise ValueError( "Cannot provide a fill-value in combination with something " "that already has a fill-value." ) if isinstance(x, SparseArray): return x.asformat("coo") if isinstance(x, np.ndarray): return COO.from_numpy(x, fill_value=fill_value, idx_dtype=idx_dtype) if isinstance(x, scipy.sparse.spmatrix): return COO.from_scipy_sparse(x) if isinstance(x, (Iterable, Iterator)): return COO.from_iter(x, shape=shape, fill_value=fill_value) raise NotImplementedError( "Format not supported for conversion. Supplied type is " "%s, see help(sparse.as_coo) for supported formats." 
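# A minimal sketch, assuming the asformat / as_coo APIs above: asformat
# converts between this library's formats, while as_coo accepts ndarrays,
# SciPy matrices and iterables.
import numpy as np
import scipy.sparse
import sparse

s = sparse.COO.from_numpy(np.eye(3))
d = s.asformat("dok")                   # COO -> DOK
c = sparse.as_coo(scipy.sparse.eye(3))  # SciPy matrix -> COO
assert np.array_equal(d.todense(), c.todense())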
% type(x) ) @numba.jit(nopython=True, nogil=True) # pragma: no cover def _calc_counts_invidx(groups): inv_idx = [] counts = [] if len(groups) == 0: return ( np.array(inv_idx, dtype=groups.dtype), np.array(counts, dtype=groups.dtype), ) inv_idx.append(0) last_group = groups[0] for i in range(1, len(groups)): if groups[i] != last_group: counts.append(i - inv_idx[-1]) inv_idx.append(i) last_group = groups[i] counts.append(len(groups) - inv_idx[-1]) return (np.array(inv_idx, dtype=groups.dtype), np.array(counts, dtype=groups.dtype)) def _grouped_reduce(x, groups, method, **kwargs): """ Performs a :code:`ufunc` grouped reduce. Parameters ---------- x : np.ndarray The data to reduce. groups : np.ndarray The groups the data belongs to. The groups must be contiguous. method : np.ufunc The :code:`ufunc` to use to perform the reduction. kwargs : dict The kwargs to pass to the :code:`ufunc`'s :code:`reduceat` function. Returns ------- result : np.ndarray The result of the grouped reduce operation. inv_idx : np.ndarray The index of the first element where each group is found. counts : np.ndarray The number of elements in each group. """ # Partial credit to @shoyer # Ref: https://gist.github.com/shoyer/f538ac78ae904c936844 inv_idx, counts = _calc_counts_invidx(groups) result = method.reduceat(x, inv_idx, **kwargs) return result, inv_idx, counts sparse-0.12.0/sparse/_coo/indexing.py000066400000000000000000000506661402510130100174620ustar00rootroot00000000000000from numbers import Integral import numba import numpy as np from itertools import zip_longest from .._slicing import normalize_index from .._utils import _zero_of_dtype, equivalent def getitem(x, index): """ This function implements the indexing functionality for COO. The overall algorithm has three steps: 1. Normalize the index to canonical form. Function: normalize_index 2. Get the mask, which is a list of integers corresponding to the indices in coords/data for the output data. Function: _mask 3. Transform the coordinates to what they will be in the output. Parameters ---------- x : COO The array to apply the indexing operation on. index : {tuple, str} The index into the array. """ from .core import COO # If string, this is an index into an np.void # Custom dtype. if isinstance(index, str): data = x.data[index] idx = np.where(data) data = data[idx].flatten() coords = list(x.coords[:, idx[0]]) coords.extend(idx[1:]) fill_value_idx = np.asarray(x.fill_value[index]).flatten() fill_value = ( fill_value_idx[0] if fill_value_idx.size else _zero_of_dtype(data.dtype)[()] ) if not equivalent(fill_value, fill_value_idx).all(): raise ValueError("Fill-values in the array are inconsistent.") return COO( coords, data, shape=x.shape + x.data.dtype[index].shape, has_duplicates=False, sorted=True, fill_value=fill_value, ) # Otherwise, convert into a tuple. if not isinstance(index, tuple): index = (index,) # Check if the last index is an ellipsis. last_ellipsis = len(index) > 0 and index[-1] is Ellipsis # Normalize the index into canonical form. index = normalize_index(index, x.shape) # zip_longest so things like x[..., None] are picked up. 
if len(index) != 0 and all( isinstance(ind, slice) and ind == slice(0, dim, 1) for ind, dim in zip_longest(index, x.shape) ): return x # Get the mask mask, adv_idx = _mask(x.coords, index, x.shape) # Get the length of the mask if isinstance(mask, slice): n = len(range(mask.start, mask.stop, mask.step)) else: n = len(mask) coords = [] shape = [] i = 0 sorted = adv_idx is None or adv_idx.pos == 0 adv_idx_added = False for ind in index: # Nothing is added to shape or coords if the index is an integer. if isinstance(ind, Integral): i += 1 continue # Add to the shape and transform the coords in the case of a slice. elif isinstance(ind, slice): shape.append(len(range(ind.start, ind.stop, ind.step))) coords.append((x.coords[i, mask] - ind.start) // ind.step) i += 1 if ind.step < 0: sorted = False # Add the index and shape for the advanced index. elif isinstance(ind, np.ndarray): if not adv_idx_added: shape.append(adv_idx.length) coords.append(adv_idx.idx) adv_idx_added = True i += 1 # Add a dimension for None. elif ind is None: coords.append(np.zeros(n, dtype=np.intp)) shape.append(1) # Join all the transformed coords. if coords: coords = np.stack(coords, axis=0) else: # If index result is a scalar, return a 0-d COO or # a scalar depending on whether the last index is an ellipsis. if last_ellipsis: coords = np.empty((0, n), dtype=np.uint8) else: if n != 0: return x.data[mask][0] else: return x.fill_value shape = tuple(shape) data = x.data[mask] return COO( coords, data, shape=shape, has_duplicates=False, sorted=sorted, fill_value=x.fill_value, ) def _mask(coords, indices, shape): indices = _prune_indices(indices, shape) indices, adv_idx, adv_idx_pos = _separate_adv_indices(indices) if len(adv_idx) != 0: if len(adv_idx) != 1: # Ensure if multiple advanced indices are passed, all are of the same length # Also check each advanced index to ensure each is only a one-dimensional iterable adv_ix_len = len(adv_idx[0]) for ai in adv_idx: if len(ai) != adv_ix_len: raise IndexError( "shape mismatch: indexing arrays could not be broadcast together. Ensure all indexing arrays are of the same length." ) if ai.ndim != 1: raise IndexError("Only one-dimensional iterable indices supported.") mask, aidxs = _compute_multi_axis_multi_mask( coords, _ind_ar_from_indices(indices), np.array(adv_idx, dtype=np.intp), np.array(adv_idx_pos, dtype=np.intp), ) return mask, _AdvIdxInfo(aidxs, adv_idx_pos, adv_ix_len) else: adv_idx = adv_idx[0] adv_idx_pos = adv_idx_pos[0] if adv_idx.ndim != 1: raise IndexError("Only one-dimensional iterable indices supported.") mask, aidxs = _compute_multi_mask( coords, _ind_ar_from_indices(indices), adv_idx, adv_idx_pos ) return mask, _AdvIdxInfo(aidxs, adv_idx_pos, len(adv_idx)) mask, is_slice = _compute_mask(coords, _ind_ar_from_indices(indices)) if is_slice: return slice(mask[0], mask[1], 1), None else: return mask, None def _ind_ar_from_indices(indices): """ Computes an index "array" from indices, such that ``indices[i]`` is transformed to ``ind_ar[i]`` and ``ind_ar[i].shape == (3,)``. It has the format ``[start, stop, step]``. Integers are converted into steps as well. Parameters ---------- indices : Iterable Input indices (slices and integers) Returns ------- ind_ar : np.ndarray The output array. 
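# A minimal sketch, assuming the getitem/_mask machinery above: integers,
# slices and a one-dimensional advanced index are all supported.
import numpy as np
import sparse

d = np.arange(20).reshape(4, 5)
s = sparse.COO.from_numpy(d)
assert s[1, 2] == 7                       # integer indices give a scalar
assert s[1:3, ::2].shape == (2, 3)        # slices give a COO array
assert np.array_equal(s[[0, 2], :].todense(), d[[0, 2], :])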
Examples -------- >>> _ind_ar_from_indices([1]) array([[1, 2, 1]]) >>> _ind_ar_from_indices([slice(5, 7, 2)]) array([[5, 7, 2]]) """ ind_ar = np.empty((len(indices), 3), dtype=np.intp) for i, idx in enumerate(indices): if isinstance(idx, slice): ind_ar[i] = [idx.start, idx.stop, idx.step] elif isinstance(idx, Integral): ind_ar[i] = [idx, idx + 1, 1] return ind_ar def _prune_indices(indices, shape, prune_none=True): """ Gets rid of the indices that do not contribute to the overall mask, e.g. None and full slices. Parameters ---------- indices : tuple The indices to the array. shape : tuple[int] The shape of the array. Returns ------- indices : tuple The filtered indices. Examples -------- >>> _prune_indices((None, 5), (10,)) # None won't affect the mask [5] >>> _prune_indices((slice(0, 10, 1),), (10,)) # Full slices don't affect the mask [] """ if prune_none: indices = [idx for idx in indices if idx is not None] i = 0 for idx, l in zip(indices[::-1], shape[::-1]): if not isinstance(idx, slice): break if idx.start == 0 and idx.stop == l and idx.step == 1: i += 1 continue if idx.start == l - 1 and idx.stop == -1 and idx.step == -1: i += 1 continue break if i != 0: indices = indices[:-i] return indices def _separate_adv_indices(indices): """ Separates advanced from normal indices. Parameters ---------- indices : list The input indices Returns ------- new_idx : list The normal indices. adv_idx : list The advanced indices. adv_idx_pos : list The positions of the advanced indices. """ adv_idx_pos = [] new_idx = [] adv_idx = [] for i, idx in enumerate(indices): if isinstance(idx, np.ndarray): adv_idx.append(idx) adv_idx_pos.append(i) else: new_idx.append(idx) return new_idx, adv_idx, adv_idx_pos @numba.jit(nopython=True, nogil=True) def _compute_multi_axis_multi_mask( coords, indices, adv_idx, adv_idx_pos ): # pragma: no cover """ Computes a mask with the advanced index, and also returns the advanced index dimension. Parameters ---------- coords : np.ndarray Coordinates of the input array. indices : np.ndarray The indices in slice format. adv_idx : np.ndarray List of advanced indices. adv_idx_pos : np.ndarray The position of the advanced indices. Returns ------- mask : np.ndarray The mask. aidxs : np.ndarray The advanced array index. """ n_adv_idx = len(adv_idx_pos) mask = numba.typed.List.empty_list(numba.types.intp) a_indices = numba.typed.List.empty_list(numba.types.intp) full_idx = np.empty((len(indices) + len(adv_idx_pos), 3), dtype=np.intp) # Get location of non-advanced indices if len(indices) != 0: ixx = 0 for ix in range(coords.shape[0]): isin = False for ax in adv_idx_pos: if ix == ax: isin = True break if not isin: full_idx[ix] = indices[ixx] ixx += 1 for i in range(len(adv_idx[0])): for ii in range(n_adv_idx): full_idx[adv_idx_pos[ii]] = [adv_idx[ii][i], adv_idx[ii][i] + 1, 1] partial_mask, is_slice = _compute_mask(coords, full_idx) if is_slice: slice_mask = numba.typed.List.empty_list(numba.types.intp) for j in range(partial_mask[0], partial_mask[1]): slice_mask.append(j) partial_mask = array_from_list_intp(slice_mask) for j in range(len(partial_mask)): mask.append(partial_mask[j]) a_indices.append(i) return array_from_list_intp(mask), array_from_list_intp(a_indices) @numba.jit(nopython=True, nogil=True) def _compute_multi_mask(coords, indices, adv_idx, adv_idx_pos): # pragma: no cover """ Computes a mask with the advanced index, and also returns the advanced index dimension. Parameters ---------- coords : np.ndarray Coordinates of the input array. 
indices : np.ndarray The indices in slice format. adv_idx : list(int) The advanced index. adv_idx_pos : list(int) The position of the advanced index. Returns ------- mask : np.ndarray The mask. aidxs : np.ndarray The advanced array index. """ mask = numba.typed.List.empty_list(numba.types.intp) a_indices = numba.typed.List.empty_list(numba.types.intp) full_idx = np.empty((len(indices) + 1, 3), dtype=np.intp) full_idx[:adv_idx_pos] = indices[:adv_idx_pos] full_idx[adv_idx_pos + 1 :] = indices[adv_idx_pos:] for i, aidx in enumerate(adv_idx): full_idx[adv_idx_pos] = [aidx, aidx + 1, 1] partial_mask, is_slice = _compute_mask(coords, full_idx) if is_slice: slice_mask = numba.typed.List.empty_list(numba.types.intp) for j in range(partial_mask[0], partial_mask[1]): slice_mask.append(j) partial_mask = array_from_list_intp(slice_mask) for j in range(len(partial_mask)): mask.append(partial_mask[j]) a_indices.append(i) return array_from_list_intp(mask), array_from_list_intp(a_indices) @numba.jit(nopython=True, nogil=True) def _compute_mask(coords, indices): # pragma: no cover """ Gets the mask for the coords given the indices in slice format. Works with either start-stop ranges of matching indices into coords called "pairs" (start-stop pairs) or filters the mask directly, based on which is faster. Exploits the structure in sorted coords, which is that for a constant value of coords[i - 1], coords[i - 2] and so on, coords[i] is sorted. Concretely, ``coords[i, coords[i - 1] == v1 & coords[i - 2] = v2, ...]`` is always sorted. It uses this sortedness to find sub-pairs for each dimension given the previous, and so on. This is efficient for small slices or ints, but not for large ones. After it detects that working with pairs is rather inefficient (or after going through each possible index), it constructs a filtered mask from the start-stop pairs. Parameters ---------- coords : np.ndarray The coordinates of the array. indices : np.ndarray The indices in the form of slices such that indices[:, 0] are starts, indices[:, 1] are stops and indices[:, 2] are steps. Returns ------- mask : np.ndarray The starts and stops in the mask. is_slice : bool Whether or not the array represents a continuous slice. Examples -------- Let's create some mock coords and indices >>> import numpy as np >>> coords = np.array([[0, 0, 1, 1, 2, 2]]) >>> indices = np.array([[0, 3, 2]]) # Equivalent to slice(0, 3, 2) Now let's get the mask. Notice that the indices of ``0`` and ``2`` are matched. >>> _compute_mask(coords, indices) (array([0, 1, 4, 5]), False) Now, let's try with a more "continuous" slice. Matches ``0`` and ``1``. >>> indices = np.array([[0, 2, 1]]) >>> _compute_mask(coords, indices) (array([0, 4]), True) This is equivalent to mask being ``slice(0, 4, 1)``. """ # Set the initial mask to be the entire range of coordinates. starts = numba.typed.List.empty_list(numba.types.intp) starts.append(0) stops = numba.typed.List.empty_list(numba.types.intp) stops.append(coords.shape[1]) n_matches = np.intp(coords.shape[1]) i = 0 while i < len(indices): # Guesstimate whether working with pairs is more efficient or # working with the mask directly. # One side is the estimate of time taken for binary searches # (n_searches * log(avg_length)) # The other is an estimated time of a linear filter for the mask. 
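# Concretely, the check below compares the two estimates: staying with pairs costs roughly
# ``n_current_slices * log(n_current_slices / n_pairs)`` binary-search comparisons, while switching
# to a linear filter over the remaining coordinates costs about ``n_matches + n_pairs``; once the
# search-based estimate exceeds the filter estimate, the loop breaks out and filters instead.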
n_pairs = len(starts) n_current_slices = ( len(range(indices[i, 0], indices[i, 1], indices[i, 2])) * n_pairs + 2 ) if ( n_current_slices * np.log(n_current_slices / max(n_pairs, 1)) > n_matches + n_pairs ): break # For each of the pairs, search inside the coordinates for other # matching sub-pairs. # This gets the start-end coordinates in coords for each 'sub-array' # Which would come out of indexing a single integer. starts, stops, n_matches = _get_mask_pairs(starts, stops, coords[i], indices[i]) i += 1 # Combine adjacent pairs starts, stops = _join_adjacent_pairs(starts, stops) # If just one pair is left over, treat it as a slice. if i == len(indices) and len(starts) == 1: return np.array([starts[0], stops[0]]), True # Convert start-stop pairs into mask, filtering by remaining # coordinates. mask = _filter_pairs(starts, stops, coords[i:], indices[i:]) return array_from_list_intp(mask), False @numba.jit(nopython=True, nogil=True) def _get_mask_pairs(starts_old, stops_old, c, idx): # pragma: no cover """ Gets the pairs for a following dimension given the pairs for a dimension. For each pair, it searches in the following dimension for matching coords and returns those. The total combined length of all pairs is returned to help with the performance guesstimate. Parameters ---------- starts_old, stops_old : list[int] The starts and stops from the previous index. c : np.ndarray The coords for this index's dimension. idx : np.ndarray The index in the form of a slice. idx[0], idx[1], idx[2] = start, stop, step Returns ------- starts, stops: list The starts and stops after applying the current index. n_matches : int The sum of elements in all ranges. Examples -------- >>> c = np.array([1, 2, 1, 2, 1, 1, 2, 2]) >>> starts_old = numba.typed.List(); starts_old.append(4) >>> stops_old = numba.typed.List(); stops_old.append(8) >>> idx = np.array([1, 2, 1]) >>> _get_mask_pairs(starts_old, stops_old, c, idx) (ListType[int64]([4]), ListType[int64]([6]), 2) """ starts = numba.typed.List.empty_list(numba.types.intp) stops = numba.typed.List.empty_list(numba.types.intp) n_matches = np.intp(0) for j in range(len(starts_old)): # For each matching "integer" in the slice, search within the "sub-coords" # Using binary search. for p_match in range(idx[0], idx[1], idx[2]): start = ( np.searchsorted(c[starts_old[j] : stops_old[j]], p_match, side="left") + starts_old[j] ) stop = ( np.searchsorted(c[starts_old[j] : stops_old[j]], p_match, side="right") + starts_old[j] ) if start != stop: starts.append(start) stops.append(stop) n_matches += stop - start return starts, stops, n_matches @numba.jit(nopython=True, nogil=True) def _filter_pairs(starts, stops, coords, indices): # pragma: no cover """ Converts all the pairs into a single integer mask, additionally filtering by the indices. Parameters ---------- starts, stops : list[int] The starts and stops to convert into an array. coords : np.ndarray The coordinates to filter by. indices : np.ndarray The indices in the form of slices such that indices[:, 0] are starts, indices[:, 1] are stops and indices[:, 2] are steps. Returns ------- mask : list The output integer mask. 
Examples -------- >>> import numpy as np >>> starts = numba.typed.List(); starts.append(2) >>> stops = numba.typed.List(); stops.append(7) >>> coords = np.array([[0, 1, 2, 3, 4, 5, 6, 7]]) >>> indices = np.array([[2, 8, 2]]) # Start, stop, step pairs >>> _filter_pairs(starts, stops, coords, indices) ListType[int64]([2, 4, 6]) """ mask = numba.typed.List.empty_list(numba.types.intp) # For each pair, for i in range(len(starts)): # For each element match within the pair range for j in range(starts[i], stops[i]): match = True # Check if it matches all indices for k in range(len(indices)): idx = indices[k] elem = coords[k, j] match &= (elem - idx[0]) % idx[2] == 0 and ( (idx[2] > 0 and idx[0] <= elem < idx[1]) or (idx[2] < 0 and idx[0] >= elem > idx[1]) ) # and append to the mask if so. if match: mask.append(j) return mask @numba.jit(nopython=True, nogil=True) def _join_adjacent_pairs(starts_old, stops_old): # pragma: no cover """ Joins adjacent pairs into one. For example, 2-5 and 5-7 will reduce to 2-7 (a single pair). This may help in returning a slice in the end which could be faster. Parameters ---------- starts_old, stops_old : list[int] The input starts and stops Returns ------- starts, stops : list[int] The reduced starts and stops. Examples -------- >>> starts = numba.typed.List(); starts.append(2); starts.append(5) >>> stops = numba.typed.List(); stops.append(5); stops.append(7) >>> _join_adjacent_pairs(starts, stops) (ListType[int64]([2]), ListType[int64]([7])) """ if len(starts_old) <= 1: return starts_old, stops_old starts = numba.typed.List.empty_list(numba.types.intp) starts.append(starts_old[0]) stops = numba.typed.List.empty_list(numba.types.intp) for i in range(1, len(starts_old)): if starts_old[i] != stops_old[i - 1]: starts.append(starts_old[i]) stops.append(stops_old[i - 1]) stops.append(stops_old[-1]) return starts, stops @numba.jit(nopython=True, nogil=True) def array_from_list_intp(l): # pragma: no cover n = len(l) a = np.empty(n, dtype=np.intp) for i in range(n): a[i] = l[i] return a class _AdvIdxInfo: def __init__(self, idx, pos, length): self.idx = idx self.pos = pos self.length = length sparse-0.12.0/sparse/_coo/numba_extension.py000066400000000000000000000230061402510130100210370ustar00rootroot00000000000000""" Numba support for COO objects. For now, this just supports attribute access """ import numpy as np import numba from numba.extending import ( models, register_model, box, unbox, NativeValue, make_attribute_wrapper, type_callable, ) from numba.core.imputils import impl_ret_borrowed, lower_constant, lower_builtin from numba.core.typing.typeof import typeof_impl from numba.core import cgutils, types from sparse._utils import _zero_of_dtype import contextlib from . 
import COO __all__ = ["COOType"] class COOType(types.Type): def __init__(self, data_dtype: np.dtype, coords_dtype: np.dtype, ndim: int): assert isinstance(data_dtype, np.dtype) assert isinstance(coords_dtype, np.dtype) self.data_dtype = data_dtype self.coords_dtype = coords_dtype self.ndim = ndim super().__init__( name="COOType[{!r}, {!r}, {!r}]".format( numba.from_dtype(data_dtype), numba.from_dtype(coords_dtype), ndim ) ) @property def key(self): return self.data_dtype, self.coords_dtype, self.ndim @property def data_type(self): return numba.from_dtype(self.data_dtype)[:] @property def coords_type(self): return numba.from_dtype(self.coords_dtype)[:, :] @property def shape_type(self): dt = numba.np.numpy_support.from_dtype(self.coords_dtype) return types.UniTuple(dt, self.ndim) @property def fill_value_type(self): return numba.from_dtype(self.data_dtype) @typeof_impl.register(COO) def _typeof_COO(val: COO, c) -> COOType: return COOType( data_dtype=val.data.dtype, coords_dtype=val.coords.dtype, ndim=val.ndim ) @register_model(COOType) class COOModel(models.StructModel): def __init__(self, dmm, fe_type): members = [ ("data", fe_type.data_type), ("coords", fe_type.coords_type), ("shape", fe_type.shape_type), ("fill_value", fe_type.fill_value_type), ] models.StructModel.__init__(self, dmm, fe_type, members) @type_callable(COO) def type_COO(context): # TODO: accept a fill_value kwarg def typer(coords, data, shape): return COOType( coords_dtype=numba.np.numpy_support.as_dtype(coords.dtype), data_dtype=numba.np.numpy_support.as_dtype(data.dtype), ndim=len(shape), ) return typer @lower_builtin(COO, types.Any, types.Any, types.Any) def impl_COO(context, builder, sig, args): typ = sig.return_type coords, data, shape = args coo = cgutils.create_struct_proxy(typ)(context, builder) coo.coords = coords coo.data = data coo.shape = shape coo.fill_value = context.get_constant_generic( builder, typ.fill_value_type, _zero_of_dtype(typ.data_dtype) ) return impl_ret_borrowed(context, builder, sig.return_type, coo._getvalue()) @lower_constant(COOType) def lower_constant_COO(context, builder, typ, pyval): coords = context.get_constant_generic(builder, typ.coords_type, pyval.coords) data = context.get_constant_generic(builder, typ.data_type, pyval.data) shape = context.get_constant_generic(builder, typ.shape_type, pyval.shape) fill_value = context.get_constant_generic( builder, typ.fill_value_type, pyval.fill_value ) return impl_ret_borrowed( context, builder, typ, cgutils.pack_struct(builder, (data, coords, shape, fill_value)), ) @contextlib.contextmanager def local_return(builder): """ Create a scope which can be broken from locally. 
Used as:: with local_return(c.builder) as ret: with c.builder.if(abort_cond): ret() do_some_other_stuff # no ret needed at the end, it's implied stuff_that_runs_unconditionally """ end_blk = builder.append_basic_block("end") def return_(): builder.branch(end_blk) yield return_ builder.branch(end_blk) # make sure all remaining code goes to the next block builder.position_at_end(end_blk) def _unbox_native_field(typ, obj, field_name: str, c): ret_ptr = cgutils.alloca_once(c.builder, c.context.get_value_type(typ)) is_error_ptr = cgutils.alloca_once_value(c.builder, cgutils.false_bit) fail_obj = c.context.get_constant_null(typ) with local_return(c.builder) as ret: fail_blk = c.builder.append_basic_block("fail") with c.builder.goto_block(fail_blk): c.builder.store(cgutils.true_bit, is_error_ptr) c.builder.store(fail_obj, ret_ptr) ret() field_obj = c.pyapi.object_getattr_string(obj, field_name) with cgutils.if_unlikely(c.builder, cgutils.is_null(c.builder, field_obj)): c.builder.branch(fail_blk) field_native = c.unbox(typ, field_obj) c.pyapi.decref(field_obj) with cgutils.if_unlikely(c.builder, field_native.is_error): c.builder.branch(fail_blk) c.builder.store(cgutils.false_bit, is_error_ptr) c.builder.store(field_native.value, ret_ptr) return NativeValue(c.builder.load(ret_ptr), is_error=c.builder.load(is_error_ptr)) @unbox(COOType) def unbox_COO(typ: COOType, obj: COO, c) -> NativeValue: ret_ptr = cgutils.alloca_once(c.builder, c.context.get_value_type(typ)) is_error_ptr = cgutils.alloca_once_value(c.builder, cgutils.false_bit) fail_obj = c.context.get_constant_null(typ) with local_return(c.builder) as ret: fail_blk = c.builder.append_basic_block("fail") with c.builder.goto_block(fail_blk): c.builder.store(cgutils.true_bit, is_error_ptr) c.builder.store(fail_obj, ret_ptr) ret() data = _unbox_native_field(typ.data_type, obj, "data", c) with cgutils.if_unlikely(c.builder, data.is_error): c.builder.branch(fail_blk) coords = _unbox_native_field(typ.coords_type, obj, "coords", c) with cgutils.if_unlikely(c.builder, coords.is_error): c.builder.branch(fail_blk) shape = _unbox_native_field(typ.shape_type, obj, "shape", c) with cgutils.if_unlikely(c.builder, shape.is_error): c.builder.branch(fail_blk) fill_value = _unbox_native_field(typ.fill_value_type, obj, "fill_value", c) with cgutils.if_unlikely(c.builder, fill_value.is_error): c.builder.branch(fail_blk) coo = cgutils.create_struct_proxy(typ)(c.context, c.builder) coo.coords = coords.value coo.data = data.value coo.shape = shape.value coo.fill_value = fill_value.value c.builder.store(cgutils.false_bit, is_error_ptr) c.builder.store(coo._getvalue(), ret_ptr) return NativeValue(c.builder.load(ret_ptr), is_error=c.builder.load(is_error_ptr)) @box(COOType) def box_COO(typ: COOType, val: "some LLVM thing", c) -> COO: ret_ptr = cgutils.alloca_once(c.builder, c.pyapi.pyobj) fail_obj = c.pyapi.get_null_object() coo = cgutils.create_struct_proxy(typ)(c.context, c.builder, value=val) with local_return(c.builder) as ret: data_obj = c.box(typ.data_type, coo.data) with cgutils.if_unlikely(c.builder, cgutils.is_null(c.builder, data_obj)): c.builder.store(fail_obj, ret_ptr) ret() coords_obj = c.box(typ.coords_type, coo.coords) with cgutils.if_unlikely(c.builder, cgutils.is_null(c.builder, coords_obj)): c.pyapi.decref(data_obj) c.builder.store(fail_obj, ret_ptr) ret() shape_obj = c.box(typ.shape_type, coo.shape) with cgutils.if_unlikely(c.builder, cgutils.is_null(c.builder, shape_obj)): c.pyapi.decref(coords_obj) c.pyapi.decref(data_obj) c.builder.store(fail_obj, 
ret_ptr) ret() fill_value_obj = c.box(typ.fill_value_type, coo.fill_value) with cgutils.if_unlikely(c.builder, cgutils.is_null(c.builder, fill_value_obj)): c.pyapi.decref(shape_obj) c.pyapi.decref(coords_obj) c.pyapi.decref(data_obj) c.builder.store(fail_obj, ret_ptr) ret() class_obj = c.pyapi.unserialize(c.pyapi.serialize_object(COO)) with cgutils.if_unlikely(c.builder, cgutils.is_null(c.builder, class_obj)): c.pyapi.decref(shape_obj) c.pyapi.decref(coords_obj) c.pyapi.decref(data_obj) c.pyapi.decref(fill_value_obj) c.builder.store(fail_obj, ret_ptr) ret() args = c.pyapi.tuple_pack([coords_obj, data_obj, shape_obj]) c.pyapi.decref(shape_obj) c.pyapi.decref(coords_obj) c.pyapi.decref(data_obj) with cgutils.if_unlikely(c.builder, cgutils.is_null(c.builder, args)): c.pyapi.decref(fill_value_obj) c.pyapi.decref(class_obj) c.builder.store(fail_obj, ret_ptr) ret() kwargs = c.pyapi.dict_pack([("fill_value", fill_value_obj)]) c.pyapi.decref(fill_value_obj) with cgutils.if_unlikely(c.builder, cgutils.is_null(c.builder, kwargs)): c.pyapi.decref(class_obj) c.builder.store(fail_obj, ret_ptr) ret() c.builder.store(c.pyapi.call(class_obj, args, kwargs), ret_ptr) c.pyapi.decref(class_obj) c.pyapi.decref(args) c.pyapi.decref(kwargs) return c.builder.load(ret_ptr) make_attribute_wrapper(COOType, "data", "data") make_attribute_wrapper(COOType, "coords", "coords") make_attribute_wrapper(COOType, "shape", "shape") make_attribute_wrapper(COOType, "fill_value", "fill_value") sparse-0.12.0/sparse/_dok.py000066400000000000000000000377341402510130100156530ustar00rootroot00000000000000from math import ceil from numbers import Integral from collections.abc import Iterable import numpy as np from numpy.lib.mixins import NDArrayOperatorsMixin from ._slicing import normalize_index from ._utils import equivalent from ._sparse_array import SparseArray class DOK(SparseArray, NDArrayOperatorsMixin): """ A class for building sparse multidimensional arrays. Parameters ---------- shape : tuple[int] (DOK.ndim,) The shape of the array. data : dict, optional The key-value pairs for the data in this array. dtype : np.dtype, optional The data type of this array. If left empty, it is inferred from the first element. fill_value : scalar, optional The fill value of this array. Attributes ---------- dtype : numpy.dtype The datatype of this array. Can be :code:`None` if no elements have been set yet. shape : tuple[int] The shape of this array. data : dict The keys of this dictionary contain all the indices and the values contain the nonzero entries. See Also -------- COO : A read-only sparse array. Examples -------- You can create :obj:`DOK` objects from Numpy arrays. >>> x = np.eye(5, dtype=np.uint8) >>> x[2, 3] = 5 >>> s = DOK.from_numpy(x) >>> s You can also create them from just shapes, and use slicing assignment. >>> s2 = DOK((5, 5), dtype=np.int64) >>> s2[1:3, 1:3] = [[4, 5], [6, 7]] >>> s2 You can convert :obj:`DOK` arrays to :obj:`COO` arrays, or :obj:`numpy.ndarray` objects. >>> from sparse import COO >>> s3 = COO(s2) >>> s3 >>> s2.todense() # doctest: +NORMALIZE_WHITESPACE array([[0, 0, 0, 0, 0], [0, 4, 5, 0, 0], [0, 6, 7, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]) >>> s4 = COO.from_numpy(np.eye(4, dtype=np.uint8)) >>> s4 >>> s5 = DOK.from_coo(s4) >>> s5 You can also create :obj:`DOK` arrays from a shape and a dict of values. Zeros are automatically ignored. >>> values = { ... (1, 2, 3): 4, ... (3, 2, 1): 0, ... 
} >>> s6 = DOK((5, 5, 5), values) >>> s6 """ def __init__(self, shape, data=None, dtype=None, fill_value=None): from ._coo import COO self.data = dict() if isinstance(shape, COO): ar = DOK.from_coo(shape) self._make_shallow_copy_of(ar) return if isinstance(shape, np.ndarray): ar = DOK.from_numpy(shape) self._make_shallow_copy_of(ar) return self.dtype = np.dtype(dtype) if not data: data = dict() super().__init__(shape, fill_value=fill_value) if isinstance(data, dict): if not dtype: if not len(data): self.dtype = np.dtype("float64") else: self.dtype = np.result_type( *map(lambda x: np.asarray(x).dtype, data.values()) ) for c, d in data.items(): self[c] = d else: raise ValueError("data must be a dict.") @classmethod def from_coo(cls, x): """ Get a :obj:`DOK` array from a :obj:`COO` array. Parameters ---------- x : COO The array to convert. Returns ------- DOK The equivalent :obj:`DOK` array. Examples -------- >>> from sparse import COO >>> s = COO.from_numpy(np.eye(4)) >>> s2 = DOK.from_coo(s) >>> s2 """ ar = cls(x.shape, dtype=x.dtype, fill_value=x.fill_value) for c, d in zip(x.coords.T, x.data): ar.data[tuple(c)] = d return ar def to_coo(self): """ Convert this :obj:`DOK` array to a :obj:`COO` array. Returns ------- COO The equivalent :obj:`COO` array. Examples -------- >>> s = DOK((5, 5)) >>> s[1:3, 1:3] = [[4, 5], [6, 7]] >>> s >>> s2 = s.to_coo() >>> s2 """ from ._coo import COO return COO(self) @classmethod def from_numpy(cls, x): """ Get a :obj:`DOK` array from a Numpy array. Parameters ---------- x : np.ndarray The array to convert. Returns ------- DOK The equivalent :obj:`DOK` array. Examples -------- >>> s = DOK.from_numpy(np.eye(4)) >>> s """ ar = cls(x.shape, dtype=x.dtype) coords = np.nonzero(x) data = x[coords] for c in zip(data, *coords): d, c = c[0], c[1:] ar.data[c] = d return ar @property def nnz(self): """ The number of nonzero elements in this array. Returns ------- int The number of nonzero elements. See Also -------- COO.nnz : Equivalent :obj:`COO` array property. numpy.count_nonzero : A similar Numpy function. scipy.sparse.dok_matrix.nnz : The Scipy equivalent property. Examples -------- >>> values = { ... (1, 2, 3): 4, ... (3, 2, 1): 0, ... } >>> s = DOK((5, 5, 5), values) >>> s.nnz 1 """ return len(self.data) @property def format(self): """ The storage format of this array. Returns ------- str The storage format of this array. See Also ------- COO.format : Equivalent :obj:`COO` array property. GCXS.format : Equivalent :obj:`GCXS` array property. scipy.sparse.dok_matrix.format : The Scipy equivalent property. Examples ------- >>> import sparse >>> s = sparse.random((5,5), density=0.2, format='dok') >>> s.format 'dok' """ return "dok" @property def nbytes(self): """ The number of bytes taken up by this object. Note that for small arrays, this may undercount the number of bytes due to the large constant overhead. Returns ------- int The approximate bytes of memory taken by this object. See Also -------- numpy.ndarray.nbytes : The equivalent Numpy property. 
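Notes ----- The value is ``self.nnz * self.dtype.itemsize``, so only the stored values are counted, not the ``dict`` keys holding the coordinates; e.g. 1000 stored ``float64`` values give ``1000 * 8 = 8000`` bytes, as in the example below.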
Examples -------- >>> import sparse >>> x = sparse.random((100,100),density=.1,format='dok') >>> x.nbytes 8000 """ return self.nnz * self.dtype.itemsize def __getitem__(self, key): # 1D fancy indexing if ( self.ndim == 1 and isinstance(key, Iterable) and all(isinstance(i, (int, np.integer)) for i in key) and len(key) > 1 ): key = (key,) if isinstance(key, tuple) and all(isinstance(k, Iterable) for k in key): if len(key) != self.ndim: raise NotImplementedError( f"Index sequences for all {self.ndim} array dimensions needed!" ) if not all(len(key[0]) == len(k) for k in key): raise IndexError("Unequal length of index sequences!") return self._fancy_getitem(key) key = normalize_index(key, self.shape) # single element doesn't return sparse array if all(isinstance(k, Integral) for k in key): if key in self.data: return self.data[key] else: return self.fill_value slice_key = [to_slice(k) for k in key] coords_array = np.asarray(list(self.data.keys())) values_array = np.asarray(list(self.data.values())) filtered_coords, filter_arr = self._filter_by_key(coords_array, slice_key) filtered_values = values_array[filter_arr] res_shape = [] keep_dims = [] for i, k in enumerate(key): if isinstance(k, slice): n_elements = ceil((k.stop - k.start) / k.step) res_shape.append(n_elements) keep_dims.append(i) # none of the keys in this array make it into the slice if filtered_coords.size == 0: return DOK(shape=res_shape, dtype=self.dtype, fill_value=self.fill_value) starts = np.asarray([k.start for k in slice_key]) steps = np.asarray([k.step for k in slice_key]) new_coords = (filtered_coords - starts) // steps new_coords_squeezed = np.take(new_coords, keep_dims, axis=1) new_data = { tuple(coord): val for coord, val in zip(new_coords_squeezed, filtered_values) } return DOK( shape=res_shape, data=new_data, dtype=self.dtype, fill_value=self.fill_value ) def _fancy_getitem(self, key): """Subset of fancy indexing, when all dimensions are accessed""" new_data = {} for i, k in enumerate(zip(*key)): if k in self.data: new_data[i] = self.data[k] return DOK( shape=(len(key[0])), data=new_data, dtype=self.dtype, fill_value=self.fill_value, ) def _filter_by_key(self, coords, slice_key): """Filter data coordinates to be within given slice """ filter_arr = np.ones(coords.shape[0], dtype=bool) for coords_in_dim, sl in zip(coords.T, slice_key): filter_arr *= ( (coords_in_dim >= sl.start) * (coords_in_dim < sl.stop) * ((coords_in_dim - sl.start) % sl.step == 0) ) return coords[filter_arr], filter_arr def __setitem__(self, key, value): value = np.asarray(value, dtype=self.dtype) # 1D fancy indexing if ( self.ndim == 1 and isinstance(key, Iterable) and all(isinstance(i, (int, np.integer)) for i in key) ): key = (key,) if isinstance(key, tuple) and all(isinstance(k, Iterable) for k in key): if len(key) != self.ndim: raise NotImplementedError( f"Index sequences for all {self.ndim} array dimensions needed!" 
) if not all(len(key[0]) == len(k) for k in key): raise IndexError("Unequal length of index sequences!") self._fancy_setitem(key, value) return key = normalize_index(key, self.shape) key_list = [int(k) if isinstance(k, Integral) else k for k in key] self._setitem(key_list, value) def _fancy_setitem(self, idxs, values): idxs = tuple(np.asanyarray(idxs) for idxs in idxs) if not all(np.issubdtype(k.dtype, np.integer) for k in idxs): raise IndexError("Indices must be sequences of integer types!") if idxs[0].ndim != 1: raise IndexError("Indices are not 1d sequences!") if values.ndim == 0: values = np.full(idxs[0].size, values, self.dtype) elif values.ndim > 1: raise ValueError(f"Dimension of values ({values.ndim}) must be 0 or 1!") if not idxs[0].shape == values.shape: raise ValueError( f"Shape mismatch of indices ({idxs[0].shape}) and values ({values.shape})!" ) fill_value = self.fill_value data = self.data for idx, value in zip(zip(*idxs), values): if not value == fill_value: data[idx] = value elif idx in data: del data[idx] def _setitem(self, key_list, value): value_missing_dims = ( len([ind for ind in key_list if isinstance(ind, slice)]) - value.ndim ) if value_missing_dims < 0: raise ValueError("setting an array element with a sequence.") for i, ind in enumerate(key_list): if isinstance(ind, slice): step = ind.step if ind.step is not None else 1 if step > 0: start = ind.start if ind.start is not None else 0 start = max(start, 0) stop = ind.stop if ind.stop is not None else self.shape[i] stop = min(stop, self.shape[i]) if start > stop: start = stop else: start = ind.start or self.shape[i] - 1 stop = ind.stop if ind.stop is not None else -1 start = min(start, self.shape[i] - 1) stop = max(stop, -1) if start < stop: start = stop key_list_temp = key_list[:] for v_idx, ki in enumerate(range(start, stop, step)): key_list_temp[i] = ki vi = ( value if value_missing_dims > 0 else (value[0] if value.shape[0] == 1 else value[v_idx]) ) self._setitem(key_list_temp, vi) return elif not isinstance(ind, Integral): raise IndexError( "All indices must be slices or integers" " when setting an item." ) key = tuple(key_list) if not equivalent(value, self.fill_value): self.data[key] = value[()] elif key in self.data: del self.data[key] def __str__(self): return "<DOK: shape={!s}, dtype={!s}, nnz={:d}, fill_value={!s}>".format( self.shape, self.dtype, self.nnz, self.fill_value ) __repr__ = __str__ def todense(self): """ Convert this :obj:`DOK` array into a Numpy array. Returns ------- numpy.ndarray The equivalent dense array. See Also -------- COO.todense : Equivalent :obj:`COO` array method. scipy.sparse.dok_matrix.todense : Equivalent Scipy method. Examples -------- >>> s = DOK((5, 5)) >>> s[1:3, 1:3] = [[4, 5], [6, 7]] >>> s.todense() # doctest: +SKIP array([[0., 0., 0., 0., 0.], [0., 4., 5., 0., 0.], [0., 6., 7., 0., 0.], [0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.]]) """ result = np.full(self.shape, self.fill_value, self.dtype) for c, d in self.data.items(): result[c] = d return result def asformat(self, format): """ Convert this sparse array to a given format. Parameters ---------- format : str A format string. Returns ------- out : SparseArray The converted array. Raises ------ NotImplementedError If the format isn't supported.
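Examples
--------
A small sketch of a conversion (repr output marked ``+SKIP`` since the exact formatting may differ):

>>> s = DOK((2, 2), {(0, 0): 1.0})
>>> s.asformat("coo")  # doctest: +SKIP
<COO: shape=(2, 2), dtype=float64, nnz=1, fill_value=0.0>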
""" if format == "dok" or format is DOK: return self from ._coo import COO if format == "coo" or format is COO: return COO.from_iter( self.data, shape=self.shape, fill_value=self.fill_value, dtype=self.dtype, ) raise NotImplementedError("The given format is not supported.") def to_slice(k): """Convert integer indices to one-element slices for consistency""" if isinstance(k, Integral): return slice(k, k + 1, 1) return k sparse-0.12.0/sparse/_io.py000066400000000000000000000073321402510130100154740ustar00rootroot00000000000000import numpy as np from ._coo.core import COO from ._compressed import GCXS def save_npz(filename, matrix, compressed=True): """Save a sparse matrix to disk in numpy's ``.npz`` format. Note: This is not binary compatible with scipy's ``save_npz()``. This binary format is not currently stable. Will save a file that can only be opend with this package's ``load_npz()``. Parameters ---------- filename : string or file Either the file name (string) or an open file (file-like object) where the data will be saved. If file is a string or a Path, the ``.npz`` extension will be appended to the file name if it is not already there matrix : SparseArray The matrix to save to disk compressed : bool Whether to save in compressed or uncompressed mode Example -------- Store sparse matrix to disk, and load it again: >>> import os >>> import sparse >>> import numpy as np >>> dense_mat = np.array([[[0., 0.], [0., 0.70677779]], [[0., 0.], [0., 0.86522495]]]) >>> mat = sparse.COO(dense_mat) >>> mat >>> sparse.save_npz('mat.npz', mat) >>> loaded_mat = sparse.load_npz('mat.npz') >>> loaded_mat >>> os.remove('mat.npz') See Also -------- load_npz scipy.sparse.save_npz scipy.sparse.load_npz numpy.savez numpy.load """ nodes = { "data": matrix.data, "shape": matrix.shape, "fill_value": matrix.fill_value, } if type(matrix) == COO: nodes["coords"] = matrix.coords elif type(matrix) == GCXS: nodes["indices"] = matrix.indices nodes["indptr"] = matrix.indptr nodes["compressed_axes"] = matrix.compressed_axes if compressed: np.savez_compressed(filename, **nodes) else: np.savez(filename, **nodes) def load_npz(filename): """Load a sparse matrix in numpy's ``.npz`` format from disk. Note: This is not binary compatible with scipy's ``save_npz()`` output. This binary format is not currently stable. Will only load files saved by this package. Parameters ---------- filename : file-like object, string, or pathlib.Path The file to read. File-like objects must support the ``seek()`` and ``read()`` methods. Returns ------- SparseArray The sparse matrix at path ``filename``. Example -------- See :obj:`save_npz` for usage examples. 
See Also -------- save_npz scipy.sparse.save_npz scipy.sparse.load_npz numpy.savez numpy.load """ with np.load(filename) as fp: try: coords = fp["coords"] data = fp["data"] shape = tuple(fp["shape"]) fill_value = fp["fill_value"][()] return COO( coords=coords, data=data, shape=shape, sorted=True, has_duplicates=False, fill_value=fill_value, ) except KeyError: pass try: data = fp["data"] indices = fp["indices"] indptr = fp["indptr"] comp_axes = fp["compressed_axes"] shape = tuple(fp["shape"]) fill_value = fp["fill_value"][()] return GCXS( (data, indices, indptr), shape=shape, fill_value=fill_value, compressed_axes=comp_axes, ) except KeyError: raise RuntimeError( "The file {!s} does not contain a valid sparse matrix".format(filename) ) sparse-0.12.0/sparse/_numba_extension.py000066400000000000000000000002451402510130100202570ustar00rootroot00000000000000def _init_extension(): """ Load extensions when numba is loaded. This name must match the one in setup.py """ import sparse._coo.numba_extension sparse-0.12.0/sparse/_settings.py000066400000000000000000000006501402510130100167210ustar00rootroot00000000000000import os import numpy AUTO_DENSIFY = bool(int(os.environ.get("SPARSE_AUTO_DENSIFY", "0"))) WARN_ON_TOO_DENSE = bool(int(os.environ.get("SPARSE_WARN_ON_TOO_DENSE", "0"))) def _is_nep18_enabled(): class A: def __array_function__(self, *args, **kwargs): return True try: return numpy.concatenate([A()]) except ValueError: return False NEP18_ENABLED = _is_nep18_enabled() sparse-0.12.0/sparse/_slicing.py000066400000000000000000000213141402510130100165110ustar00rootroot00000000000000# Most of this file is taken from https://github.com/dask/dask/blob/master/dask/array/slicing.py # See license at https://github.com/dask/dask/blob/master/LICENSE.txt import math from collections.abc import Iterable from numbers import Integral, Number import numpy as np def normalize_index(idx, shape): """Normalize slicing indexes 1. Replaces ellipses with many full slices 2. Adds full slices to end of index 3. Checks bounding conditions 4. Replaces numpy arrays with lists 5. Posify's slices integers and lists 6. Normalizes slices to canonical form Examples -------- >>> normalize_index(1, (10,)) (1,) >>> normalize_index(-1, (10,)) (9,) >>> normalize_index([-1], (10,)) (array([9]),) >>> normalize_index(slice(-3, 10, 1), (10,)) (slice(7, 10, 1),) >>> normalize_index((Ellipsis, None), (10,)) (slice(0, 10, 1), None) """ if not isinstance(idx, tuple): idx = (idx,) idx = replace_ellipsis(len(shape), idx) n_sliced_dims = 0 for i in idx: if hasattr(i, "ndim") and i.ndim >= 1: n_sliced_dims += i.ndim elif i is None: continue else: n_sliced_dims += 1 idx = idx + (slice(None),) * (len(shape) - n_sliced_dims) if len([i for i in idx if i is not None]) > len(shape): raise IndexError("Too many indices for array") none_shape = [] i = 0 for ind in idx: if ind is not None: none_shape.append(shape[i]) i += 1 else: none_shape.append(None) for i, d in zip(idx, none_shape): if d is not None: check_index(i, d) idx = tuple(map(sanitize_index, idx)) idx = tuple(map(replace_none, idx, none_shape)) idx = posify_index(none_shape, idx) idx = tuple(map(clip_slice, idx, none_shape)) return idx def replace_ellipsis(n, index): """Replace ... 
with slices, :, : ,: >>> replace_ellipsis(4, (3, Ellipsis, 2)) (3, slice(None, None, None), slice(None, None, None), 2) >>> replace_ellipsis(2, (Ellipsis, None)) (slice(None, None, None), slice(None, None, None), None) """ # Careful about using in or index because index may contain arrays isellipsis = [i for i, ind in enumerate(index) if ind is Ellipsis] if not isellipsis: return index elif len(isellipsis) > 1: raise IndexError("an index can only have a single ellipsis ('...')") else: loc = isellipsis[0] extra_dimensions = n - (len(index) - sum(i is None for i in index) - 1) return ( index[:loc] + (slice(None, None, None),) * extra_dimensions + index[loc + 1 :] ) def check_index(ind, dimension): """Check validity of index for a given dimension Examples -------- >>> check_index(3, 5) >>> check_index(5, 5) Traceback (most recent call last): ... IndexError: Index is not smaller than dimension 5 >= 5 >>> check_index(6, 5) Traceback (most recent call last): ... IndexError: Index is not smaller than dimension 6 >= 5 >>> check_index(-1, 5) >>> check_index(-6, 5) Traceback (most recent call last): ... IndexError: Negative index is not greater than negative dimension -6 <= -5 >>> check_index([1, 2], 5) >>> check_index([6, 3], 5) Traceback (most recent call last): ... IndexError: Index out of bounds for dimension 5 >>> check_index(slice(0, 3), 5) """ # unknown dimension, assumed to be in bounds if isinstance(ind, Iterable): x = np.asanyarray(ind) if ( np.issubdtype(x.dtype, np.integer) and ((x >= dimension) | (x < -dimension)).any() ): raise IndexError("Index out of bounds for dimension {:d}".format(dimension)) elif x.dtype == bool and len(x) != dimension: raise IndexError( "boolean index did not match indexed array; dimension is {:d} " "but corresponding boolean dimension is {:d}".format(dimension, len(x)) ) elif isinstance(ind, slice): return elif not isinstance(ind, Integral): raise IndexError( "only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) and " "integer or boolean arrays are valid indices" ) elif ind >= dimension: raise IndexError( "Index is not smaller than dimension {:d} >= {:d}".format(ind, dimension) ) elif ind < -dimension: msg = "Negative index is not greater than negative dimension {:d} <= -{:d}" raise IndexError(msg.format(ind, dimension)) def sanitize_index(ind): """Sanitize the elements for indexing along one axis >>> sanitize_index([2, 3, 5]) array([2, 3, 5]) >>> sanitize_index([True, False, True, False]) array([0, 2]) >>> sanitize_index(np.array([1, 2, 3])) array([1, 2, 3]) >>> sanitize_index(np.array([False, True, True])) array([1, 2]) >>> type(sanitize_index(np.int32(0))) # doctest: +SKIP >>> sanitize_index(0.5) # doctest: +SKIP Traceback (most recent call last): ... 
IndexError: only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) and integer or boolean arrays are valid indices """ if ind is None: return None elif isinstance(ind, slice): return slice( _sanitize_index_element(ind.start), _sanitize_index_element(ind.stop), _sanitize_index_element(ind.step), ) elif isinstance(ind, Number): return _sanitize_index_element(ind) if not hasattr(ind, "dtype") and len(ind) == 0: ind = np.array([], dtype=np.intp) ind = np.asarray(ind) if ind.dtype == np.bool_: nonzero = np.nonzero(ind) if len(nonzero) == 1: # If a 1-element tuple, unwrap the element nonzero = nonzero[0] return np.asanyarray(nonzero) elif np.issubdtype(ind.dtype, np.integer): return ind else: raise IndexError( "only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) and " "integer or boolean arrays are valid indices" ) def _sanitize_index_element(ind): """Sanitize a one-element index.""" if ind is None: return None return int(ind) def posify_index(shape, ind): """Flip negative indices around to positive ones >>> posify_index(10, 3) 3 >>> posify_index(10, -3) 7 >>> posify_index(10, [3, -3]) array([3, 7]) >>> posify_index((10, 20), (3, -3)) (3, 17) >>> posify_index((10, 20), (3, [3, 4, -3])) # doctest: +NORMALIZE_WHITESPACE (3, array([ 3, 4, 17])) """ if isinstance(ind, tuple): return tuple(map(posify_index, shape, ind)) if isinstance(ind, Integral): if ind < 0 and not math.isnan(shape): return ind + shape else: return ind if isinstance(ind, (np.ndarray, list)) and not math.isnan(shape): ind = np.asanyarray(ind) return np.where(ind < 0, ind + shape, ind) if isinstance(ind, slice): start, stop, step = ind.start, ind.stop, ind.step if start < 0: start += shape if not (0 > stop >= step) and stop < 0: stop += shape return slice(start, stop, ind.step) return ind def clip_slice(idx, dim): """ Clip slice to its effective size given the shape. Parameters ---------- idx : The index. dim : The size along the corresponding dimension. Returns ------- idx : slice Examples -------- >>> clip_slice(slice(0, 20, 1), 10) slice(0, 10, 1) """ if not isinstance(idx, slice): return idx start, stop, step = idx.start, idx.stop, idx.step if step > 0: start = max(start, 0) stop = min(stop, dim) if start > stop: start = stop else: start = min(start, dim - 1) stop = max(stop, -1) if start < stop: start = stop return slice(start, stop, step) def replace_none(idx, dim): """ Normalize slices to canonical form, i.e. replace ``None`` with the appropriate integers. Parameters ---------- idx: slice or other index dim: dimension length Examples -------- >>> replace_none(slice(None, None, None), 10) slice(0, 10, 1) """ if not isinstance(idx, slice): return idx start, stop, step = idx.start, idx.stop, idx.step if step is None: step = 1 if step > 0: if start is None: start = 0 if stop is None: stop = dim else: if start is None: start = dim - 1 if stop is None: stop = -1 return slice(start, stop, step) sparse-0.12.0/sparse/_sparse_array.py000066400000000000000000000670451402510130100175670ustar00rootroot00000000000000from abc import ABCMeta, abstractmethod from collections.abc import Iterable from numbers import Integral from typing import Callable import operator from functools import reduce import numpy as np import scipy.sparse as ss from ._umath import elemwise from ._utils import _zero_of_dtype, html_table, equivalent, normalize_axis _reduce_super_ufunc = {np.add: np.multiply, np.multiply: np.power} class SparseArray: """ An abstract base class for all the sparse array classes. 
Attributes ---------- dtype : numpy.dtype The data type of this array. fill_value : scalar The fill value of this array. """ __metaclass__ = ABCMeta def __init__(self, shape, fill_value=None): if not isinstance(shape, Iterable): shape = (shape,) if not all(isinstance(l, Integral) and int(l) >= 0 for l in shape): raise ValueError( "shape must be an non-negative integer or a tuple " "of non-negative integers." ) self.shape = tuple(int(l) for l in shape) if fill_value is not None: if not hasattr(fill_value, "dtype") or fill_value.dtype != self.dtype: self.fill_value = self.dtype.type(fill_value) else: self.fill_value = fill_value else: self.fill_value = _zero_of_dtype(self.dtype) dtype = None @property @abstractmethod def nnz(self): """ The number of nonzero elements in this array. Note that any duplicates in :code:`coords` are counted multiple times. To avoid this, call :obj:`COO.sum_duplicates`. Returns ------- int The number of nonzero elements in this array. See Also -------- DOK.nnz : Equivalent :obj:`DOK` array property. numpy.count_nonzero : A similar Numpy function. scipy.sparse.coo_matrix.nnz : The Scipy equivalent property. Examples -------- >>> import numpy as np >>> from sparse import COO >>> x = np.array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3, 0, 0]) >>> np.count_nonzero(x) 6 >>> s = COO.from_numpy(x) >>> s.nnz 6 >>> np.count_nonzero(x) == s.nnz True """ @property def ndim(self): """ The number of dimensions of this array. Returns ------- int The number of dimensions of this array. See Also -------- DOK.ndim : Equivalent property for :obj:`DOK` arrays. numpy.ndarray.ndim : Numpy equivalent property. Examples -------- >>> from sparse import COO >>> import numpy as np >>> x = np.random.rand(1, 2, 3, 1, 2) >>> s = COO.from_numpy(x) >>> s.ndim 5 >>> s.ndim == x.ndim True """ return len(self.shape) @property def size(self): """ The number of all elements (including zeros) in this array. Returns ------- int The number of elements. See Also -------- numpy.ndarray.size : Numpy equivalent property. Examples -------- >>> from sparse import COO >>> import numpy as np >>> x = np.zeros((10, 10)) >>> s = COO.from_numpy(x) >>> s.size 100 """ # We use this instead of np.prod because np.prod # returns a float64 for an empty shape. return reduce(operator.mul, self.shape, 1) @property def density(self): """ The ratio of nonzero to all elements in this array. Returns ------- float The ratio of nonzero to all elements. See Also -------- COO.size : Number of elements. COO.nnz : Number of nonzero elements. Examples -------- >>> import numpy as np >>> from sparse import COO >>> x = np.zeros((8, 8)) >>> x[0, :] = 1 >>> s = COO.from_numpy(x) >>> s.density 0.125 """ return self.nnz / self.size def _repr_html_(self): """ Diagnostic report about this array. Renders in Jupyter. """ return html_table(self) @abstractmethod def asformat(self, format): """ Convert this sparse array to a given format. Parameters ---------- format : str A format string. Returns ------- out : SparseArray The converted array. Raises ------ NotImplementedError If the format isn't supported. """ @abstractmethod def todense(self): """ Convert this :obj:`SparseArray` array to a dense :obj:`numpy.ndarray`. Note that this may take a large amount of memory and time. Returns ------- numpy.ndarray The converted dense array. See Also -------- DOK.todense : Equivalent :obj:`DOK` array method. COO.todense : Equivalent :obj:`COO` array method. scipy.sparse.coo_matrix.todense : Equivalent Scipy method. 
Examples -------- >>> import sparse >>> x = np.random.randint(100, size=(7, 3)) >>> s = sparse.COO.from_numpy(x) >>> x2 = s.todense() >>> np.array_equal(x, x2) True """ def _make_shallow_copy_of(self, other): self.__dict__ = other.__dict__.copy() def __array__(self, *args, **kwargs): from ._settings import AUTO_DENSIFY if not AUTO_DENSIFY: raise RuntimeError( "Cannot convert a sparse array to dense automatically. " "To manually densify, use the todense method." ) return np.asarray(self.todense(), *args, **kwargs) def __array_function__(self, func, types, args, kwargs): import sparse as module sparse_func = None try: submodules = getattr(func, "__module__", "numpy").split(".")[1:] for submodule in submodules: module = getattr(module, submodule) sparse_func = getattr(module, func.__name__) except AttributeError: pass else: return sparse_func(*args, **kwargs) try: sparse_func = getattr(type(self), func.__name__) except AttributeError: pass if ( not isinstance(sparse_func, Callable) and len(args) == 1 and len(kwargs) == 0 ): try: return getattr(self, func.__name__) except AttributeError: pass if sparse_func is None: return NotImplemented return sparse_func(*args, **kwargs) @staticmethod def _reduce(method, *args, **kwargs): assert len(args) == 1 self = args[0] if isinstance(self, ss.spmatrix): self = type(self).from_scipy_sparse(self) return self.reduce(method, **kwargs) def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): out = kwargs.pop("out", None) if out is not None and not all(isinstance(x, type(self)) for x in out): return NotImplemented if getattr(ufunc, "signature", None) is not None: return self.__array_function__( ufunc, (np.ndarray, type(self)), inputs, kwargs ) if out is not None: kwargs["dtype"] = out[0].dtype if method == "outer": method = "__call__" cum_ndim = 0 inputs_transformed = [] for inp in reversed(inputs): inputs_transformed.append(inp[(Ellipsis,) + (None,) * cum_ndim]) cum_ndim += inp.ndim inputs = tuple(reversed(inputs_transformed)) if method == "__call__": result = elemwise(ufunc, *inputs, **kwargs) elif method == "reduce": result = SparseArray._reduce(ufunc, *inputs, **kwargs) else: return NotImplemented if out is not None: (out,) = out if out.shape != result.shape: raise ValueError( "non-broadcastable output operand with shape %s " "doesn't match the broadcast shape %s" % (out.shape, result.shape) ) out._make_shallow_copy_of(result) return out return result def reduce(self, method, axis=(0,), keepdims=False, **kwargs): """ Performs a reduction operation on this array. Parameters ---------- method : numpy.ufunc The method to use for performing the reduction. axis : Union[int, Iterable[int]], optional The axes along which to perform the reduction. Uses all axes by default. keepdims : bool, optional Whether or not to keep the dimensions of the original array. kwargs : dict Any extra arguments to pass to the reduction operation. See Also -------- numpy.ufunc.reduce : A similar Numpy method. COO.reduce : This method implemented on COO arrays. GCXS.reduce : This method implemented on GCXS arrays. 
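Examples
--------
A minimal sketch: reducing with :obj:`numpy.add` along an axis is equivalent to summing along that axis.

>>> import sparse
>>> import numpy as np
>>> s = sparse.COO.from_numpy(np.add.outer(np.arange(3), np.arange(3)))
>>> s.reduce(np.add, axis=0).todense()  # doctest: +SKIP
array([3, 6, 9])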
""" axis = normalize_axis(axis, self.ndim) zero_reduce_result = method.reduce([self.fill_value, self.fill_value], **kwargs) reduce_super_ufunc = None if not equivalent(zero_reduce_result, self.fill_value): reduce_super_ufunc = _reduce_super_ufunc.get(method, None) if reduce_super_ufunc is None: raise ValueError( "Performing this reduction operation would produce " "a dense result: %s" % str(method) ) if not isinstance(axis, tuple): axis = (axis,) out = self._reduce_calc(method, axis, keepdims, **kwargs) if len(out) == 1: return out[0] data, counts, axis, n_cols, arr_attrs = out result_fill_value = self.fill_value if reduce_super_ufunc is None: missing_counts = counts != n_cols data[missing_counts] = method( data[missing_counts], self.fill_value, **kwargs ) else: data = method( data, reduce_super_ufunc(self.fill_value, n_cols - counts), ).astype(data.dtype) result_fill_value = reduce_super_ufunc(self.fill_value, n_cols) out = self._reduce_return(data, arr_attrs, result_fill_value) if keepdims: shape = list(self.shape) for ax in axis: shape[ax] = 1 out = out.reshape(shape) if out.ndim == 0: return out[()] return out def _reduce_calc(self, method, axis, keepdims, **kwargs): raise NotImplementedError def _reduce_return(self, data, arr_attrs, result_fill_value): raise NotImplementedError def sum(self, axis=None, keepdims=False, dtype=None, out=None): """ Performs a sum operation along the given axes. Uses all axes by default. Parameters ---------- axis : Union[int, Iterable[int]], optional The axes along which to sum. Uses all axes by default. keepdims : bool, optional Whether or not to keep the dimensions of the original array. dtype: numpy.dtype The data type of the output array. Returns ------- SparseArray The reduced output sparse array. See Also -------- :obj:`numpy.sum` : Equivalent numpy function. scipy.sparse.coo_matrix.sum : Equivalent Scipy function. """ return np.add.reduce(self, out=out, axis=axis, keepdims=keepdims, dtype=dtype) def max(self, axis=None, keepdims=False, out=None): """ Maximize along the given axes. Uses all axes by default. Parameters ---------- axis : Union[int, Iterable[int]], optional The axes along which to maximize. Uses all axes by default. keepdims : bool, optional Whether or not to keep the dimensions of the original array. dtype: numpy.dtype The data type of the output array. Returns ------- SparseArray The reduced output sparse array. See Also -------- :obj:`numpy.max` : Equivalent numpy function. scipy.sparse.coo_matrix.max : Equivalent Scipy function. """ return np.maximum.reduce(self, out=out, axis=axis, keepdims=keepdims) amax = max def any(self, axis=None, keepdims=False, out=None): """ See if any values along array are ``True``. Uses all axes by default. Parameters ---------- axis : Union[int, Iterable[int]], optional The axes along which to minimize. Uses all axes by default. keepdims : bool, optional Whether or not to keep the dimensions of the original array. Returns ------- SparseArray The reduced output sparse array. See Also -------- :obj:`numpy.all` : Equivalent numpy function. """ return np.logical_or.reduce(self, out=out, axis=axis, keepdims=keepdims) def all(self, axis=None, keepdims=False, out=None): """ See if all values in an array are ``True``. Uses all axes by default. Parameters ---------- axis : Union[int, Iterable[int]], optional The axes along which to minimize. Uses all axes by default. keepdims : bool, optional Whether or not to keep the dimensions of the original array. 
Returns ------- SparseArray The reduced output sparse array. See Also -------- :obj:`numpy.all` : Equivalent numpy function. """ return np.logical_and.reduce(self, out=out, axis=axis, keepdims=keepdims) def min(self, axis=None, keepdims=False, out=None): """ Minimize along the given axes. Uses all axes by default. Parameters ---------- axis : Union[int, Iterable[int]], optional The axes along which to minimize. Uses all axes by default. keepdims : bool, optional Whether or not to keep the dimensions of the original array. dtype: numpy.dtype The data type of the output array. Returns ------- SparseArray The reduced output sparse array. See Also -------- :obj:`numpy.min` : Equivalent numpy function. scipy.sparse.coo_matrix.min : Equivalent Scipy function. """ return np.minimum.reduce(self, out=out, axis=axis, keepdims=keepdims) amin = min def prod(self, axis=None, keepdims=False, dtype=None, out=None): """ Performs a product operation along the given axes. Uses all axes by default. Parameters ---------- axis : Union[int, Iterable[int]], optional The axes along which to multiply. Uses all axes by default. keepdims : bool, optional Whether or not to keep the dimensions of the original array. dtype: numpy.dtype The data type of the output array. Returns ------- SparseArray The reduced output sparse array. See Also -------- :obj:`numpy.prod` : Equivalent numpy function. """ return np.multiply.reduce( self, out=out, axis=axis, keepdims=keepdims, dtype=dtype ) def round(self, decimals=0, out=None): """ Evenly round to the given number of decimals. See also -------- :obj:`numpy.round` : NumPy equivalent ufunc. :obj:`COO.elemwise`: Apply an arbitrary element-wise function to one or two arguments. """ if out is not None and not isinstance(out, tuple): out = (out,) return self.__array_ufunc__( np.round, "__call__", self, decimals=decimals, out=out ) round_ = round def clip(self, min=None, max=None, out=None): """ Clip (limit) the values in the array. Return an array whose values are limited to ``[min, max]``. One of min or max must be given. See Also -------- sparse.clip : For full documentation and more details. numpy.clip : Equivalent NumPy function. """ if min is None and max is None: raise ValueError("One of max or min must be given.") if out is not None and not isinstance(out, tuple): out = (out,) return self.__array_ufunc__( np.clip, "__call__", self, a_min=min, a_max=max, out=out ) def astype(self, dtype, casting="unsafe", copy=True): """ Copy of the array, cast to a specified type. See also -------- scipy.sparse.coo_matrix.astype : SciPy sparse equivalent function numpy.ndarray.astype : NumPy equivalent ufunc. :obj:`COO.elemwise`: Apply an arbitrary element-wise function to one or two arguments. """ # this matches numpy's behavior if self.dtype == dtype and not copy: return self return self.__array_ufunc__( np.ndarray.astype, "__call__", self, dtype=dtype, copy=copy, casting=casting ) def mean(self, axis=None, keepdims=False, dtype=None, out=None): """ Compute the mean along the given axes. Uses all axes by default. Parameters ---------- axis : Union[int, Iterable[int]], optional The axes along which to compute the mean. Uses all axes by default. keepdims : bool, optional Whether or not to keep the dimensions of the original array. dtype: numpy.dtype The data type of the output array. Returns ------- SparseArray The reduced output sparse array. See Also -------- numpy.ndarray.mean : Equivalent numpy method. scipy.sparse.coo_matrix.mean : Equivalent Scipy method. 
Notes ----- * This function internally calls :obj:`COO.sum_duplicates` to bring the array into canonical form. * The :code:`out` parameter is provided just for compatibility with Numpy and isn't actually supported. Examples -------- You can use :obj:`COO.mean` to compute the mean of an array across any dimension. >>> from sparse import COO >>> x = np.array([[1, 2, 0, 0], ... [0, 1, 0, 0]], dtype='i8') >>> s = COO.from_numpy(x) >>> s2 = s.mean(axis=1) >>> s2.todense() # doctest: +SKIP array([0.75, 0.25]) You can also use the :code:`keepdims` argument to keep the dimensions after the mean. >>> s3 = s.mean(axis=0, keepdims=True) >>> s3.shape (1, 4) You can pass in an output datatype, if needed. >>> s4 = s.mean(axis=0, dtype=np.float16) >>> s4.dtype dtype('float16') By default, this reduces the array down to one number, computing the mean along all axes. >>> s.mean() 0.5 """ if axis is None: axis = tuple(range(self.ndim)) elif not isinstance(axis, tuple): axis = (axis,) den = reduce(operator.mul, (self.shape[i] for i in axis), 1) if dtype is None: if issubclass(self.dtype.type, (np.integer, np.bool_)): dtype = inter_dtype = np.dtype("f8") else: dtype = self.dtype inter_dtype = ( np.dtype("f4") if issubclass(dtype.type, np.float16) else dtype ) else: inter_dtype = dtype num = self.sum(axis=axis, keepdims=keepdims, dtype=inter_dtype) if num.ndim: out = np.true_divide(num, den, casting="unsafe") return out.astype(dtype) if out.dtype != dtype else out return np.divide(num, den, dtype=dtype, out=out) def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False): """ Compute the variance along the given axes. Uses all axes by default. Parameters ---------- axis : Union[int, Iterable[int]], optional The axes along which to compute the variance. Uses all axes by default. dtype : numpy.dtype, optional The output datatype. out: SparseArray, optional The array to write the output to. ddof: int The degrees of freedom. keepdims : bool, optional Whether or not to keep the dimensions of the original array. Returns ------- SparseArray The reduced output sparse array. See Also -------- numpy.ndarray.var : Equivalent numpy method. Notes ----- * This function internally calls :obj:`COO.sum_duplicates` to bring the array into canonical form. Examples -------- You can use :obj:`COO.var` to compute the variance of an array across any dimension. >>> from sparse import COO >>> x = np.array([[1, 2, 0, 0], ... [0, 1, 0, 0]], dtype='i8') >>> s = COO.from_numpy(x) >>> s2 = s.var(axis=1) >>> s2.todense() # doctest: +SKIP array([0.6875, 0.1875]) You can also use the :code:`keepdims` argument to keep the dimensions after the variance. >>> s3 = s.var(axis=0, keepdims=True) >>> s3.shape (1, 4) You can pass in an output datatype, if needed. >>> s4 = s.var(axis=0, dtype=np.float16) >>> s4.dtype dtype('float16') By default, this reduces the array down to one number, computing the variance along all axes. >>> s.var() 0.5 """ axis = normalize_axis(axis, self.ndim) if axis is None: axis = tuple(range(self.ndim)) if not isinstance(axis, tuple): axis = (axis,) rcount = reduce(operator.mul, (self.shape[a] for a in axis), 1) # Make this warning show up on top.
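# ``rcount`` is the number of elements collapsed by this reduction (e.g. 8 for a
# (2, 4) array reduced over all axes).  When ``ddof >= rcount`` the denominator
# ``max(rcount - ddof, 0)`` used further below becomes zero, so the RuntimeWarning
# that follows fires and the final division yields a non-finite result, matching
# ``numpy.ndarray.var``.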
if ddof >= rcount: warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning) # Cast bool, unsigned int, and int to float64 by default if dtype is None and issubclass(self.dtype.type, (np.integer, np.bool_)): dtype = np.dtype("f8") arrmean = self.sum(axis, dtype=dtype, keepdims=True) np.divide(arrmean, rcount, out=arrmean) x = self - arrmean if issubclass(self.dtype.type, np.complexfloating): x = x.real * x.real + x.imag * x.imag else: x = np.multiply(x, x, out=x) ret = x.sum(axis=axis, dtype=dtype, out=out, keepdims=keepdims) # Compute degrees of freedom and make sure it is not negative. rcount = max([rcount - ddof, 0]) ret = ret[...] np.divide(ret, rcount, out=ret, casting="unsafe") return ret[()] def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False): """ Compute the standard deviation along the given axes. Uses all axes by default. Parameters ---------- axis : Union[int, Iterable[int]], optional The axes along which to compute the standard deviation. Uses all axes by default. dtype : numpy.dtype, optional The output datatype. out: SparseArray, optional The array to write the output to. ddof: int The degrees of freedom. keepdims : bool, optional Whether or not to keep the dimensions of the original array. Returns ------- SparseArray The reduced output sparse array. See Also -------- numpy.ndarray.std : Equivalent numpy method. Notes ----- * This function internally calls :obj:`COO.sum_duplicates` to bring the array into canonical form. Examples -------- You can use :obj:`COO.std` to compute the standard deviation of an array across any dimension. >>> from sparse import COO >>> x = np.array([[1, 2, 0, 0], ... [0, 1, 0, 0]], dtype='i8') >>> s = COO.from_numpy(x) >>> s2 = s.std(axis=1) >>> s2.todense() # doctest: +SKIP array([0.8291562, 0.4330127]) You can also use the :code:`keepdims` argument to keep the dimensions after the standard deviation. >>> s3 = s.std(axis=0, keepdims=True) >>> s3.shape (1, 4) You can pass in an output datatype, if needed. >>> s4 = s.std(axis=0, dtype=np.float16) >>> s4.dtype dtype('float16') By default, this reduces the array down to one number, computing the standard deviation along all axes. >>> s.std() # doctest: +SKIP 0.7071067811865476 """ ret = self.var(axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims) ret = np.sqrt(ret) return ret @property def real(self): """The real part of the array. Examples -------- >>> from sparse import COO >>> x = COO.from_numpy([1 + 0j, 0 + 1j]) >>> x.real.todense() # doctest: +SKIP array([1., 0.]) >>> x.real.dtype dtype('float64') Returns ------- out : SparseArray The real component of the array elements. If the array dtype is real, the dtype of the array is used for the output. If the array is complex, the output dtype is float. See Also -------- numpy.ndarray.real : NumPy equivalent attribute. numpy.real : NumPy equivalent function. """ return self.__array_ufunc__(np.real, "__call__", self) @property def imag(self): """The imaginary part of the array. Examples -------- >>> from sparse import COO >>> x = COO.from_numpy([1 + 0j, 0 + 1j]) >>> x.imag.todense() # doctest: +SKIP array([0., 1.]) >>> x.imag.dtype dtype('float64') Returns ------- out : SparseArray The imaginary component of the array elements. If the array dtype is real, the dtype of the array is used for the output. If the array is complex, the output dtype is float. See Also -------- numpy.ndarray.imag : NumPy equivalent attribute. numpy.imag : NumPy equivalent function. 
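# --- Minimal illustrative sketch, assuming only ``numpy`` and ``sparse``: taking the
# real and imaginary parts and the conjugate of a complex sparse array.
import numpy as np
from sparse import COO

s = COO.from_numpy(np.array([1 + 2j, 0j, 3 - 1j]))
assert np.allclose(s.real.todense(), [1.0, 0.0, 3.0])
assert np.allclose(s.imag.todense(), [2.0, 0.0, -1.0])
assert np.allclose(s.conj().todense(), [1 - 2j, 0j, 3 + 1j])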
""" return self.__array_ufunc__(np.imag, "__call__", self) def conj(self): """Return the complex conjugate, element-wise. The complex conjugate of a complex number is obtained by changing the sign of its imaginary part. Examples -------- >>> from sparse import COO >>> x = COO.from_numpy([1 + 2j, 2 - 1j]) >>> res = x.conj() >>> res.todense() # doctest: +SKIP array([1.-2.j, 2.+1.j]) >>> res.dtype dtype('complex128') Returns ------- out : SparseArray The complex conjugate, with same dtype as the input. See Also -------- numpy.ndarray.conj : NumPy equivalent method. numpy.conj : NumPy equivalent function. """ return np.conj(self) sparse-0.12.0/sparse/_umath.py000066400000000000000000000570711402510130100162100ustar00rootroot00000000000000import itertools import numba import numpy as np import scipy.sparse from itertools import zip_longest from ._utils import isscalar, equivalent, _zero_of_dtype def elemwise(func, *args, **kwargs): """ Apply a function to any number of arguments. Parameters ---------- func : Callable The function to apply. Must support broadcasting. args : tuple, optional The arguments to the function. Can be :obj:`SparseArray` objects or :obj:`scipy.sparse.spmatrix` objects. kwargs : dict, optional Any additional arguments to pass to the function. Returns ------- SparseArray The result of applying the function. Raises ------ ValueError If the operation would result in a dense matrix, or if the operands don't have broadcastable shapes. See Also -------- :obj:`numpy.ufunc` : A similar Numpy construct. Note that any :code:`ufunc` can be used as the :code:`func` input to this function. Notes ----- Previously, operations with Numpy arrays were sometimes supported. Now, it is necessary to convert Numpy arrays to :obj:`COO` objects. """ return _Elemwise(func, *args, **kwargs).get_result() @numba.jit(nopython=True, nogil=True) def _match_arrays(a, b): # pragma: no cover """ Finds all indexes into a and b such that a[i] = b[j]. The outputs are sorted in lexographical order. Parameters ---------- a, b : np.ndarray The input 1-D arrays to match. If matching of multiple fields is needed, use np.recarrays. These two arrays must be sorted. Returns ------- a_idx, b_idx : np.ndarray The output indices of every possible pair of matching elements. """ if len(a) == 0 or len(b) == 0: return np.empty(0, dtype=np.uintp), np.empty(0, dtype=np.uintp) a_ind, b_ind = [], [] nb = len(b) ib = 0 match = 0 for ia, j in enumerate(a): if j == b[match]: ib = match while ib < nb and j >= b[ib]: if j == b[ib]: a_ind.append(ia) b_ind.append(ib) if b[match] < b[ib]: match = ib ib += 1 return np.array(a_ind, dtype=np.uintp), np.array(b_ind, dtype=np.uintp) def _get_nary_broadcast_shape(*shapes): """ Broadcast any number of shapes to a result shape. Parameters ---------- shapes : tuple[tuple[int]] The shapes to broadcast. Returns ------- tuple[int] The output shape. Raises ------ ValueError If the input shapes cannot be broadcast to a single shape. """ result_shape = () for shape in shapes: try: result_shape = _get_broadcast_shape(shape, result_shape) except ValueError: shapes_str = ", ".join(str(shape) for shape in shapes) raise ValueError( "operands could not be broadcast together with shapes %s" % shapes_str ) return result_shape def _get_broadcast_shape(shape1, shape2, is_result=False): """ Get the overall broadcasted shape. Parameters ---------- shape1, shape2 : tuple[int] The input shapes to broadcast together. is_result : bool Whether or not shape2 is also the result shape. 
Returns ------- result_shape : tuple[int] The overall shape of the result. Raises ------ ValueError If the two shapes cannot be broadcast together. """ # https://stackoverflow.com/a/47244284/774273 if not all( (l1 == l2) or (l1 == 1) or ((l2 == 1) and not is_result) for l1, l2 in zip(shape1[::-1], shape2[::-1]) ): raise ValueError( "operands could not be broadcast together with shapes %s, %s" % (shape1, shape2) ) result_shape = tuple( l1 if l1 != 1 else l2 for l1, l2 in zip_longest(shape1[::-1], shape2[::-1], fillvalue=1) )[::-1] return result_shape def _get_broadcast_parameters(shape, broadcast_shape): """ Get the broadcast parameters. Parameters ---------- shape : tuple[int] The input shape. broadcast_shape The shape to broadcast to. Returns ------- params : list A list containing None if the dimension isn't in the original array, False if it needs to be broadcast, and True if it doesn't. """ params = [ None if l1 is None else l1 == l2 for l1, l2 in zip_longest(shape[::-1], broadcast_shape[::-1], fillvalue=None) ][::-1] return params def _get_reduced_coords(coords, params): """ Gets only those dimensions of the coordinates that don't need to be broadcast. Parameters ---------- coords : np.ndarray The coordinates to reduce. params : list The params from which to check which dimensions to get. Returns ------- reduced_coords : np.ndarray The reduced coordinates. """ reduced_params = [bool(param) for param in params] return coords[reduced_params] def _get_reduced_shape(shape, params): """ Gets only those dimensions of the coordinates that don't need to be broadcast. Parameters ---------- coords : np.ndarray The coordinates to reduce. params : list The params from which to check which dimensions to get. Returns ------- reduced_coords : np.ndarray The reduced coordinates. """ reduced_shape = tuple(l for l, p in zip(shape, params) if p) return reduced_shape def _get_expanded_coords_data(coords, data, params, broadcast_shape): """ Expand coordinates/data to broadcast_shape. Does most of the heavy lifting for broadcast_to. Produces sorted output for sorted inputs. Parameters ---------- coords : np.ndarray The coordinates to expand. data : np.ndarray The data corresponding to the coordinates. params : list The broadcast parameters. broadcast_shape : tuple[int] The shape to broadcast to. Returns ------- expanded_coords : np.ndarray List of 1-D arrays. Each item in the list has one dimension of coordinates. expanded_data : np.ndarray The data corresponding to expanded_coords. 
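# Worked illustration of the broadcast parameters used here (derived from the helper
# docstrings above): broadcasting an array of shape (3, 1) to (2, 3, 4) gives
# ``params == [None, True, False]`` -- the leading axis is new (None), the middle
# axis already matches (True), and the trailing length-1 axis must be broadcast
# (False).  ``_get_expanded_coords_data`` then tiles the stored coordinates across
# the new/broadcast axes while leaving the matching axis untouched.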
""" first_dim = -1 expand_shapes = [] for d, p, l in zip(range(len(broadcast_shape)), params, broadcast_shape): if p and first_dim == -1: expand_shapes.append(coords.shape[1]) first_dim = d if not p: expand_shapes.append(l) all_idx = _cartesian_product(*(np.arange(d, dtype=np.intp) for d in expand_shapes)) false_dim = 0 dim = 0 expanded_coords = np.empty((len(broadcast_shape), all_idx.shape[1]), dtype=np.intp) if first_dim != -1: expanded_data = data[all_idx[first_dim]] else: expanded_coords = all_idx expanded_data = np.repeat(data, np.prod(broadcast_shape, dtype=np.int64)) return np.asarray(expanded_coords), np.asarray(expanded_data) for d, p, l in zip(range(len(broadcast_shape)), params, broadcast_shape): if p: expanded_coords[d] = coords[dim, all_idx[first_dim]] else: expanded_coords[d] = all_idx[false_dim + (d > first_dim)] false_dim += 1 if p is not None: dim += 1 return np.asarray(expanded_coords), np.asarray(expanded_data) # (c) senderle # Taken from https://stackoverflow.com/a/11146645/774273 # License: https://creativecommons.org/licenses/by-sa/3.0/ def _cartesian_product(*arrays): """ Get the cartesian product of a number of arrays. Parameters ---------- arrays : Tuple[np.ndarray] The arrays to get a cartesian product of. Always sorted with respect to the original array. Returns ------- out : np.ndarray The overall cartesian product of all the input arrays. """ broadcastable = np.ix_(*arrays) broadcasted = np.broadcast_arrays(*broadcastable) rows, cols = np.prod(broadcasted[0].shape), len(broadcasted) dtype = np.result_type(*arrays) out = np.empty(rows * cols, dtype=dtype) start, end = 0, rows for a in broadcasted: out[start:end] = a.reshape(-1) start, end = end, end + rows return out.reshape(cols, rows) def _get_matching_coords(coords, params): """ Get the matching coords across a number of broadcast operands. Parameters ---------- coords : list[numpy.ndarray] The input coordinates. params : list[Union[bool, none]] The broadcast parameters. Returns ------- numpy.ndarray The broacasted coordinates """ matching_coords = [] dims = np.zeros(len(coords), dtype=np.uint8) for p_all in zip(*params): for i, p in enumerate(p_all): if p: matching_coords.append(coords[i][dims[i]]) break else: matching_coords.append(coords[dims[0]]) for i, p in enumerate(p_all): if p is not None: dims[i] += 1 return np.asarray(matching_coords, dtype=np.intp) def broadcast_to(x, shape): """ Performs the equivalent of :obj:`numpy.broadcast_to` for :obj:`COO`. Note that this function returns a new array instead of a view. Parameters ---------- shape : tuple[int] The shape to broadcast the data to. Returns ------- COO The broadcasted sparse array. Raises ------ ValueError If the operand cannot be broadcast to the given shape. 
See also -------- :obj:`numpy.broadcast_to` : NumPy equivalent function """ from ._coo import COO if shape == x.shape: return x result_shape = _get_broadcast_shape(x.shape, shape, is_result=True) params = _get_broadcast_parameters(x.shape, result_shape) coords, data = _get_expanded_coords_data(x.coords, x.data, params, result_shape) # Check if all the non-broadcast axes are next to each other nonbroadcast_idx = [idx for idx, p in enumerate(params) if p] diff_nonbroadcast_idx = [ a - b for a, b in zip(nonbroadcast_idx[1:], nonbroadcast_idx[:-1]) ] sorted = all(d == 1 for d in diff_nonbroadcast_idx) return COO( coords, data, shape=result_shape, has_duplicates=False, sorted=sorted, fill_value=x.fill_value, ) class _Elemwise: def __init__(self, func, *args, **kwargs): """ Initialize the element-wise function calculator. Parameters ---------- func : types.Callable The function to compute args : tuple[Union[SparseArray, ndarray, scipy.sparse.spmatrix]] The arguments to compute the function on. kwargs : dict Extra arguments to pass to the function. """ from ._coo import COO from ._sparse_array import SparseArray from ._compressed import GCXS from ._dok import DOK processed_args = [] out_type = GCXS sparse_args = [arg for arg in args if isinstance(arg, SparseArray)] if all(isinstance(arg, DOK) for arg in sparse_args): out_type = DOK elif all(isinstance(arg, GCXS) for arg in sparse_args): out_type = GCXS else: out_type = COO for arg in args: if isinstance(arg, scipy.sparse.spmatrix): processed_args.append(COO.from_scipy_sparse(arg)) elif isscalar(arg) or isinstance(arg, np.ndarray): # Faster and more reliable to pass ()-shaped ndarrays as scalars. processed_args.append(np.asarray(arg)) elif isinstance(arg, SparseArray) and not isinstance(arg, COO): processed_args.append(COO(arg)) elif not isinstance(arg, COO): self.args = None return else: processed_args.append(arg) self.out_type = out_type self.args = tuple(processed_args) self.func = func self.dtype = kwargs.pop("dtype", None) self.kwargs = kwargs self.cache = {} self._dense_result = False self._check_broadcast() self._get_fill_value() def get_result(self): from ._coo import COO if self.args is None: return NotImplemented if self._dense_result: args = [a.todense() if isinstance(a, COO) else a for a in self.args] return self.func(*args, **self.kwargs) if any(s == 0 for s in self.shape): data = np.empty((0,), dtype=self.fill_value.dtype) coords = np.empty((0, len(self.shape)), dtype=np.intp) return COO( coords, data, shape=self.shape, has_duplicates=False, fill_value=self.fill_value, ) data_list = [] coords_list = [] for mask in itertools.product( *[[True, False] if isinstance(arg, COO) else [None] for arg in self.args] ): if not any(mask): continue r = self._get_func_coords_data(mask) if r is not None: coords_list.append(r[0]) data_list.append(r[1]) # Concatenate matches and mismatches data = ( np.concatenate(data_list) if len(data_list) else np.empty((0,), dtype=self.fill_value.dtype) ) coords = ( np.concatenate(coords_list, axis=1) if len(coords_list) else np.empty((0, len(self.shape)), dtype=np.intp) ) return COO( coords, data, shape=self.shape, has_duplicates=False, fill_value=self.fill_value, ).asformat(self.out_type) def _get_fill_value(self): """ A function that finds and returns the fill-value. Raises ------ ValueError If the fill-value is inconsistent. """ from ._coo import COO zero_args = tuple( arg.fill_value[...] if isinstance(arg, COO) else arg for arg in self.args ) # Some elemwise functions require a dtype argument, some abhorr it. 
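# The output fill value is ``func`` applied to the operands' fill values (with any
# ndarray operands passed through whole).  For example, adding two arrays whose fill
# values are 1 and 2 gives an output fill value of 3.  If that result is not a single
# constant, the output cannot be represented sparsely, and the checks further down
# either raise or fall back to a dense result.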
try: fill_value_array = self.func( *np.broadcast_arrays(*zero_args), dtype=self.dtype, **self.kwargs ) except TypeError: fill_value_array = self.func( *np.broadcast_arrays(*zero_args), **self.kwargs ) try: fill_value = fill_value_array[(0,) * fill_value_array.ndim] except IndexError: zero_args = tuple( arg.fill_value if isinstance(arg, COO) else _zero_of_dtype(arg.dtype) for arg in self.args ) fill_value = self.func(*zero_args, **self.kwargs)[()] equivalent_fv = equivalent(fill_value, fill_value_array).all() if not equivalent_fv and self.shape != self.ndarray_shape: raise ValueError( "Performing a mixed sparse-dense operation that would result in a dense array. " "Please make sure that func(sparse_fill_values, ndarrays) is a constant array." ) elif not equivalent_fv: self._dense_result = True # Store dtype separately if needed. if self.dtype is not None: fill_value = fill_value.astype(self.dtype) self.fill_value = fill_value self.dtype = self.fill_value.dtype def _check_broadcast(self): """ Checks if adding the ndarrays changes the broadcast shape. Raises ------ ValueError If the check fails. """ from ._coo import COO full_shape = _get_nary_broadcast_shape(*tuple(arg.shape for arg in self.args)) non_ndarray_shape = _get_nary_broadcast_shape( *tuple(arg.shape for arg in self.args if isinstance(arg, COO)) ) ndarray_shape = _get_nary_broadcast_shape( *tuple(arg.shape for arg in self.args if isinstance(arg, np.ndarray)) ) self.shape = full_shape self.ndarray_shape = ndarray_shape self.non_ndarray_shape = non_ndarray_shape def _get_func_coords_data(self, mask): """ Gets the coords/data for a certain mask Parameters ---------- mask : tuple[Union[bool, NoneType]] The mask determining whether to match or unmatch. Returns ------- None or tuple The coords/data tuple for the given mask. """ from ._coo import COO matched_args = [arg for arg, m in zip(self.args, mask) if m is not None and m] unmatched_args = [ arg for arg, m in zip(self.args, mask) if m is not None and not m ] ndarray_args = [arg for arg, m in zip(self.args, mask) if m is None] matched_broadcast_shape = _get_nary_broadcast_shape( *tuple(arg.shape for arg in itertools.chain(matched_args, ndarray_args)) ) matched_arrays = self._match_coo( *matched_args, cache=self.cache, broadcast_shape=matched_broadcast_shape ) func_args = [] m_arg = 0 for arg, m in zip(self.args, mask): if m is None: func_args.append( np.broadcast_to(arg, matched_broadcast_shape)[ tuple(matched_arrays[0].coords) ] ) continue if m: func_args.append(matched_arrays[m_arg].data) m_arg += 1 else: func_args.append(arg.fill_value) # Try our best to preserve the output dtype. try: func_data = self.func(*func_args, dtype=self.dtype, **self.kwargs) except TypeError: try: func_args = np.broadcast_arrays(*func_args) out = np.empty(func_args[0].shape, dtype=self.dtype) func_data = self.func(*func_args, out=out, **self.kwargs) except TypeError: func_data = self.func(*func_args, **self.kwargs).astype(self.dtype) unmatched_mask = ~equivalent(func_data, self.fill_value) if not unmatched_mask.any(): return None func_coords = matched_arrays[0].coords[:, unmatched_mask] func_data = func_data[unmatched_mask] if matched_arrays[0].shape != self.shape: params = _get_broadcast_parameters(matched_arrays[0].shape, self.shape) func_coords, func_data = _get_expanded_coords_data( func_coords, func_data, params, self.shape ) if all(m is None or m for m in mask): return func_coords, func_data # Not really sorted but we need the sortedness. 
func_array = COO( func_coords, func_data, self.shape, has_duplicates=False, sorted=True ) unmatched_mask = np.ones(func_array.nnz, dtype=np.bool) for arg in unmatched_args: matched_idx = self._match_coo(func_array, arg, return_midx=True)[0] unmatched_mask[matched_idx] = False coords = np.asarray(func_array.coords[:, unmatched_mask], order="C") data = np.asarray(func_array.data[unmatched_mask], order="C") return coords, data @staticmethod def _match_coo(*args, **kwargs): """ Matches the coordinates for any number of input :obj:`COO` arrays. Equivalent to "sparse" broadcasting for all arrays. Parameters ---------- args : Tuple[COO] The input :obj:`COO` arrays. return_midx : bool Whether to return matched indices or matched arrays. Matching only supported for two arrays. ``False`` by default. cache : dict Cache of things already matched. No cache by default. Returns ------- matched_idx : List[ndarray] The indices of matched elements in the original arrays. Only returned if ``return_midx`` is ``True``. matched_arrays : List[COO] The expanded, matched :obj:`COO` objects. Only returned if ``return_midx`` is ``False``. """ from ._coo import COO from ._coo.common import linear_loc cache = kwargs.pop("cache", None) return_midx = kwargs.pop("return_midx", False) broadcast_shape = kwargs.pop("broadcast_shape", None) if kwargs: raise ValueError("Unknown kwargs: {}".format(kwargs.keys())) if return_midx and (len(args) != 2 or cache is not None): raise NotImplementedError( "Matching indices only supported for two args, and no cache." ) matched_arrays = [args[0]] cache_key = [id(args[0])] for arg2 in args[1:]: cache_key.append(id(arg2)) key = tuple(cache_key) if cache is not None and key in cache: matched_arrays = cache[key] continue cargs = [matched_arrays[0], arg2] current_shape = _get_broadcast_shape(matched_arrays[0].shape, arg2.shape) params = [ _get_broadcast_parameters(arg.shape, current_shape) for arg in cargs ] reduced_params = [all(p) for p in zip(*params)] reduced_shape = _get_reduced_shape( arg2.shape, _rev_idx(reduced_params, arg2.ndim) ) reduced_coords = [ _get_reduced_coords(arg.coords, _rev_idx(reduced_params, arg.ndim)) for arg in cargs ] linear = [linear_loc(rc, reduced_shape) for rc in reduced_coords] sorted_idx = [np.argsort(idx) for idx in linear] linear = [idx[s] for idx, s in zip(linear, sorted_idx)] matched_idx = _match_arrays(*linear) if return_midx: matched_idx = [ sidx[midx] for sidx, midx in zip(sorted_idx, matched_idx) ] return matched_idx coords = [arg.coords[:, s] for arg, s in zip(cargs, sorted_idx)] mcoords = [c[:, idx] for c, idx in zip(coords, matched_idx)] mcoords = _get_matching_coords(mcoords, params) mdata = [arg.data[sorted_idx[0]][matched_idx[0]] for arg in matched_arrays] mdata.append(arg2.data[sorted_idx[1]][matched_idx[1]]) # The coords aren't truly sorted, but we don't need them, so it's # best to avoid the extra cost. 
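# At this point the reduced coordinates of both operands have been linearised with
# ``linear_loc``, argsorted and intersected by ``_match_arrays`` above, so
# ``mcoords``/``mdata`` hold one entry per coordinate that is explicitly stored in
# *both* operands.  Each matched COO built below therefore shares the same coordinate
# array and differs only in its data.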
matched_arrays = [ COO(mcoords, md, shape=current_shape, sorted=True, has_duplicates=False) for md in mdata ] if cache is not None: cache[key] = matched_arrays if broadcast_shape is not None and matched_arrays[0].shape != broadcast_shape: params = _get_broadcast_parameters(matched_arrays[0].shape, broadcast_shape) coords, idx = _get_expanded_coords_data( matched_arrays[0].coords, np.arange(matched_arrays[0].nnz), params, broadcast_shape, ) matched_arrays = [ COO( coords, arr.data[idx], shape=broadcast_shape, sorted=True, has_duplicates=False, ) for arr in matched_arrays ] return matched_arrays def _rev_idx(arg, idx): if idx == 0: return arg[len(arg) :] return arg[-idx:] sparse-0.12.0/sparse/_utils.py000066400000000000000000000332611402510130100162250ustar00rootroot00000000000000import functools from collections.abc import Iterable from numbers import Integral from functools import reduce import operator import numpy as np def assert_eq(x, y, check_nnz=True, compare_dtype=True, **kwargs): from ._coo import COO assert x.shape == y.shape if compare_dtype: assert x.dtype == y.dtype check_equal = ( np.array_equal if np.issubdtype(x.dtype, np.integer) and np.issubdtype(y.dtype, np.integer) else functools.partial(np.allclose, equal_nan=True) ) if isinstance(x, COO): assert is_canonical(x) if isinstance(y, COO): assert is_canonical(y) if isinstance(x, COO) and isinstance(y, COO) and check_nnz: assert np.array_equal(x.coords, y.coords) assert check_equal(x.data, y.data, **kwargs) assert x.fill_value == y.fill_value return if hasattr(x, "todense"): xx = x.todense() if check_nnz: assert_nnz(x, xx) else: xx = x if hasattr(y, "todense"): yy = y.todense() if check_nnz: assert_nnz(y, yy) else: yy = y assert check_equal(xx, yy, **kwargs) def assert_nnz(s, x): fill_value = s.fill_value if hasattr(s, "fill_value") else _zero_of_dtype(s.dtype) assert np.sum(~equivalent(x, fill_value)) == s.nnz def is_canonical(x): return not x.shape or ( (np.diff(x.linear_loc()) > 0).all() and not equivalent(x.data, x.fill_value).any() ) def _zero_of_dtype(dtype): """ Creates a ()-shaped 0-dimensional zero array of a given dtype. Parameters ---------- dtype : numpy.dtype The dtype for the array. Returns ------- np.ndarray The zero array. """ return np.zeros((), dtype=dtype)[()] def random( shape, density=None, nnz=None, random_state=None, data_rvs=None, format="coo", compressed_axes=None, fill_value=None, idx_dtype=None, ): """Generate a random sparse multidimensional array Parameters ---------- shape: Tuple[int] Shape of the array density: float, optional Density of the generated array; default is 0.01. Mutually exclusive with `nnz`. nnz: int, optional Number of nonzero elements in the generated array. Mutually exclusive with `density`. random_state : Union[numpy.random.RandomState, int], optional Random number generator or random seed. If not given, the singleton numpy.random will be used. This random state will be used for sampling the sparsity structure, but not necessarily for sampling the values of the structurally nonzero entries of the matrix. data_rvs : Callable Data generation callback. Must accept one single parameter: number of :code:`nnz` elements, and return one single NumPy array of exactly that length. format : str The format to return the output array in. fill_value : scalar The fill value of the output array. Returns ------- SparseArray The generated random matrix. See Also -------- :obj:`scipy.sparse.rand` Equivalent Scipy function. :obj:`numpy.random.rand` Similar Numpy function. 
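# --- Complementary usage sketch, assuming only ``sparse``: the doctest example below
# demonstrates ``data_rvs``, while this one shows the ``nnz`` and ``format`` keywords.
import sparse

s = sparse.random((10, 10), nnz=20, format="gcxs", random_state=0)
assert s.nnz == 20
assert type(s).__name__ == "GCXS"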
Examples -------- >>> from sparse import random >>> from scipy import stats >>> rvs = lambda x: stats.poisson(25, loc=10).rvs(x, random_state=np.random.RandomState(1)) >>> s = random((2, 3, 4), density=0.25, random_state=np.random.RandomState(1), data_rvs=rvs) >>> s.todense() # doctest: +NORMALIZE_WHITESPACE array([[[ 0, 0, 0, 0], [ 0, 34, 0, 0], [33, 34, 0, 29]], [[30, 0, 0, 34], [ 0, 0, 0, 0], [ 0, 0, 0, 0]]]) """ # Copied, in large part, from scipy.sparse.random # See https://github.com/scipy/scipy/blob/master/LICENSE.txt from ._coo import COO if density is not None and nnz is not None: raise ValueError("'density' and 'nnz' are mutually exclusive") if density is None: density = 0.01 if not (0 <= density <= 1): raise ValueError("density {} is not in the unit interval".format(density)) elements = np.prod(shape, dtype=np.intp) if nnz is None: nnz = int(elements * density) if not (0 <= nnz <= elements): raise ValueError( "cannot generate {} nonzero elements " "for an array with {} total elements".format(nnz, elements) ) if format != "gcxs" and compressed_axes is not None: raise ValueError( "compressed_axes is not supported for {} format".format(format) ) if random_state is None: random_state = np.random elif isinstance(random_state, Integral): random_state = np.random.RandomState(random_state) if data_rvs is None: data_rvs = random_state.rand # Use the algorithm from python's random.sample for k < mn/3. if elements < 3 * nnz: ind = random_state.choice(elements, size=nnz, replace=False) else: ind = np.empty(nnz, dtype=np.min_scalar_type(elements - 1)) selected = set() for i in range(nnz): j = random_state.randint(elements) while j in selected: j = random_state.randint(elements) selected.add(j) ind[i] = j data = data_rvs(nnz) ar = COO( ind[None, :], data, shape=elements, fill_value=fill_value, ).reshape(shape) if idx_dtype: if can_store(idx_dtype, max(shape)): ar.coords = ar.coords.astype(idx_dtype) else: raise ValueError( "cannot cast array with shape {} to dtype {}.".format(shape, idx_dtype) ) return ar.asformat(format, compressed_axes=compressed_axes) def isscalar(x): from ._sparse_array import SparseArray return not isinstance(x, SparseArray) and np.isscalar(x) def random_value_array(value, fraction): def replace_values(n): i = int(n * fraction) ar = np.empty((n,), dtype=np.float_) ar[:i] = value ar[i:] = np.random.rand(n - i) return ar return replace_values def normalize_axis(axis, ndim): """ Normalize negative axis indices to their positive counterpart for a given number of dimensions. Parameters ---------- axis : Union[int, Iterable[int], None] The axis indices. ndim : int Number of dimensions to normalize axis indices against. Returns ------- axis The normalized axis indices. """ if axis is None: return None if isinstance(axis, Integral): axis = int(axis) if axis < 0: axis += ndim if axis >= ndim or axis < 0: raise ValueError("Invalid axis index %d for ndim=%d" % (axis, ndim)) return axis if isinstance(axis, Iterable): if not all(isinstance(a, Integral) for a in axis): raise ValueError("axis %s not understood" % axis) return tuple(normalize_axis(a, ndim) for a in axis) raise ValueError("axis %s not understood" % axis) def equivalent(x, y): """ Checks the equivalence of two scalars or arrays with broadcasting. Assumes a consistent dtype. Parameters ---------- x : scalar or numpy.ndarray y : scalar or numpy.ndarray Returns ------- equivalent : scalar or numpy.ndarray The element-wise comparison of where two arrays are equivalent. 
Examples -------- >>> equivalent(1, 1) True >>> equivalent(np.nan, np.nan + 1) True >>> equivalent(1, 2) False >>> equivalent(np.inf, np.inf) True >>> equivalent(np.PZERO, np.NZERO) True """ x = np.asarray(x) y = np.asarray(y) # Can't contain NaNs if any(np.issubdtype(x.dtype, t) for t in [np.integer, np.bool_, np.character]): return x == y # Can contain NaNs # FIXME: Complex floats and np.void with multiple values can't be compared properly. # lgtm [py/comparison-of-identical-expressions] return (x == y) | ((x != x) & (y != y)) # copied from zarr # See https://github.com/zarr-developers/zarr-python/blob/master/zarr/util.py def human_readable_size(size): if size < 2 ** 10: return "%s" % size elif size < 2 ** 20: return "%.1fK" % (size / float(2 ** 10)) elif size < 2 ** 30: return "%.1fM" % (size / float(2 ** 20)) elif size < 2 ** 40: return "%.1fG" % (size / float(2 ** 30)) elif size < 2 ** 50: return "%.1fT" % (size / float(2 ** 40)) else: return "%.1fP" % (size / float(2 ** 50)) def html_table(arr): table = "" table += "" headings = ["Format", "Data Type", "Shape", "nnz", "Density", "Read-only"] info = [ type(arr).__name__.lower(), str(arr.dtype), str(arr.shape), str(arr.nnz), str(arr.nnz / arr.size), ] # read-only info.append(str(not hasattr(arr, "__setitem__"))) if hasattr(arr, "nbytes"): headings.append("Size") info.append(human_readable_size(arr.nbytes)) headings.append("Storage ratio") info.append( "%.1f" % (arr.nbytes / (reduce(operator.mul, arr.shape, 1) * arr.dtype.itemsize)) ) # compressed_axes if type(arr).__name__ == "GCXS": headings.append("Compressed Axes") info.append(str(arr.compressed_axes)) for h, i in zip(headings, info): table += ( "" '' '' "" % (h, i) ) table += "" table += "
</table>
" return table def check_compressed_axes(ndim, compressed_axes): """ Checks if the given compressed_axes are compatible with the shape of the array. Parameters ---------- shape : int compressed_axes : Iterable Raises ------ ValueError If the compressed_axes are incompatible with the number of dimensions """ if compressed_axes is None: return if isinstance(ndim, Iterable): ndim = len(ndim) if not isinstance(compressed_axes, Iterable): raise ValueError("compressed_axes must be an iterable") if len(compressed_axes) == ndim: raise ValueError("cannot compress all axes") if not np.array_equal(list(set(compressed_axes)), compressed_axes): raise ValueError("axes must be sorted without repeats") if not all(isinstance(a, Integral) for a in compressed_axes): raise ValueError("axes must be represented with integers") if min(compressed_axes) < 0 or max(compressed_axes) >= ndim: raise ValueError("axis out of range") def check_zero_fill_value(*args): """ Checks if all the arguments have zero fill-values. Parameters ---------- args : Iterable[SparseArray] Raises ------ ValueError If all arguments don't have zero fill-values. Examples -------- >>> import sparse >>> s1 = sparse.random((10,), density=0.5) >>> s2 = sparse.random((10,), density=0.5, fill_value=0.5) >>> check_zero_fill_value(s1) >>> check_zero_fill_value(s2) Traceback (most recent call last): ... ValueError: This operation requires zero fill values, but argument 0 had a fill value of 0.5. >>> check_zero_fill_value(s1, s2) Traceback (most recent call last): ... ValueError: This operation requires zero fill values, but argument 1 had a fill value of 0.5. """ for i, arg in enumerate(args): if hasattr(arg, "fill_value") and not equivalent( arg.fill_value, _zero_of_dtype(arg.dtype) ): raise ValueError( "This operation requires zero fill values, " "but argument {:d} had a fill value of {!s}.".format(i, arg.fill_value) ) def check_consistent_fill_value(arrays): """ Checks if all the arguments have consistent fill-values. Parameters ---------- args : Iterable[SparseArray] Raises ------ ValueError If all elements of :code:`arrays` don't have the same fill-value. Examples -------- >>> import sparse >>> s1 = sparse.random((10,), density=0.5, fill_value=0.1) >>> s2 = sparse.random((10,), density=0.5, fill_value=0.5) >>> check_consistent_fill_value([s1, s1]) >>> check_consistent_fill_value([s1, s2]) # doctest: +NORMALIZE_WHITESPACE Traceback (most recent call last): ... ValueError: This operation requires consistent fill-values, but argument 1 had a fill value of 0.5,\ which is different from a fill_value of 0.1 in the first argument. 
""" arrays = list(arrays) from ._sparse_array import SparseArray if not all(isinstance(s, SparseArray) for s in arrays): raise ValueError("All arrays must be instances of SparseArray.") if len(arrays) == 0: raise ValueError("At least one array required.") fv = arrays[0].fill_value for i, arg in enumerate(arrays): if not equivalent(fv, arg.fill_value): raise ValueError( "This operation requires consistent fill-values, " "but argument {:d} had a fill value of {!s}, which " "is different from a fill_value of {!s} in the first " "argument.".format(i, arg.fill_value, fv) ) def get_out_dtype(arr, scalar): out_type = arr.dtype if not can_store(out_type, scalar): out_type = np.min_scalar_type(scalar) return out_type def can_store(dtype, scalar): return np.array(scalar, dtype=dtype) == np.array(scalar) def is_unsigned_dtype(dtype): return not np.array(-1, dtype=dtype) == np.array(-1) sparse-0.12.0/sparse/_version.py000066400000000000000000000441211402510130100165470ustar00rootroot00000000000000# This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. Generated by # versioneer-0.18 (https://github.com/warner/python-versioneer) """Git implementation of _version.py.""" import errno import os import re import subprocess import sys def get_keywords(): """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). 
git_refnames = " (tag: 0.12.0)" git_full = "32976286cb1eb6638533cffd9bc1646d24f541ad" git_date = "2021-03-19 11:53:53 +0100" keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} return keywords class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_config(): """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "pep440" cfg.tag_prefix = "" cfg.parentdir_prefix = "sparse-" cfg.versionfile_source = "sparse/_version.py" cfg.verbose = False return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen( [c] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None), ) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %s" % dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %s" % (commands,)) return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) print("stdout was %s" % stdout) return None, p.returncode return stdout, p.returncode def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return { "version": dirname[len(parentdir_prefix) :], "full-revisionid": None, "dirty": False, "error": None, "date": None, } else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print( "Tried directories %s but none started with prefix %s" % (str(rootdirs), parentdir_prefix) ) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. 
keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if not keywords: raise NotThisMethod("no keywords at all, weird") date = keywords.get("date") if date is not None: # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = set([r for r in refs if re.search(r"\d", r)]) if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix) :] if verbose: print("picking %s" % r) return { "version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date, } # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return { "version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None, } @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. 
""" GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] _, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %s not under git control" % root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command( GITS, [ "describe", "--tags", "--dirty", "--always", "--long", "--match", "%s*" % tag_prefix, ], cwd=root, ) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[: git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % ( full_tag, tag_prefix, ) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix) :] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[ 0 ].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): """TAG[.post.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 
0.post.devDISTANCE """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post.dev%d" % pieces["distance"] else: # exception #1 rendered = "0.post.dev%d" % pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%s" % pieces["short"] return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Eexceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return { "version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None, } if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%s'" % style) return { "version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date"), } def get_versions(): """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. 
cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. for _ in cfg.versionfile_source.split("/"): root = os.path.dirname(root) except NameError: return { "version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree", "date": None, } try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return { "version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None, } sparse-0.12.0/sparse/tests/000077500000000000000000000000001402510130100155115ustar00rootroot00000000000000sparse-0.12.0/sparse/tests/conftest.py000066400000000000000000000002051402510130100177050ustar00rootroot00000000000000import platform def pytest_cmdline_preparse(args): if platform.system() != "Windows": args.append("--doctest-modules") sparse-0.12.0/sparse/tests/test_array_function.py000066400000000000000000000035251402510130100221520ustar00rootroot00000000000000import sparse from sparse._settings import NEP18_ENABLED from sparse._utils import assert_eq import numpy as np import pytest if not NEP18_ENABLED: pytest.skip("NEP18 is not enabled", allow_module_level=True) @pytest.mark.parametrize( "func", [ np.mean, np.std, np.var, np.sum, lambda x: np.sum(x, axis=0), lambda x: np.transpose(x), ], ) def test_unary(func): y = sparse.random((50, 50), density=0.25) x = y.todense() xx = func(x) yy = func(y) assert_eq(xx, yy) @pytest.mark.parametrize("arg_order", [(0, 1), (1, 0), (1, 1)]) @pytest.mark.parametrize("func", [np.dot, np.result_type, np.tensordot, np.matmul]) def test_binary(func, arg_order): y = sparse.random((50, 50), density=0.25) x = y.todense() xx = func(x, x) args = [(x, y)[i] for i in arg_order] yy = func(*args) if isinstance(xx, np.ndarray): assert_eq(xx, yy) else: # result_type returns a dtype assert xx == yy def test_stack(): """stack(), by design, does not allow for mixed type inputs""" y = sparse.random((50, 50), density=0.25) x = y.todense() xx = np.stack([x, x]) yy = np.stack([y, y]) assert_eq(xx, yy) @pytest.mark.parametrize( "arg_order", [(0, 0, 1), (0, 1, 0), (0, 1, 1), (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1)], ) @pytest.mark.parametrize("func", [lambda a, b, c: np.where(a.astype(bool), b, c)]) def test_ternary(func, arg_order): y = sparse.random((50, 50), density=0.25) x = y.todense() xx = func(x, x, x) args = [(x, y)[i] for i in arg_order] yy = func(*args) assert_eq(xx, yy) @pytest.mark.parametrize("func", [np.shape, np.size, np.ndim]) def test_property(func): y = sparse.random((50, 50), density=0.25) x = y.todense() xx = func(x) yy = func(y) assert xx == yy sparse-0.12.0/sparse/tests/test_compressed.py000066400000000000000000000301101402510130100212610ustar00rootroot00000000000000import sparse import pytest import numpy as np import scipy from sparse._compressed import GCXS from sparse._utils import assert_eq @pytest.fixture(scope="module", params=["f8", "f4", "i8", "i4"]) def random_sparse(request): dtype = request.param if np.issubdtype(dtype, np.integer): def data_rvs(n): return np.random.randint(-1000, 1000, 
n) else: data_rvs = None return sparse.random( (20, 30, 40), density=0.25, format="gcxs", data_rvs=data_rvs ).astype(dtype) @pytest.fixture(scope="module", params=["f8", "f4", "i8", "i4"]) def random_sparse_small(request): dtype = request.param if np.issubdtype(dtype, np.integer): def data_rvs(n): return np.random.randint(-10, 10, n) else: data_rvs = None return sparse.random( (20, 30, 40), density=0.25, format="gcxs", data_rvs=data_rvs ).astype(dtype) @pytest.mark.parametrize( "reduction, kwargs", [ ("sum", {}), ("sum", {"dtype": np.float32}), ("mean", {}), ("mean", {"dtype": np.float32}), ("prod", {}), ("max", {}), ("min", {}), ("std", {}), ("var", {}), ], ) @pytest.mark.parametrize("axis", [None, 0, 1, 2, (0, 2), -3, (1, -1)]) @pytest.mark.parametrize("keepdims", [True, False]) def test_reductions(reduction, random_sparse, axis, keepdims, kwargs): x = random_sparse y = x.todense() xx = getattr(x, reduction)(axis=axis, keepdims=keepdims, **kwargs) yy = getattr(y, reduction)(axis=axis, keepdims=keepdims, **kwargs) assert_eq(xx, yy) @pytest.mark.xfail( reason=("Setting output dtype=float16 produces results " "inconsistent with numpy") ) @pytest.mark.filterwarnings("ignore:overflow") @pytest.mark.parametrize( "reduction, kwargs", [("sum", {"dtype": np.float16}), ("mean", {"dtype": np.float16})], ) @pytest.mark.parametrize("axis", [None, 0, 1, 2, (0, 2)]) def test_reductions_float16(random_sparse, reduction, kwargs, axis): x = random_sparse y = x.todense() xx = getattr(x, reduction)(axis=axis, **kwargs) yy = getattr(y, reduction)(axis=axis, **kwargs) assert_eq(xx, yy, atol=1e-2) @pytest.mark.parametrize("reduction,kwargs", [("any", {}), ("all", {})]) @pytest.mark.parametrize("axis", [None, 0, 1, 2, (0, 2), -3, (1, -1)]) @pytest.mark.parametrize("keepdims", [True, False]) def test_reductions_bool(random_sparse, reduction, kwargs, axis, keepdims): y = np.zeros((2, 3, 4), dtype=bool) y[0] = True y[1, 1, 1] = True x = sparse.COO.from_numpy(y) xx = getattr(x, reduction)(axis=axis, keepdims=keepdims, **kwargs) yy = getattr(y, reduction)(axis=axis, keepdims=keepdims, **kwargs) assert_eq(xx, yy) @pytest.mark.parametrize( "reduction,kwargs", [ (np.max, {}), (np.sum, {}), (np.sum, {"dtype": np.float32}), (np.mean, {}), (np.mean, {"dtype": np.float32}), (np.prod, {}), (np.min, {}), ], ) @pytest.mark.parametrize("axis", [None, 0, 1, 2, (0, 2), -1, (0, -1)]) @pytest.mark.parametrize("keepdims", [True, False]) def test_ufunc_reductions(random_sparse, reduction, kwargs, axis, keepdims): x = random_sparse y = x.todense() xx = reduction(x, axis=axis, keepdims=keepdims, **kwargs) yy = reduction(y, axis=axis, keepdims=keepdims, **kwargs) assert_eq(xx, yy) # If not a scalar/1 element array, must be a sparse array if xx.size > 1: assert isinstance(xx, GCXS) @pytest.mark.parametrize( "reduction,kwargs", [ (np.max, {}), (np.sum, {"axis": 0}), (np.prod, {"keepdims": True}), (np.minimum.reduce, {"axis": 0}), ], ) @pytest.mark.parametrize("fill_value", [0, 1.0, -1, -2.2, 5.0]) def test_ufunc_reductions_kwargs(reduction, kwargs, fill_value): x = sparse.random((2, 3, 4), density=0.5, format="gcxs", fill_value=fill_value) y = x.todense() xx = reduction(x, **kwargs) yy = reduction(y, **kwargs) assert_eq(xx, yy) # If not a scalar/1 element array, must be a sparse array if xx.size > 1: assert isinstance(xx, GCXS) @pytest.mark.parametrize( "a,b", [ [(3, 4), (3, 4)], [(12,), (3, 4)], [(12,), (3, -1)], [(3, 4), (12,)], [(3, 4), (-1, 4)], [(3, 4), (3, -1)], [(2, 3, 4, 5), (8, 15)], [(2, 3, 4, 5), (24, 5)], [(2, 3, 4, 5), (20, 
6)], [(), ()], ], ) def test_reshape(a, b): s = sparse.random(a, density=0.5, format="gcxs") x = s.todense() assert_eq(x.reshape(b), s.reshape(b)) def test_reshape_same(): s = sparse.random((3, 5), density=0.5, format="gcxs") assert s.reshape(s.shape) is s @pytest.mark.parametrize( "a,b", [ [(3, 4, 5), (2, 1, 0)], [(12,), None], [(9, 10), (1, 0)], [(4, 3, 5), (1, 0, 2)], [(5, 4, 3), (0, 2, 1)], [(3, 4, 5, 6), (0, 2, 1, 3)], ], ) def test_tranpose(a, b): s = sparse.random(a, density=0.5, format="gcxs") x = s.todense() assert_eq(x.transpose(b), s.transpose(b)) def test_to_scipy_sparse(): s = sparse.random((3, 5), density=0.5, format="gcxs", compressed_axes=(0,)) a = s.to_scipy_sparse() b = scipy.sparse.csr_matrix(s.todense()) assert_eq(a, b) s = sparse.random((3, 5), density=0.5, format="gcxs", compressed_axes=(1,)) a = s.to_scipy_sparse() b = scipy.sparse.csc_matrix(s.todense()) assert_eq(a, b) def test_tocoo(): coo = sparse.random((5, 6), density=0.5) b = GCXS.from_coo(coo) assert_eq(b.tocoo(), coo) @pytest.mark.parametrize("complex", [True, False]) def test_complex_methods(complex): if complex: x = np.array([1 + 2j, 2 - 1j, 0, 1, 0]) else: x = np.array([1, 2, 0, 0, 0]) s = GCXS.from_numpy(x) assert_eq(s.imag, x.imag) assert_eq(s.real, x.real) assert_eq(s.conj(), x.conj()) @pytest.mark.parametrize( "index", [ # Integer 0, 1, -1, (1, 1, 1), # Pure slices (slice(0, 2),), (slice(None, 2), slice(None, 2)), (slice(1, None), slice(1, None)), (slice(None, None),), (slice(None, None, -1),), (slice(None, 2, -1), slice(None, 2, -1)), (slice(1, None, 2), slice(1, None, 2)), (slice(None, None, 2),), (slice(None, 2, -1), slice(None, 2, -2)), (slice(1, None, 2), slice(1, None, 1)), (slice(None, None, -2),), # Combinations (0, slice(0, 2)), (slice(0, 1), 0), (None, slice(1, 3), 0), (slice(0, 3), None, 0), (slice(1, 2), slice(2, 4)), (slice(1, 2), slice(None, None)), (slice(1, 2), slice(None, None), 2), (slice(1, 2, 2), slice(None, None), 2), (slice(1, 2, None), slice(None, None, 2), 2), (slice(1, 2, -2), slice(None, None), -2), (slice(1, 2, None), slice(None, None, -2), 2), (slice(1, 2, -1), slice(None, None), -1), (slice(1, 2, None), slice(None, None, -1), 2), (slice(2, 0, -1), slice(None, None), -1), (slice(-2, None, None),), (slice(-1, None, None), slice(-2, None, None)), # With ellipsis (Ellipsis, slice(1, 3)), (1, Ellipsis, slice(1, 3)), (slice(0, 1), Ellipsis), (Ellipsis, None), (None, Ellipsis), (1, Ellipsis), (1, Ellipsis, None), (1, 1, 1, Ellipsis), (Ellipsis, 1, None), # Pathological - Slices larger than array (slice(None, 1000)), (slice(None), slice(None, 1000)), (slice(None), slice(1000, -1000, -1)), (slice(None), slice(1000, -1000, -50)), # Pathological - Wrong ordering of start/stop (slice(5, 0),), (slice(0, 5, -1),), ], ) @pytest.mark.parametrize("compressed_axes", [(0,), (1,), (2,), (0, 1), (0, 2), (1, 2)]) def test_slicing(index, compressed_axes): s = sparse.random( (2, 3, 4), density=0.5, format="gcxs", compressed_axes=compressed_axes ) x = s.todense() assert_eq(x[index], s[index]) @pytest.mark.parametrize( "index", [ ([1, 0], 0), (1, [0, 2]), (0, [1, 0], 0), (1, [2, 0], 0), ([True, False], slice(1, None), slice(-2, None)), (slice(1, None), slice(-2, None), [True, False, True, False]), ([1, 0],), (Ellipsis, [2, 1, 3]), (slice(None), [2, 1, 2]), (1, [2, 0, 1]), ], ) @pytest.mark.parametrize("compressed_axes", [(0,), (1,), (2,), (0, 1), (0, 2), (1, 2)]) def test_advanced_indexing(index, compressed_axes): s = sparse.random( (2, 3, 4), density=0.5, format="gcxs", 
compressed_axes=compressed_axes ) x = s.todense() assert_eq(x[index], s[index]) @pytest.mark.parametrize( "index", [ (Ellipsis, Ellipsis), (1, 1, 1, 1), (slice(None),) * 4, 5, -5, "foo", [True, False, False], 0.5, [0.5], {"potato": "kartoffel"}, ([[0, 1]],), ], ) def test_slicing_errors(index): s = sparse.random((2, 3, 4), density=0.5, format="gcxs") with pytest.raises(IndexError): s[index] def test_change_compressed_axes(): coo = sparse.random((3, 4, 5), density=0.5) s = GCXS.from_coo(coo, compressed_axes=(0, 1)) b = GCXS.from_coo(coo, compressed_axes=(1, 2)) assert_eq(s, b) s.change_compressed_axes((1, 2)) assert_eq(s, b) def test_concatenate(): xx = sparse.random((2, 3, 4), density=0.5, format="gcxs") x = xx.todense() yy = sparse.random((5, 3, 4), density=0.5, format="gcxs") y = yy.todense() zz = sparse.random((4, 3, 4), density=0.5, format="gcxs") z = zz.todense() assert_eq( np.concatenate([x, y, z], axis=0), sparse.concatenate([xx, yy, zz], axis=0) ) xx = sparse.random((5, 3, 1), density=0.5, format="gcxs") x = xx.todense() yy = sparse.random((5, 3, 3), density=0.5, format="gcxs") y = yy.todense() zz = sparse.random((5, 3, 2), density=0.5, format="gcxs") z = zz.todense() assert_eq( np.concatenate([x, y, z], axis=2), sparse.concatenate([xx, yy, zz], axis=2) ) assert_eq( np.concatenate([x, y, z], axis=-1), sparse.concatenate([xx, yy, zz], axis=-1) ) @pytest.mark.parametrize("axis", [0, 1]) @pytest.mark.parametrize("func", [sparse.stack, sparse.concatenate]) def test_concatenate_mixed(func, axis): s = sparse.random((10, 10), density=0.5, format="gcxs") d = s.todense() with pytest.raises(ValueError): func([d, s, s], axis=axis) def test_concatenate_noarrays(): with pytest.raises(ValueError): sparse.concatenate([]) @pytest.mark.parametrize("shape", [(5,), (2, 3, 4), (5, 2)]) @pytest.mark.parametrize("axis", [0, 1, -1]) def test_stack(shape, axis): xx = sparse.random(shape, density=0.5, format="gcxs") x = xx.todense() yy = sparse.random(shape, density=0.5, format="gcxs") y = yy.todense() zz = sparse.random(shape, density=0.5, format="gcxs") z = zz.todense() assert_eq(np.stack([x, y, z], axis=axis), sparse.stack([xx, yy, zz], axis=axis)) @pytest.mark.parametrize("in_shape", [(5, 5), 62, (3, 3, 3)]) def test_flatten(in_shape): s = sparse.random(in_shape, format="gcxs", density=0.5) x = s.todense() a = s.flatten() e = x.flatten() assert_eq(e, a) def test_gcxs_valerr(): a = np.arange(300) with pytest.raises(ValueError): GCXS.from_numpy(a, idx_dtype=np.int8) def test_upcast(): a = sparse.random((50, 50, 50), density=0.1, format="coo", idx_dtype=np.uint8) b = a.asformat("gcxs") assert b.indices.dtype == np.uint16 a = sparse.random((8, 7, 6), density=0.5, format="gcxs", idx_dtype=np.uint8) b = sparse.random((6, 6, 6), density=0.8, format="gcxs", idx_dtype=np.uint8) assert sparse.concatenate((a, a)).indptr.dtype == np.uint16 assert sparse.stack((b, b)).indptr.dtype == np.uint16 def test_from_coo(): a = sparse.random((5, 5, 5), density=0.1, format="coo") b = GCXS(a) assert_eq(a, b) def test_from_coo_valerr(): a = sparse.random((25, 25, 25), density=0.01, format="coo") with pytest.raises(ValueError): GCXS.from_coo(a, idx_dtype=np.int8) sparse-0.12.0/sparse/tests/test_compressed_2d.py000066400000000000000000000061251402510130100216570ustar00rootroot00000000000000import numpy as np from numpy.core.numeric import indices import pytest import scipy.sparse from scipy.sparse import data from scipy.sparse.construct import random import scipy.stats import sparse from sparse import COO from 
sparse._compressed.compressed import GCXS, CSR, CSC from sparse._utils import assert_eq @pytest.fixture(scope="module", params=[CSR, CSC]) def cls(request): return request.param @pytest.fixture(scope="module", params=["f8", "f4", "i8", "i4"]) def dtype(request): return request.param @pytest.fixture(scope="module") def random_sparse(cls, dtype): if np.issubdtype(dtype, np.integer): def data_rvs(n): return np.random.randint(-1000, 1000, n) else: data_rvs = None return cls(sparse.random((20, 30), density=0.25, data_rvs=data_rvs).astype(dtype)) @pytest.fixture(scope="module") def random_sparse_small(cls, dtype): if np.issubdtype(dtype, np.integer): def data_rvs(n): return np.random.randint(-10, 10, n) else: data_rvs = None return cls( sparse.random((20, 30, 40), density=0.25, data_rvs=data_rvs).astype(dtype) ) def test_repr(random_sparse): cls = type(random_sparse).__name__ str_repr = repr(random_sparse) assert cls in str_repr def test_bad_constructor_input(cls): with pytest.raises(ValueError, match=r".*shape.*"): cls(arg="hello world") @pytest.mark.parametrize("n", [0, 1, 3]) def test_bad_nd_input(cls, n): a = np.ones(shape=tuple(5 for _ in range(n))) with pytest.raises(ValueError, match=f"{n}-d"): cls(a) @pytest.mark.parametrize("source_type", ["gcxs", "coo"]) def test_from_sparse(cls, source_type): gcxs = sparse.random((20, 30), density=0.25, format=source_type) result = cls(gcxs) assert_eq(result, gcxs) @pytest.mark.parametrize("scipy_type", ["coo", "csr", "csc", "lil"]) @pytest.mark.parametrize("CLS", [CSR, CSC, GCXS]) def test_from_scipy_sparse(scipy_type, CLS, dtype): orig = scipy.sparse.random(20, 30, density=0.2, format=scipy_type, dtype=dtype) ref = COO.from_scipy_sparse(orig) result = CLS.from_scipy_sparse(orig) assert_eq(ref, result) result_via_init = CLS(orig) assert_eq(ref, result_via_init) @pytest.mark.parametrize("cls_str", ["coo", "dok", "csr", "csc", "gcxs"]) def test_to_sparse(cls_str, random_sparse): result = random_sparse.asformat(cls_str) assert_eq(random_sparse, result) @pytest.mark.parametrize("copy", [True, False]) def test_transpose(random_sparse, copy): from operator import is_, is_not t = random_sparse.transpose(copy=copy) tt = t.transpose(copy=copy) # Check if a copy was made if copy: check = is_not else: check = is_ assert check(random_sparse.data, t.data) assert check(random_sparse.indices, t.indices) assert check(random_sparse.indptr, t.indptr) assert random_sparse.shape == t.shape[::-1] assert_eq(random_sparse, tt) assert type(random_sparse) == type(tt) def test_transpose_error(random_sparse): with pytest.raises(ValueError): random_sparse.transpose(axes=1) sparse-0.12.0/sparse/tests/test_coo.py000066400000000000000000001250441402510130100177100ustar00rootroot00000000000000import contextlib import operator import pickle import sys import numpy as np import pytest import scipy.sparse import scipy.stats import sparse from sparse import COO from sparse._settings import NEP18_ENABLED from sparse._utils import assert_eq, random_value_array @pytest.fixture(scope="module", params=["f8", "f4", "i8", "i4"]) def random_sparse(request): dtype = request.param if np.issubdtype(dtype, np.integer): def data_rvs(n): return np.random.randint(-1000, 1000, n) else: data_rvs = None return sparse.random((20, 30, 40), density=0.25, data_rvs=data_rvs).astype(dtype) @pytest.fixture(scope="module", params=["f8", "f4", "i8", "i4"]) def random_sparse_small(request): dtype = request.param if np.issubdtype(dtype, np.integer): def data_rvs(n): return np.random.randint(-10, 10, n) else: 
data_rvs = None return sparse.random((20, 30, 40), density=0.25, data_rvs=data_rvs).astype(dtype) @pytest.mark.parametrize( "reduction, kwargs", [("sum", {}), ("sum", {"dtype": np.float32}), ("prod", {})] ) @pytest.mark.parametrize("axis", [None, 0, 1, 2, (0, 2), -3, (1, -1)]) @pytest.mark.parametrize("keepdims", [True, False]) def test_reductions_fv(reduction, random_sparse_small, axis, keepdims, kwargs): x = random_sparse_small + np.random.randint(-1, 1, dtype="i4") y = x.todense() xx = getattr(x, reduction)(axis=axis, keepdims=keepdims, **kwargs) yy = getattr(y, reduction)(axis=axis, keepdims=keepdims, **kwargs) assert_eq(xx, yy) @pytest.mark.parametrize( "reduction, kwargs", [ ("sum", {}), ("sum", {"dtype": np.float32}), ("mean", {}), ("mean", {"dtype": np.float32}), ("prod", {}), ("max", {}), ("min", {}), ("std", {}), ("var", {}), ], ) @pytest.mark.parametrize("axis", [None, 0, 1, 2, (0, 2), -3, (1, -1)]) @pytest.mark.parametrize("keepdims", [True, False]) def test_reductions(reduction, random_sparse, axis, keepdims, kwargs): x = random_sparse y = x.todense() xx = getattr(x, reduction)(axis=axis, keepdims=keepdims, **kwargs) yy = getattr(y, reduction)(axis=axis, keepdims=keepdims, **kwargs) assert_eq(xx, yy) @pytest.mark.xfail( reason=("Setting output dtype=float16 produces results " "inconsistent with numpy") ) @pytest.mark.filterwarnings("ignore:overflow") @pytest.mark.parametrize( "reduction, kwargs", [("sum", {"dtype": np.float16}), ("mean", {"dtype": np.float16})], ) @pytest.mark.parametrize("axis", [None, 0, 1, 2, (0, 2)]) def test_reductions_float16(random_sparse, reduction, kwargs, axis): x = random_sparse y = x.todense() xx = getattr(x, reduction)(axis=axis, **kwargs) yy = getattr(y, reduction)(axis=axis, **kwargs) assert_eq(xx, yy, atol=1e-2) @pytest.mark.parametrize("reduction,kwargs", [("any", {}), ("all", {})]) @pytest.mark.parametrize("axis", [None, 0, 1, 2, (0, 2), -3, (1, -1)]) @pytest.mark.parametrize("keepdims", [True, False]) def test_reductions_bool(random_sparse, reduction, kwargs, axis, keepdims): y = np.zeros((2, 3, 4), dtype=bool) y[0] = True y[1, 1, 1] = True x = sparse.COO.from_numpy(y) xx = getattr(x, reduction)(axis=axis, keepdims=keepdims, **kwargs) yy = getattr(y, reduction)(axis=axis, keepdims=keepdims, **kwargs) assert_eq(xx, yy) @pytest.mark.parametrize( "reduction,kwargs", [ (np.max, {}), (np.sum, {}), (np.sum, {"dtype": np.float32}), (np.mean, {}), (np.mean, {"dtype": np.float32}), (np.prod, {}), (np.min, {}), ], ) @pytest.mark.parametrize("axis", [None, 0, 1, 2, (0, 2), -1, (0, -1)]) @pytest.mark.parametrize("keepdims", [True, False]) def test_ufunc_reductions(random_sparse, reduction, kwargs, axis, keepdims): x = random_sparse y = x.todense() xx = reduction(x, axis=axis, keepdims=keepdims, **kwargs) yy = reduction(y, axis=axis, keepdims=keepdims, **kwargs) assert_eq(xx, yy) # If not a scalar/1 element array, must be a sparse array if xx.size > 1: assert isinstance(xx, COO) @pytest.mark.parametrize( "reduction,kwargs", [ (np.max, {}), (np.sum, {"axis": 0}), (np.prod, {"keepdims": True}), (np.add.reduce, {}), (np.add.reduce, {"keepdims": True}), (np.minimum.reduce, {"axis": 0}), ], ) def test_ufunc_reductions_kwargs(reduction, kwargs): x = sparse.random((2, 3, 4), density=0.5) y = x.todense() xx = reduction(x, **kwargs) yy = reduction(y, **kwargs) assert_eq(xx, yy) # If not a scalar/1 element array, must be a sparse array if xx.size > 1: assert isinstance(xx, COO) @pytest.mark.parametrize( "reduction", ["nansum", "nanmean", "nanprod", "nanmax", 
"nanmin"] ) @pytest.mark.parametrize("axis", [None, 0, 1]) @pytest.mark.parametrize("keepdims", [False]) @pytest.mark.parametrize("fraction", [0.25, 0.5, 0.75, 1.0]) @pytest.mark.filterwarnings("ignore:All-NaN") @pytest.mark.filterwarnings("ignore:Mean of empty slice") def test_nan_reductions(reduction, axis, keepdims, fraction): s = sparse.random( (2, 3, 4), data_rvs=random_value_array(np.nan, fraction), density=0.25 ) x = s.todense() expected = getattr(np, reduction)(x, axis=axis, keepdims=keepdims) actual = getattr(sparse, reduction)(s, axis=axis, keepdims=keepdims) assert_eq(expected, actual) @pytest.mark.parametrize("reduction", ["nanmax", "nanmin", "nanmean"]) @pytest.mark.parametrize("axis", [None, 0, 1]) def test_all_nan_reduction_warning(reduction, axis): x = random_value_array(np.nan, 1.0)(2 * 3 * 4).reshape(2, 3, 4) s = COO.from_numpy(x) with pytest.warns(RuntimeWarning): getattr(sparse, reduction)(s, axis=axis) @pytest.mark.parametrize( "axis", [None, (1, 2, 0), (2, 1, 0), (0, 1, 2), (0, 1, -1), (0, -2, -1), (-3, -2, -1)], ) def test_transpose(axis): x = sparse.random((2, 3, 4), density=0.25) y = x.todense() xx = x.transpose(axis) yy = y.transpose(axis) assert_eq(xx, yy) @pytest.mark.parametrize( "axis", [ (0, 1), # too few (0, 1, 2, 3), # too many (3, 1, 0), # axis 3 illegal (0, -1, -4), # axis -4 illegal (0, 0, 1), # duplicate axis 0 (0, -1, 2), # duplicate axis -1 == 2 0.3, # Invalid type in axis ((0, 1, 2),), # Iterable inside iterable ], ) def test_transpose_error(axis): x = sparse.random((2, 3, 4), density=0.25) with pytest.raises(ValueError): x.transpose(axis) @pytest.mark.parametrize( "a,b", [ [(3, 4), (5, 5)], [(12,), (3, 4)], [(12,), (3, 6)], [(5, 5, 5), (6, 6, 6)], [(3, 4), (9, 4)], [(5,), (4,)], [(2, 3, 4, 5), (2, 3, 4, 5, 6)], [(100,), (5, 5)], [(2, 3, 4, 5), (20, 6)], [(), ()], ], ) def test_resize(a, b): s = sparse.random(a, density=0.5) orig_size = s.size x = s.todense() x.resize(b) s.resize(b) temp = x.reshape(x.size) temp[orig_size:] = s.fill_value assert isinstance(s, sparse.SparseArray) assert_eq(x, s) def test_resize_upcast(): s = sparse.random((10, 10, 10), density=0.5, format="coo", idx_dtype=np.uint8) s.resize(600) assert s.coords.dtype == np.uint16 @pytest.mark.parametrize("axis1", [-3, -2, -1, 0, 1, 2]) @pytest.mark.parametrize("axis2", [-3, -2, -1, 0, 1, 2]) def test_swapaxes(axis1, axis2): x = sparse.random((2, 3, 4), density=0.25) y = x.todense() xx = x.swapaxes(axis1, axis2) yy = y.swapaxes(axis1, axis2) assert_eq(xx, yy) @pytest.mark.parametrize("axis1", [-4, 3]) @pytest.mark.parametrize("axis2", [-4, 3, 0]) def test_swapaxes_error(axis1, axis2): x = sparse.random((2, 3, 4), density=0.25) with pytest.raises(ValueError): x.swapaxes(axis1, axis2) @pytest.mark.parametrize( "source, destination", [ [0, 1], [2, 1], [-2, 1], [-2, -3], [(0, 1), (2, 3)], [(-1, 0), (0, 1)], [(0, 1, 2), (2, 1, 0)], [(0, 1, 2), (-2, -3, -1)], ], ) def test_moveaxis(source, destination): x = sparse.random((2, 3, 4, 5), density=0.25) y = x.todense() xx = sparse.moveaxis(x, source, destination) yy = np.moveaxis(y, source, destination) assert_eq(xx, yy) @pytest.mark.parametrize( "source, destination", [[0, -4], [(0, 5), (1, 2)], [(0, 1, 2), (2, 1)]] ) def test_moveaxis_error(source, destination): x = sparse.random((2, 3, 4), density=0.25) with pytest.raises(ValueError): sparse.moveaxis(x, source, destination) @pytest.mark.parametrize( "a,b", [ [(3, 4), (5, 5)], [(12,), (3, 4)], [(12,), (3, 6)], [(5, 5, 5), (6, 6, 6)], [(3, 4), (9, 4)], [(5,), (4,)], [(2, 3, 4, 5), (2, 3, 4, 5, 
6)], [(100,), (5, 5)], [(2, 3, 4, 5), (20, 6)], [(), ()], ], ) def test_resize(a, b): s = sparse.random(a, density=0.5) orig_size = s.size x = s.todense() x = np.resize(x, b) s.resize(b) temp = x.reshape(x.size) temp[orig_size:] = s.fill_value assert isinstance(s, sparse.SparseArray) assert_eq(x, s) @pytest.mark.parametrize( "a,b", [ [(3, 4), (3, 4)], [(12,), (3, 4)], [(12,), (3, -1)], [(3, 4), (12,)], [(3, 4), (-1, 4)], [(3, 4), (3, -1)], [(2, 3, 4, 5), (8, 15)], [(2, 3, 4, 5), (24, 5)], [(2, 3, 4, 5), (20, 6)], [(), ()], ], ) def test_reshape(a, b): s = sparse.random(a, density=0.5) x = s.todense() assert_eq(x.reshape(b), s.reshape(b)) def test_large_reshape(): n = 100 m = 10 row = np.arange( n, dtype=np.uint16 ) # np.random.randint(0, n, size=n, dtype=np.uint16) col = row % m # np.random.randint(0, m, size=n, dtype=np.uint16) data = np.ones(n, dtype=np.uint8) x = COO((data, (row, col)), sorted=True, has_duplicates=False) assert_eq(x, x.reshape(x.shape)) def test_reshape_same(): s = sparse.random((3, 5), density=0.5) assert s.reshape(s.shape) is s def test_reshape_function(): s = sparse.random((5, 3), density=0.5) x = s.todense() shape = (3, 5) s2 = np.reshape(s, shape) assert isinstance(s2, COO) assert_eq(s2, x.reshape(shape)) def test_reshape_upcast(): a = sparse.random((10, 10, 10), density=0.5, format="coo", idx_dtype=np.uint8) assert a.reshape(1000).coords.dtype == np.uint16 def test_to_scipy_sparse(): s = sparse.random((3, 5), density=0.5) a = s.to_scipy_sparse() b = scipy.sparse.coo_matrix(s.todense()) assert_eq(a, b) @pytest.mark.parametrize("a_ndim", [1, 2, 3]) @pytest.mark.parametrize("b_ndim", [1, 2, 3]) def test_kron(a_ndim, b_ndim): a_shape = (2, 3, 4)[:a_ndim] b_shape = (5, 6, 7)[:b_ndim] sa = sparse.random(a_shape, density=0.5) a = sa.todense() sb = sparse.random(b_shape, density=0.5) b = sb.todense() sol = np.kron(a, b) assert_eq(sparse.kron(sa, sb), sol) assert_eq(sparse.kron(sa, b), sol) assert_eq(sparse.kron(a, sb), sol) with pytest.raises(ValueError): assert_eq(sparse.kron(a, b), sol) @pytest.mark.parametrize( "a_spmatrix, b_spmatrix", [(True, True), (True, False), (False, True)] ) def test_kron_spmatrix(a_spmatrix, b_spmatrix): sa = sparse.random((3, 4), density=0.5) a = sa.todense() sb = sparse.random((5, 6), density=0.5) b = sb.todense() if a_spmatrix: sa = sa.tocsr() if b_spmatrix: sb = sb.tocsr() sol = np.kron(a, b) assert_eq(sparse.kron(sa, sb), sol) assert_eq(sparse.kron(sa, b), sol) assert_eq(sparse.kron(a, sb), sol) with pytest.raises(ValueError): assert_eq(sparse.kron(a, b), sol) @pytest.mark.parametrize("ndim", [1, 2, 3]) def test_kron_scalar(ndim): if ndim: a_shape = (3, 4, 5)[:ndim] sa = sparse.random(a_shape, density=0.5) a = sa.todense() else: sa = a = np.array(6) scalar = np.array(5) sol = np.kron(a, scalar) assert_eq(sparse.kron(sa, scalar), sol) assert_eq(sparse.kron(scalar, sa), sol) def test_gt(): s = sparse.random((2, 3, 4), density=0.5) x = s.todense() m = x.mean() assert_eq(x > m, s > m) m = s.data[2] assert_eq(x > m, s > m) assert_eq(x >= m, s >= m) @pytest.mark.parametrize( "index", [ # Integer 0, 1, -1, (1, 1, 1), # Pure slices (slice(0, 2),), (slice(None, 2), slice(None, 2)), (slice(1, None), slice(1, None)), (slice(None, None),), (slice(None, None, -1),), (slice(None, 2, -1), slice(None, 2, -1)), (slice(1, None, 2), slice(1, None, 2)), (slice(None, None, 2),), (slice(None, 2, -1), slice(None, 2, -2)), (slice(1, None, 2), slice(1, None, 1)), (slice(None, None, -2),), # Combinations (0, slice(0, 2)), (slice(0, 1), 0), (None, slice(1, 3), 0), 
(slice(0, 3), None, 0), (slice(1, 2), slice(2, 4)), (slice(1, 2), slice(None, None)), (slice(1, 2), slice(None, None), 2), (slice(1, 2, 2), slice(None, None), 2), (slice(1, 2, None), slice(None, None, 2), 2), (slice(1, 2, -2), slice(None, None), -2), (slice(1, 2, None), slice(None, None, -2), 2), (slice(1, 2, -1), slice(None, None), -1), (slice(1, 2, None), slice(None, None, -1), 2), (slice(2, 0, -1), slice(None, None), -1), (slice(-2, None, None),), (slice(-1, None, None), slice(-2, None, None)), # With ellipsis (Ellipsis, slice(1, 3)), (1, Ellipsis, slice(1, 3)), (slice(0, 1), Ellipsis), (Ellipsis, None), (None, Ellipsis), (1, Ellipsis), (1, Ellipsis, None), (1, 1, 1, Ellipsis), (Ellipsis, 1, None), # With multi-axis advanced indexing ([0, 1],) * 2, ([0, 1], [0, 2]), ([0, 0, 0], [0, 1, 2], [1, 2, 1]), # Pathological - Slices larger than array (slice(None, 1000)), (slice(None), slice(None, 1000)), (slice(None), slice(1000, -1000, -1)), (slice(None), slice(1000, -1000, -50)), # Pathological - Wrong ordering of start/stop (slice(5, 0),), (slice(0, 5, -1),), (slice(0, 0, None),), ], ) def test_slicing(index): s = sparse.random((2, 3, 4), density=0.5) x = s.todense() assert_eq(x[index], s[index]) @pytest.mark.parametrize( "index", [ ([1, 0], 0), (1, [0, 2]), (0, [1, 0], 0), (1, [2, 0], 0), (1, [], 0), ([True, False], slice(1, None), slice(-2, None)), (slice(1, None), slice(-2, None), [True, False, True, False]), ([1, 0],), (Ellipsis, [2, 1, 3]), (slice(None), [2, 1, 2]), (1, [2, 0, 1]), ], ) def test_advanced_indexing(index): s = sparse.random((2, 3, 4), density=0.5) x = s.todense() assert_eq(x[index], s[index]) def test_custom_dtype_slicing(): dt = np.dtype( [("part1", np.float_), ("part2", np.int_, (2,)), ("part3", np.int_, (2, 2))] ) x = np.zeros((2, 3, 4), dtype=dt) x[1, 1, 1] = (0.64, [4, 2], [[1, 2], [3, 0]]) s = COO.from_numpy(x) assert x[1, 1, 1] == s[1, 1, 1] assert x[0, 1, 2] == s[0, 1, 2] assert_eq(x["part1"], s["part1"]) assert_eq(x["part2"], s["part2"]) assert_eq(x["part3"], s["part3"]) @pytest.mark.parametrize( "index", [ (Ellipsis, Ellipsis), (1, 1, 1, 1), (slice(None),) * 4, 5, -5, "foo", [True, False, False], 0.5, [0.5], {"potato": "kartoffel"}, ([[0, 1]],), ], ) def test_slicing_errors(index): s = sparse.random((2, 3, 4), density=0.5) with pytest.raises(IndexError): s[index] def test_concatenate(): xx = sparse.random((2, 3, 4), density=0.5) x = xx.todense() yy = sparse.random((5, 3, 4), density=0.5) y = yy.todense() zz = sparse.random((4, 3, 4), density=0.5) z = zz.todense() assert_eq( np.concatenate([x, y, z], axis=0), sparse.concatenate([xx, yy, zz], axis=0) ) xx = sparse.random((5, 3, 1), density=0.5) x = xx.todense() yy = sparse.random((5, 3, 3), density=0.5) y = yy.todense() zz = sparse.random((5, 3, 2), density=0.5) z = zz.todense() assert_eq( np.concatenate([x, y, z], axis=2), sparse.concatenate([xx, yy, zz], axis=2) ) assert_eq( np.concatenate([x, y, z], axis=-1), sparse.concatenate([xx, yy, zz], axis=-1) ) @pytest.mark.parametrize("axis", [0, 1]) @pytest.mark.parametrize("func", [sparse.stack, sparse.concatenate]) def test_concatenate_mixed(func, axis): s = sparse.random((10, 10), density=0.5) d = s.todense() with pytest.raises(ValueError): func([d, s, s], axis=axis) def test_concatenate_noarrays(): with pytest.raises(ValueError): sparse.concatenate([]) @pytest.mark.parametrize("shape", [(5,), (2, 3, 4), (5, 2)]) @pytest.mark.parametrize("axis", [0, 1, -1]) def test_stack(shape, axis): xx = sparse.random(shape, density=0.5) x = xx.todense() yy = sparse.random(shape, 
density=0.5) y = yy.todense() zz = sparse.random(shape, density=0.5) z = zz.todense() assert_eq(np.stack([x, y, z], axis=axis), sparse.stack([xx, yy, zz], axis=axis)) def test_large_concat_stack(): data = np.array([1], dtype=np.uint8) coords = np.array([[255]], dtype=np.uint8) xs = COO(coords, data, shape=(256,), has_duplicates=False, sorted=True) x = xs.todense() assert_eq(np.stack([x, x]), sparse.stack([xs, xs])) assert_eq(np.concatenate((x, x)), sparse.concatenate((xs, xs))) def test_addition(): a = sparse.random((2, 3, 4), density=0.5) x = a.todense() b = sparse.random((2, 3, 4), density=0.5) y = b.todense() assert_eq(x + y, a + b) assert_eq(x - y, a - b) @pytest.mark.parametrize("scalar", [2, 2.5, np.float32(2.0), np.int8(3)]) def test_scalar_multiplication(scalar): a = sparse.random((2, 3, 4), density=0.5) x = a.todense() assert_eq(x * scalar, a * scalar) assert (a * scalar).nnz == a.nnz assert_eq(scalar * x, scalar * a) assert (scalar * a).nnz == a.nnz assert_eq(x / scalar, a / scalar) assert (a / scalar).nnz == a.nnz assert_eq(x // scalar, a // scalar) # division may reduce nnz. @pytest.mark.filterwarnings("ignore:divide by zero") def test_scalar_exponentiation(): a = sparse.random((2, 3, 4), density=0.5) x = a.todense() assert_eq(x ** 2, a ** 2) assert_eq(x ** 0.5, a ** 0.5) assert_eq(x ** -1, a ** -1) def test_create_with_lists_of_tuples(): L = [((0, 0, 0), 1), ((1, 2, 1), 1), ((1, 1, 1), 2), ((1, 3, 2), 3)] s = COO(L) x = np.zeros((2, 4, 3), dtype=np.asarray([1, 2, 3]).dtype) for ind, value in L: x[ind] = value assert_eq(s, x) def test_sizeof(): x = np.eye(100) y = COO.from_numpy(x) nb = sys.getsizeof(y) assert 400 < nb < x.nbytes / 10 def test_scipy_sparse_interface(): n = 100 m = 10 row = np.random.randint(0, n, size=n, dtype=np.uint16) col = np.random.randint(0, m, size=n, dtype=np.uint16) data = np.ones(n, dtype=np.uint8) inp = (data, (row, col)) x = scipy.sparse.coo_matrix(inp) xx = sparse.COO(inp) assert_eq(x, xx, check_nnz=False) assert_eq(x.T, xx.T, check_nnz=False) assert_eq(xx.to_scipy_sparse(), x, check_nnz=False) assert_eq(COO.from_scipy_sparse(xx.to_scipy_sparse()), xx, check_nnz=False) assert_eq(x, xx, check_nnz=False) assert_eq(x.T.dot(x), xx.T.dot(xx), check_nnz=False) assert isinstance(x + xx, COO) assert isinstance(xx + x, COO) @pytest.mark.parametrize("scipy_format", ["coo", "csr", "dok", "csc"]) def test_scipy_sparse_interaction(scipy_format): x = sparse.random((10, 20), density=0.2).todense() sp = getattr(scipy.sparse, scipy_format + "_matrix")(x) coo = COO(x) assert isinstance(sp + coo, COO) assert isinstance(coo + sp, COO) assert_eq(sp, coo) @pytest.mark.parametrize( "func", [operator.mul, operator.add, operator.sub, operator.gt, operator.lt, operator.ne], ) def test_op_scipy_sparse(func): xs = sparse.random((3, 4), density=0.5) y = sparse.random((3, 4), density=0.5).todense() ys = scipy.sparse.csr_matrix(y) x = xs.todense() assert_eq(func(x, y), func(xs, ys)) @pytest.mark.parametrize( "func", [ operator.add, operator.sub, pytest.param( operator.mul, marks=pytest.mark.xfail(reason="Scipy sparse auto-densifies in this case."), ), pytest.param( operator.gt, marks=pytest.mark.xfail(reason="Scipy sparse doesn't support this yet."), ), pytest.param( operator.lt, marks=pytest.mark.xfail(reason="Scipy sparse doesn't support this yet."), ), pytest.param( operator.ne, marks=pytest.mark.xfail(reason="Scipy sparse doesn't support this yet."), ), ], ) def test_op_scipy_sparse_left(func): ys = sparse.random((3, 4), density=0.5) x = sparse.random((3, 4), 
density=0.5).todense() xs = scipy.sparse.csr_matrix(x) y = ys.todense() assert_eq(func(x, y), func(xs, ys)) def test_cache_csr(): x = sparse.random((10, 5), density=0.5).todense() s = COO(x, cache=True) assert isinstance(s.tocsr(), scipy.sparse.csr_matrix) assert isinstance(s.tocsc(), scipy.sparse.csc_matrix) assert s.tocsr() is s.tocsr() assert s.tocsc() is s.tocsc() def test_empty_shape(): x = COO(np.empty((0, 1), dtype=np.int8), [1.0]) assert x.shape == () assert_eq(2 * x, np.float_(2.0)) def test_single_dimension(): x = COO([1, 3], [1.0, 3.0]) assert x.shape == (4,) assert_eq(x, np.array([0, 1.0, 0, 3.0])) def test_large_sum(): n = 500000 x = np.random.randint(0, 10000, size=(n,)) y = np.random.randint(0, 1000, size=(n,)) z = np.random.randint(0, 3, size=(n,)) data = np.random.random(n) a = COO((x, y, z), data) assert a.shape == (10000, 1000, 3) b = a.sum(axis=2) assert b.nnz > 100000 def test_add_many_sparse_arrays(): x = COO({(1, 1): 1}) y = sum([x] * 100) assert y.nnz < np.prod(y.shape) def test_caching(): x = COO({(9, 9, 9): 1}) assert ( x[:].reshape((100, 10)).transpose().tocsr() is not x[:].reshape((100, 10)).transpose().tocsr() ) x = COO({(9, 9, 9): 1}, cache=True) assert ( x[:].reshape((100, 10)).transpose().tocsr() is x[:].reshape((100, 10)).transpose().tocsr() ) x = COO({(1, 1, 1, 1, 1, 1, 1, 2): 1}, cache=True) for i in range(x.ndim): x.reshape(x.size) assert len(x._cache["reshape"]) < 5 def test_scalar_slicing(): x = np.array([0, 1]) s = COO(x) assert np.isscalar(s[0]) assert_eq(x[0], s[0]) assert isinstance(s[0, ...], COO) assert s[0, ...].shape == () assert_eq(x[0, ...], s[0, ...]) assert np.isscalar(s[1]) assert_eq(x[1], s[1]) assert isinstance(s[1, ...], COO) assert s[1, ...].shape == () assert_eq(x[1, ...], s[1, ...]) @pytest.mark.parametrize( "shape, k", [((3, 4), 0), ((3, 4, 5), 1), ((4, 2), -1), ((2, 4), -2), ((4, 4), 1000)], ) def test_triul(shape, k): s = sparse.random(shape, density=0.5) x = s.todense() assert_eq(np.triu(x, k), sparse.triu(s, k)) assert_eq(np.tril(x, k), sparse.tril(s, k)) def test_empty_reduction(): x = np.zeros((2, 3, 4), dtype=np.float_) xs = COO.from_numpy(x) assert_eq(x.sum(axis=(0, 2)), xs.sum(axis=(0, 2))) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4)]) @pytest.mark.parametrize("density", [0.1, 0.3, 0.5, 0.7]) def test_random_shape(shape, density): s = sparse.random(shape, density) assert isinstance(s, COO) assert s.shape == shape expected_nnz = density * np.prod(shape) assert np.floor(expected_nnz) <= s.nnz <= np.ceil(expected_nnz) @pytest.mark.parametrize("shape, nnz", [((1,), 1), ((2,), 0), ((3, 4), 5)]) def test_random_nnz(shape, nnz): s = sparse.random(shape, nnz=nnz) assert isinstance(s, COO) assert s.nnz == nnz @pytest.mark.parametrize( "density, nnz", [(1, 1), (1.01, None), (-0.01, None), (None, 2)] ) def test_random_invalid_density_and_nnz(density, nnz): with pytest.raises(ValueError): sparse.random((1,), density, nnz=nnz) def test_two_random_unequal(): s1 = sparse.random((2, 3, 4), 0.3) s2 = sparse.random((2, 3, 4), 0.3) assert not np.allclose(s1.todense(), s2.todense()) def test_two_random_same_seed(): state = np.random.randint(100) s1 = sparse.random((2, 3, 4), 0.3, random_state=state) s2 = sparse.random((2, 3, 4), 0.3, random_state=state) assert_eq(s1, s2) @pytest.mark.parametrize( "rvs, dtype", [ (None, np.float64), (scipy.stats.poisson(25, loc=10).rvs, np.int), (lambda x: np.random.choice([True, False], size=x), np.bool), ], ) @pytest.mark.parametrize("shape", [(2, 4, 5), (20, 40, 50)]) 
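# The test below exercises the ``data_rvs`` hook of ``sparse.random``: the
# callable is handed the number of stored values to generate, and the resulting
# array takes its dtype from whatever the sampler returns.  A minimal sketch of
# the same idea (illustrative only, not part of the suite):
#
#     rvs = scipy.stats.poisson(25, loc=10).rvs
#     s = sparse.random((20, 40, 50), density=0.1, data_rvs=rvs)
#     # the stored values come from the Poisson sampler, so s.dtype is integral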
@pytest.mark.parametrize("density", [0.0, 0.01, 0.1, 0.2]) def test_random_rvs(rvs, dtype, shape, density): x = sparse.random(shape, density, data_rvs=rvs) assert x.shape == shape assert x.dtype == dtype @pytest.mark.parametrize("format", ["coo", "dok"]) def test_random_fv(format): fv = np.random.rand() s = sparse.random((2, 3, 4), density=0.5, format=format, fill_value=fv) assert s.fill_value == fv def test_scalar_shape_construction(): x = np.random.rand(5) coords = np.arange(5)[None] s = COO(coords, x, shape=5) assert_eq(x, s) def test_len(): s = sparse.random((20, 30, 40)) assert len(s) == 20 def test_density(): s = sparse.random((20, 30, 40), density=0.1) assert np.isclose(s.density, 0.1) def test_size(): s = sparse.random((20, 30, 40)) assert s.size == 20 * 30 * 40 def test_np_array(): s = sparse.random((20, 30, 40)) with pytest.raises(RuntimeError): np.array(s) @pytest.mark.parametrize( "shapes", [ [(2,), (3, 2), (4, 3, 2)], [(3,), (2, 3), (2, 2, 3)], [(2,), (2, 2), (2, 2, 2)], [(4,), (4, 4), (4, 4, 4)], [(4,), (4, 4), (4, 4, 4)], [(4,), (4, 4), (4, 4, 4)], [(1, 1, 2), (1, 3, 1), (4, 1, 1)], [(2,), (2, 1), (2, 1, 1)], [(3,), (), (2, 3)], [(4, 4), (), ()], ], ) def test_three_arg_where(shapes): cs = sparse.random(shapes[0], density=0.5).astype(np.bool) xs = sparse.random(shapes[1], density=0.5) ys = sparse.random(shapes[2], density=0.5) c = cs.todense() x = xs.todense() y = ys.todense() expected = np.where(c, x, y) actual = sparse.where(cs, xs, ys) assert isinstance(actual, COO) assert_eq(expected, actual) def test_one_arg_where(): s = sparse.random((2, 3, 4), density=0.5) x = s.todense() expected = np.where(x) actual = sparse.where(s) assert len(expected) == len(actual) for e, a in zip(expected, actual): assert_eq(e, a, compare_dtype=False) def test_one_arg_where_dense(): x = np.random.rand(2, 3, 4) with pytest.raises(ValueError): sparse.where(x) def test_two_arg_where(): cs = sparse.random((2, 3, 4), density=0.5).astype(np.bool) xs = sparse.random((2, 3, 4), density=0.5) with pytest.raises(ValueError): sparse.where(cs, xs) @pytest.mark.parametrize("func", [operator.imul, operator.iadd, operator.isub]) def test_inplace_invalid_shape(func): xs = sparse.random((3, 4), density=0.5) ys = sparse.random((2, 3, 4), density=0.5) with pytest.raises(ValueError): func(xs, ys) def test_nonzero(): s = sparse.random((2, 3, 4), density=0.5) x = s.todense() expected = x.nonzero() actual = s.nonzero() assert isinstance(actual, tuple) assert len(expected) == len(actual) for e, a in zip(expected, actual): assert_eq(e, a, compare_dtype=False) def test_argwhere(): s = sparse.random((2, 3, 4), density=0.5) x = s.todense() assert_eq(np.argwhere(s), np.argwhere(x), compare_dtype=False) @pytest.mark.parametrize("format", ["coo", "dok"]) def test_asformat(format): s = sparse.random((2, 3, 4), density=0.5, format="coo") s2 = s.asformat(format) assert_eq(s, s2) @pytest.mark.parametrize( "format", [sparse.COO, sparse.DOK, scipy.sparse.csr_matrix, np.asarray] ) def test_as_coo(format): x = format(sparse.random((3, 4), density=0.5, format="coo").todense()) s1 = sparse.as_coo(x) s2 = COO(x) assert_eq(x, s1) assert_eq(x, s2) def test_invalid_attrs_error(): s = sparse.random((3, 4), density=0.5, format="coo") with pytest.raises(ValueError): sparse.as_coo(s, shape=(2, 3)) with pytest.raises(ValueError): COO(s, shape=(2, 3)) with pytest.raises(ValueError): sparse.as_coo(s, fill_value=0.0) with pytest.raises(ValueError): COO(s, fill_value=0.0) def test_invalid_iterable_error(): with pytest.raises(ValueError): x = [(3, 
4, 5)] COO.from_iter(x) with pytest.raises(ValueError): x = [((2.3, 4.5), 3.2)] COO.from_iter(x) def test_prod_along_axis(): s1 = sparse.random((10, 10), density=0.1) s2 = 1 - s1 x1 = s1.todense() x2 = s2.todense() assert_eq(s1.prod(axis=0), x1.prod(axis=0)) assert_eq(s2.prod(axis=0), x2.prod(axis=0)) class TestRoll: # test on 1d array # @pytest.mark.parametrize("shift", [0, 2, -2, 20, -20]) def test_1d(self, shift): xs = sparse.random((100,), density=0.5) x = xs.todense() assert_eq(np.roll(x, shift), sparse.roll(xs, shift)) assert_eq(np.roll(x, shift), sparse.roll(x, shift)) # test on 2d array # @pytest.mark.parametrize("shift", [0, 2, -2, 20, -20]) @pytest.mark.parametrize("ax", [None, 0, 1, (0, 1)]) def test_2d(self, shift, ax): xs = sparse.random((10, 10), density=0.5) x = xs.todense() assert_eq(np.roll(x, shift, axis=ax), sparse.roll(xs, shift, axis=ax)) assert_eq(np.roll(x, shift, axis=ax), sparse.roll(x, shift, axis=ax)) # test on rolling multiple axes at once # @pytest.mark.parametrize("shift", [(0, 0), (1, -1), (-1, 1), (10, -10)]) @pytest.mark.parametrize("ax", [(0, 1), (0, 2), (1, 2), (-1, 1)]) def test_multiaxis(self, shift, ax): xs = sparse.random((9, 9, 9), density=0.5) x = xs.todense() assert_eq(np.roll(x, shift, axis=ax), sparse.roll(xs, shift, axis=ax)) assert_eq(np.roll(x, shift, axis=ax), sparse.roll(x, shift, axis=ax)) # test original is unchanged # @pytest.mark.parametrize("shift", [0, 2, -2, 20, -20]) @pytest.mark.parametrize("ax", [None, 0, 1, (0, 1)]) def test_original_is_copied(self, shift, ax): xs = sparse.random((10, 10), density=0.5) xc = COO(np.copy(xs.coords), np.copy(xs.data), shape=xs.shape) sparse.roll(xs, shift, axis=ax) assert_eq(xs, xc) # test on empty array # def test_empty(self): x = np.array([]) assert_eq(np.roll(x, 1), sparse.roll(sparse.as_coo(x), 1)) # test error handling # @pytest.mark.parametrize( "args", [ # iterable shift, but axis not iterable ((1, 1), 0), # ndim(axis) != 1 (1, [[0, 1]]), # ndim(shift) != 1 ([[0, 1]], [0, 1]), ([[0, 1], [0, 1]], [0, 1]), ], ) def test_valerr(self, args): x = sparse.random((2, 2, 2), density=1) with pytest.raises(ValueError): sparse.roll(x, *args) @pytest.mark.parametrize("dtype", [np.uint8, np.int8]) @pytest.mark.parametrize("shift", [300, -300]) def test_dtype_errors(self, dtype, shift): x = sparse.random((5, 5, 5), density=0.2, idx_dtype=dtype) with pytest.raises(ValueError): sparse.roll(x, shift) def test_unsigned_type_error(self): x = sparse.random((5, 5, 5), density=0.3, idx_dtype=np.uint8) with pytest.raises(ValueError): sparse.roll(x, -1) def test_clip(): x = np.array([[0, 0, 1, 0, 2], [5, 0, 0, 3, 0]]) s = sparse.COO.from_numpy(x) assert_eq(s.clip(min=1), x.clip(min=1)) assert_eq(s.clip(max=3), x.clip(max=3)) assert_eq(s.clip(min=1, max=3), x.clip(min=1, max=3)) assert_eq(s.clip(min=1, max=3.0), x.clip(min=1, max=3.0)) assert_eq(np.clip(s, 1, 3), np.clip(x, 1, 3)) with pytest.raises(ValueError): s.clip() out = sparse.COO.from_numpy(np.zeros_like(x)) out2 = s.clip(min=1, max=3, out=out) assert out is out2 assert_eq(out, x.clip(min=1, max=3)) class TestFailFillValue: # Check failed fill_value op def test_nonzero_fv(self): xs = sparse.random((2, 3), density=0.5, fill_value=1) ys = sparse.random((3, 4), density=0.5) with pytest.raises(ValueError): sparse.dot(xs, ys) def test_inconsistent_fv(self): xs = sparse.random((3, 4), density=0.5, fill_value=1) ys = sparse.random((3, 4), density=0.5, fill_value=2) with pytest.raises(ValueError): sparse.concatenate([xs, ys]) def test_pickle(): x = 
sparse.COO.from_numpy([1, 0, 0, 0, 0]).reshape((5, 1)) # Enable caching and add some data to it x.enable_caching() x.T assert x._cache is not None # Pickle sends data but not cache x2 = pickle.loads(pickle.dumps(x)) assert_eq(x, x2) assert x2._cache is None @pytest.mark.parametrize("deep", [True, False]) def test_copy(deep): x = sparse.COO.from_numpy([1, 0, 0, 0, 0]).reshape((5, 1)) # Enable caching and add some data to it x.enable_caching() x.T assert x._cache is not None x2 = x.copy(deep) assert_eq(x, x2) assert (x2.data is x.data) is not deep assert (x2.coords is x.coords) is not deep assert x2._cache is None @pytest.mark.parametrize("ndim", [2, 3, 4, 5]) def test_initialization(ndim): shape = [10] * ndim shape[1] *= 2 shape = tuple(shape) coords = np.random.randint(10, size=ndim * 20).reshape(ndim, 20) data = np.random.rand(20) COO(coords, data=data, shape=shape) with pytest.raises(ValueError, match="data length"): COO(coords, data=data[:5], shape=shape) with pytest.raises(ValueError, match="shape of `coords`"): coords = np.random.randint(10, size=20).reshape(1, 20) COO(coords, data=data, shape=shape) @pytest.mark.parametrize("N, M", [(4, None), (4, 10), (10, 4), (0, 10)]) def test_eye(N, M): m = M or N for k in [0, N - 2, N + 2, m - 2, m + 2]: assert_eq(sparse.eye(N, M=M, k=k), np.eye(N, M=M, k=k)) assert_eq(sparse.eye(N, M=M, k=k, dtype="i4"), np.eye(N, M=M, k=k, dtype="i4")) @pytest.mark.parametrize("funcname", ["ones", "zeros"]) def test_ones_zeros(funcname): sp_func = getattr(sparse, funcname) np_func = getattr(np, funcname) assert_eq(sp_func(5), np_func(5)) assert_eq(sp_func((5, 4)), np_func((5, 4))) assert_eq(sp_func((5, 4), dtype="i4"), np_func((5, 4), dtype="i4")) assert_eq(sp_func((5, 4), dtype=None), np_func((5, 4), dtype=None)) @pytest.mark.parametrize("funcname", ["ones_like", "zeros_like"]) def test_ones_zeros_like(funcname): sp_func = getattr(sparse, funcname) np_func = getattr(np, funcname) x = np.ones((5, 5), dtype="i8") assert_eq(sp_func(x), np_func(x)) assert_eq(sp_func(x, dtype="f8"), np_func(x, dtype="f8")) assert_eq(sp_func(x, dtype=None), np_func(x, dtype=None)) assert_eq(sp_func(x, shape=(2, 2)), np_func(x, shape=(2, 2))) def test_full(): assert_eq(sparse.full(5, 9), np.full(5, 9)) assert_eq(sparse.full(5, 9, dtype="f8"), np.full(5, 9, dtype="f8")) assert_eq(sparse.full((5, 4), 9.5), np.full((5, 4), 9.5)) assert_eq(sparse.full((5, 4), 9.5, dtype="i4"), np.full((5, 4), 9.5, dtype="i4")) def test_full_like(): x = np.zeros((5, 5), dtype="i8") assert_eq(sparse.full_like(x, 9.5), np.full_like(x, 9.5)) assert_eq(sparse.full_like(x, 9.5, dtype="f8"), np.full_like(x, 9.5, dtype="f8")) assert_eq( sparse.full_like(x, 9.5, shape=(2, 2)), np.full_like(x, 9.5, shape=(2, 2)) ) @pytest.mark.parametrize("complex", [True, False]) def test_complex_methods(complex): if complex: x = np.array([1 + 2j, 2 - 1j, 0, 1, 0]) else: x = np.array([1, 2, 0, 0, 0]) s = sparse.COO.from_numpy(x) assert_eq(s.imag, x.imag) assert_eq(s.real, x.real) assert_eq(s.conj(), x.conj()) def test_np_matrix(): x = np.random.rand(10, 1).view(type=np.matrix) s = sparse.COO.from_numpy(x) assert_eq(x, s) def test_out_dtype(): a = sparse.eye(5, dtype="float32") b = sparse.eye(5, dtype="float64") assert ( np.positive(a, out=b).dtype == np.positive(a.todense(), out=b.todense()).dtype ) assert ( np.positive(a, out=b, dtype="float64").dtype == np.positive(a.todense(), out=b.todense(), dtype="float64").dtype ) @contextlib.contextmanager def auto_densify(): "For use in tests only! Not threadsafe." 
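    # How this helper works: ``sparse._settings`` reads the SPARSE_AUTO_DENSIFY
    # environment variable at import time, so the context manager sets the
    # variable, reloads the settings module, yields to the test, and finally
    # removes the variable and reloads again to restore the default behaviour.
    # Typical use in the tests further down (sketch, assuming a COO array ``s``):
    #
    #     with auto_densify():
    #         dense = np.array(s)  # densifies instead of raising RuntimeError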
import os from importlib import reload os.environ["SPARSE_AUTO_DENSIFY"] = "1" reload(sparse._settings) yield del os.environ["SPARSE_AUTO_DENSIFY"] reload(sparse._settings) def test_setting_into_numpy_slice(): actual = np.zeros((5, 5)) s = sparse.COO(data=[1, 1], coords=(2, 4), shape=(5,)) # This calls s.__array__(dtype('float64')) which means that __array__ # must accept a positional argument. If not this will raise, of course, # TypeError: __array__() takes 1 positional argument but 2 were given with auto_densify(): actual[:, 0] = s # Might as well check the content of the result as well. expected = np.zeros((5, 5)) expected[:, 0] = s.todense() assert_eq(actual, expected) # Without densification, setting is unsupported. with pytest.raises(RuntimeError): actual[:, 0] = s def test_successful_densification(): s = sparse.random((3, 4, 5), density=0.5) with auto_densify(): x = np.array(s) assert isinstance(x, np.ndarray) assert_eq(s, x) def test_failed_densification(): s = sparse.random((3, 4, 5), density=0.5) with pytest.raises(RuntimeError): np.array(s) def test_warn_on_too_dense(): import os from importlib import reload os.environ["SPARSE_WARN_ON_TOO_DENSE"] = "1" reload(sparse._settings) with pytest.warns(RuntimeWarning): sparse.random((3, 4, 5), density=1.0) del os.environ["SPARSE_WARN_ON_TOO_DENSE"] reload(sparse._settings) def test_prune_coo(): coords = np.array([[0, 1, 2, 3]]) data = np.array([1, 0, 1, 2]) s1 = COO(coords, data) s2 = COO(coords, data, prune=True) assert s2.nnz == 3 # Densify s1 because it isn't canonical assert_eq(s1.todense(), s2, check_nnz=False) def test_diagonal(): a = sparse.random((4, 4), density=0.5) assert_eq(sparse.diagonal(a, offset=0), np.diagonal(a.todense(), offset=0)) assert_eq(sparse.diagonal(a, offset=1), np.diagonal(a.todense(), offset=1)) assert_eq(sparse.diagonal(a, offset=2), np.diagonal(a.todense(), offset=2)) a = sparse.random((4, 5, 4, 6), density=0.5) assert_eq( sparse.diagonal(a, offset=0, axis1=0, axis2=2), np.diagonal(a.todense(), offset=0, axis1=0, axis2=2), ) assert_eq( sparse.diagonal(a, offset=1, axis1=0, axis2=2), np.diagonal(a.todense(), offset=1, axis1=0, axis2=2), ) assert_eq( sparse.diagonal(a, offset=2, axis1=0, axis2=2), np.diagonal(a.todense(), offset=2, axis1=0, axis2=2), ) def test_diagonalize(): assert_eq(sparse.diagonalize(np.ones(3)), sparse.eye(3)) assert_eq( sparse.diagonalize(scipy.sparse.coo_matrix(np.eye(3))), sparse.diagonalize(sparse.eye(3)), ) # inverse of diagonal b = sparse.random((4, 3, 2), density=0.5) b_diag = sparse.diagonalize(b, axis=1) assert_eq(b, sparse.diagonal(b_diag, axis1=1, axis2=3).transpose([0, 2, 1])) RESULT_TYPE_DTYPES = [ "i1", "i2", "i4", "i8", "u1", "u2", "u4", "u8", "f4", "f8", "c8", "c16", object, ] @pytest.mark.parametrize("t1", RESULT_TYPE_DTYPES) @pytest.mark.parametrize("t2", RESULT_TYPE_DTYPES) @pytest.mark.parametrize( "func", [ sparse.result_type, pytest.param( np.result_type, marks=pytest.mark.skipif(not NEP18_ENABLED, reason="NEP18 is not enabled"), ), ], ) @pytest.mark.parametrize("data", [1, [1]]) # Not the same outputs! 
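# The test below checks that ``sparse.result_type`` (and, when NEP 18 dispatch
# is enabled, plain ``np.result_type``) matches NumPy's promotion rules for any
# mix of COO arrays, ndarrays and bare dtypes.  The ``data`` parameter matters
# because NumPy promotes 0-d scalars and 1-d arrays differently, hence the
# "Not the same outputs!" note above.  Illustrative sketch (not from the suite):
#
#     assert sparse.result_type(np.dtype("i1"), sparse.COO(np.array([1.5]))) == np.float64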
def test_result_type(t1, t2, func, data): a = np.array(data, dtype=t1) b = np.array(data, dtype=t2) expect = np.result_type(a, b) assert func(a, sparse.COO(b)) == expect assert func(sparse.COO(a), b) == expect assert func(sparse.COO(a), sparse.COO(b)) == expect assert func(a.dtype, sparse.COO(b)) == np.result_type(a.dtype, b) assert func(sparse.COO(a), b.dtype) == np.result_type(a, b.dtype) @pytest.mark.parametrize("in_shape", [(5, 5), 62, (3, 3, 3)]) def test_flatten(in_shape): s = sparse.random(in_shape, density=0.5) x = s.todense() a = s.flatten() e = x.flatten() assert_eq(e, a) def test_asnumpy(): s = sparse.COO(data=[1], coords=[2], shape=(5,)) assert_eq(sparse.asnumpy(s), s.todense()) assert_eq( sparse.asnumpy(s, dtype=np.float64), np.asarray(s.todense(), dtype=np.float64) ) a = np.array([1, 2, 3]) # Array passes through with no copying. assert sparse.asnumpy(a) is a @pytest.mark.parametrize("shape1", [(2,), (2, 3), (2, 3, 4)]) @pytest.mark.parametrize("shape2", [(2,), (2, 3), (2, 3, 4)]) def test_outer(shape1, shape2): s1 = sparse.random(shape1, density=0.5) s2 = sparse.random(shape2, density=0.5) x1 = s1.todense() x2 = s2.todense() assert_eq(sparse.outer(s1, s2), np.outer(x1, x2)) assert_eq(np.multiply.outer(s1, s2), np.multiply.outer(x1, x2)) def test_scalar_list_init(): a = sparse.COO([], [], ()) b = sparse.COO([], [1], ()) assert a.todense() == 0 assert b.todense() == 1 def test_raise_on_nd_data(): s1 = sparse.random((2, 3, 4), density=0.5) with pytest.raises(ValueError): sparse.COO(s1.coords, s1.data[:, None], shape=(2, 3, 4)) def test_astype_casting(): s1 = sparse.random((2, 3, 4), density=0.5) with pytest.raises(TypeError): s1.astype(dtype=np.int64, casting="safe") def test_astype_no_copy(): s1 = sparse.random((2, 3, 4), density=0.5) s2 = s1.astype(s1.dtype, copy=False) assert s1 is s2 def test_coo_valerr(): a = np.arange(300) with pytest.raises(ValueError): COO.from_numpy(a, idx_dtype=np.int8) def test_random_idx_dtype(): with pytest.raises(ValueError): sparse.random((300,), density=0.1, format="coo", idx_dtype=np.int8) sparse-0.12.0/sparse/tests/test_coo_numba.py000066400000000000000000000033371402510130100210720ustar00rootroot00000000000000import pytest import numba import sparse import numpy as np @numba.njit def identity(x): """ Pass an object through numba and back """ return x def identity_constant(x): @numba.njit def get_it(): """ Pass an object through numba and back as a constant """ return x return get_it() def assert_coo_equal(c1, c2): assert c1.shape == c2.shape assert c1 == c2 assert c1.data.dtype == c2.data.dtype assert c1.fill_value == c2.fill_value def assert_coo_same_memory(c1, c2): assert_coo_equal(c1, c2) assert c1.coords.data == c2.coords.data assert c1.data.data == c2.data.data class TestBasic: """ Test very simple construction and field access """ def test_roundtrip(self): c1 = sparse.COO(np.eye(3), fill_value=1) c2 = identity(c1) assert type(c1) is type(c2) assert_coo_same_memory(c1, c2) def test_roundtrip_constant(self): c1 = sparse.COO(np.eye(3), fill_value=1) c2 = identity_constant(c1) # constants are always copies assert_coo_equal(c1, c2) def test_unpack_attrs(self): @numba.njit def unpack(c): return c.coords, c.data, c.shape, c.fill_value c1 = sparse.COO(np.eye(3), fill_value=1) coords, data, shape, fill_value = unpack(c1) c2 = sparse.COO(coords, data, shape, fill_value=fill_value) assert_coo_same_memory(c1, c2) def test_repack_attrs(self): @numba.njit def pack(coords, data, shape): return sparse.COO(coords, data, shape) # repacking fill_value 
isn't possible yet c1 = sparse.COO(np.eye(3)) c2 = pack(c1.coords, c1.data, c1.shape) assert_coo_same_memory(c1, c2) sparse-0.12.0/sparse/tests/test_dask_interop.py000066400000000000000000000006361402510130100216110ustar00rootroot00000000000000from dask.base import tokenize import sparse def test_deterministic_token(): a = sparse.COO(data=[1, 2, 3], coords=[10, 20, 30], shape=(40,)) b = sparse.COO(data=[1, 2, 3], coords=[10, 20, 30], shape=(40,)) assert tokenize(a) == tokenize(b) # One of these things is not like the other.... c = sparse.COO(data=[1, 2, 4], coords=[10, 20, 30], shape=(40,)) assert tokenize(a) != tokenize(c) sparse-0.12.0/sparse/tests/test_dok.py000066400000000000000000000145021402510130100177010ustar00rootroot00000000000000import pytest import numpy as np import sparse from sparse import DOK from sparse._utils import assert_eq @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4)]) @pytest.mark.parametrize("density", [0.1, 0.3, 0.5, 0.7]) def test_random_shape_nnz(shape, density): s = sparse.random(shape, density, format="dok") assert isinstance(s, DOK) assert s.shape == shape expected_nnz = density * np.prod(shape) assert np.floor(expected_nnz) <= s.nnz <= np.ceil(expected_nnz) def test_convert_to_coo(): s1 = sparse.random((2, 3, 4), 0.5, format="dok") s2 = sparse.COO(s1) assert_eq(s1, s2) def test_convert_from_coo(): s1 = sparse.random((2, 3, 4), 0.5, format="coo") s2 = DOK(s1) assert_eq(s1, s2) def test_convert_from_numpy(): x = np.random.rand(2, 3, 4) s = DOK(x) assert_eq(x, s) def test_convert_to_numpy(): s = sparse.random((2, 3, 4), 0.5, format="dok") x = s.todense() assert_eq(x, s) @pytest.mark.parametrize( "shape, data", [ (2, {0: 1}), ((2, 3), {(0, 1): 3, (1, 2): 4}), ((2, 3, 4), {(0, 1): 3, (1, 2, 3): 4, (1, 1): [6, 5, 4, 1]}), ], ) def test_construct(shape, data): s = DOK(shape, data) x = np.zeros(shape, dtype=s.dtype) for c, d in data.items(): x[c] = d assert_eq(x, s) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4)]) @pytest.mark.parametrize("density", [0.1, 0.3, 0.5, 0.7]) def test_getitem_single(shape, density): s = sparse.random(shape, density, format="dok") x = s.todense() for _ in range(s.nnz): idx = np.random.randint(np.prod(shape)) idx = np.unravel_index(idx, shape) print(idx) assert np.isclose(s[idx], x[idx]) @pytest.mark.parametrize( "shape, density, indices", [ ((2, 3), 0.5, (slice(1),)), ((5, 5), 0.2, (slice(0, 4, 2),)), ((10, 10), 0.2, (slice(5), slice(0, 10, 3))), ((5, 5), 0.5, (slice(0, 4, 4), slice(0, 4, 4))), ((5, 5), 0.4, (1, slice(0, 4, 1))), ((10, 10), 0.8, ([0, 4, 5], [3, 2, 4])), ((10, 10), 0, (slice(10), slice(10))), ], ) def test_getitem(shape, density, indices): s = sparse.random(shape, density, format="dok") x = s.todense() sparse_sliced = s[indices] dense_sliced = x[indices] assert_eq(sparse_sliced.todense(), dense_sliced) @pytest.mark.parametrize( "shape, density, indices", [ ((10, 10), 0.8, ([0, 4, 5],)), ((5, 5, 5), 0.5, ([1, 2, 3], [0, 2, 2])), ], ) def test_getitem_notimplemented_error(shape, density, indices): s = sparse.random(shape, density, format="dok") with pytest.raises(NotImplementedError): s[indices] @pytest.mark.parametrize( "shape, density, indices", [ ((10, 10), 0.8, ([0, 4, 5], [0, 2])), ((5, 5, 5), 0.5, ([1, 2, 3], [0], [2, 3, 4])), ], ) def test_getitem_index_error(shape, density, indices): s = sparse.random(shape, density, format="dok") with pytest.raises(IndexError): s[indices] @pytest.mark.parametrize( "shape, index, value", [ ((2,), slice(None), np.random.rand()), ((2,), slice(1, 2), 
np.random.rand()), ((2,), slice(0, 2), np.random.rand(2)), ((2,), 1, np.random.rand()), ((2, 3), (0, slice(None)), np.random.rand()), ((2, 3), (0, slice(1, 3)), np.random.rand()), ((2, 3), (1, slice(None)), np.random.rand(3)), ((2, 3), (0, slice(1, 3)), np.random.rand(2)), ((2, 3), (0, slice(2, 0, -1)), np.random.rand(2)), ((2, 3), (slice(None), 1), np.random.rand()), ((2, 3), (slice(None), 1), np.random.rand(2)), ((2, 3), (slice(1, 2), 1), np.random.rand()), ((2, 3), (slice(1, 2), 1), np.random.rand(1)), ((2, 3), (0, 2), np.random.rand()), ((2, 3), ([0, 1], [1, 2]), np.random.rand(2)), ((2, 3), ([0, 1], [1, 2]), np.random.rand()), ((4,), ([1, 3]), np.random.rand()), ((2, 3), ([0, 1], [1, 2]), 0), ], ) def test_setitem(shape, index, value): s = sparse.random(shape, 0.5, format="dok") x = s.todense() s[index] = value x[index] = value assert_eq(x, s) @pytest.mark.parametrize( "shape, index, value", [ ((2, 3), ([0, 1.5], [1, 2]), np.random.rand()), ((2, 3), ([0, 1], [1]), np.random.rand()), ((2, 3), ([[0], [1]], [1, 2]), np.random.rand()), ], ) def test_setitem_index_error(shape, index, value): s = sparse.random(shape, 0.5, format="dok") with pytest.raises(IndexError): s[index] = value @pytest.mark.parametrize( "shape, index, value", [ ((2, 3), ([0, 1],), np.random.rand()), ], ) def test_setitem_notimplemented_error(shape, index, value): s = sparse.random(shape, 0.5, format="dok") with pytest.raises(NotImplementedError): s[index] = value @pytest.mark.parametrize( "shape, index, value", [ ((2, 3), ([0, 1], [1, 2]), np.random.rand(1, 2)), ((2, 3), ([0, 1], [1, 2]), np.random.rand(3)), ((2,), 1, np.random.rand(2)), ], ) def test_setitem_value_error(shape, index, value): s = sparse.random(shape, 0.5, format="dok") with pytest.raises(ValueError): s[index] = value def test_default_dtype(): s = DOK((5,)) assert s.dtype == np.float64 def test_int_dtype(): data = {1: np.uint8(1), 2: np.uint16(2)} s = DOK((5,), data) assert s.dtype == np.uint16 def test_float_dtype(): data = {1: np.uint8(1), 2: np.float32(2)} s = DOK((5,), data) assert s.dtype == np.float32 def test_set_zero(): s = DOK((1,), dtype=np.uint8) s[0] = 1 s[0] = 0 assert s[0] == 0 assert s.nnz == 0 @pytest.mark.parametrize("format", ["coo", "dok"]) def test_asformat(format): s = sparse.random((2, 3, 4), density=0.5, format="dok") s2 = s.asformat(format) assert_eq(s, s2) def test_coo_fv_interface(): s1 = sparse.full((5, 5), fill_value=1 + np.random.rand()) s2 = sparse.DOK(s1) assert_eq(s1, s2) s3 = sparse.COO(s2) assert_eq(s1, s3) def test_empty_dok_dtype(): d = sparse.DOK(5, dtype=np.uint8) s = sparse.COO(d) assert s.dtype == d.dtype def test_zeros_like(): s = sparse.random((2, 3, 4), density=0.5) s2 = sparse.zeros_like(s, format="dok") assert s.shape == s2.shape assert s.dtype == s2.dtype assert isinstance(s2, sparse.DOK) sparse-0.12.0/sparse/tests/test_dot.py000066400000000000000000000174671402510130100177270ustar00rootroot00000000000000import numpy as np import pytest import scipy.sparse import scipy.stats import operator import sparse from sparse._compressed import GCXS from sparse import COO from sparse._utils import assert_eq @pytest.mark.parametrize( "a_shape,b_shape,axes", [ [(3, 4), (4, 3), (1, 0)], [(3, 4), (4, 3), (0, 1)], [(3, 4, 5), (4, 3), (1, 0)], [(3, 4), (5, 4, 3), (1, 1)], [(3, 4), (5, 4, 3), ((0, 1), (2, 1))], [(3, 4), (5, 4, 3), ((1, 0), (1, 2))], [(3, 4, 5), (4,), (1, 0)], [(4,), (3, 4, 5), (0, 1)], [(4,), (4,), (0, 0)], [(4,), (4,), 0], ], ) @pytest.mark.parametrize( "a_format, b_format", [("coo", "coo"), ("coo", 
"gcxs"), ("gcxs", "coo"), ("gcxs", "gcxs")], ) def test_tensordot(a_shape, b_shape, axes, a_format, b_format): sa = sparse.random(a_shape, density=0.5, format=a_format) sb = sparse.random(b_shape, density=0.5, format=b_format) a = sa.todense() b = sb.todense() a_b = np.tensordot(a, b, axes) # tests for return_type=None sa_sb = sparse.tensordot(sa, sb, axes) sa_b = sparse.tensordot(sa, b, axes) a_sb = sparse.tensordot(a, sb, axes) assert_eq(a_b, sa_sb) assert_eq(a_b, sa_b) assert_eq(a_b, a_sb) if all(isinstance(arr, COO) for arr in [sa, sb]): assert isinstance(sa_sb, COO) else: assert isinstance(sa_sb, GCXS) assert isinstance(sa_b, np.ndarray) assert isinstance(a_sb, np.ndarray) # tests for return_type=COO sa_b = sparse.tensordot(sa, b, axes, return_type=COO) a_sb = sparse.tensordot(a, sb, axes, return_type=COO) assert_eq(a_b, sa_b) assert_eq(a_b, a_sb) assert isinstance(sa_b, COO) assert isinstance(a_sb, COO) # tests form return_type=GCXS sa_b = sparse.tensordot(sa, b, axes, return_type=GCXS) a_sb = sparse.tensordot(a, sb, axes, return_type=GCXS) assert_eq(a_b, sa_b) assert_eq(a_b, a_sb) assert isinstance(sa_b, GCXS) assert isinstance(a_sb, GCXS) # tests for return_type=np.ndarray sa_sb = sparse.tensordot(sa, sb, axes, return_type=np.ndarray) assert_eq(a_b, sa_sb) assert isinstance(sa_sb, np.ndarray) def test_tensordot_empty(): x1 = np.empty((0, 0, 0)) x2 = np.empty((0, 0, 0)) s1 = sparse.COO.from_numpy(x1) s2 = sparse.COO.from_numpy(x2) assert_eq(np.tensordot(x1, x2), sparse.tensordot(s1, s2)) def test_tensordot_valueerror(): x1 = sparse.COO(np.array(1)) x2 = sparse.COO(np.array(1)) with pytest.raises(ValueError): x1 @ x2 @pytest.mark.parametrize( "a_shape, b_shape", [ ((3, 1, 6, 5), (2, 1, 4, 5, 6)), ((2, 1, 4, 5, 6), (3, 1, 6, 5)), ((1, 1, 5), (3, 5, 6)), ((3, 4, 5), (1, 5, 6)), ((3, 4, 5), (3, 5, 6)), ((3, 4, 5), (5, 6)), ((4, 5), (5, 6)), ((5,), (5, 6)), ((4, 5), (5,)), ((5,), (5,)), ((3, 4), (1, 2, 4, 3)), ], ) @pytest.mark.parametrize( "a_format, b_format", [("coo", "coo"), ("coo", "gcxs"), ("gcxs", "coo"), ("gcxs", "gcxs")], ) @pytest.mark.parametrize( "a_comp_axes, b_comp_axes", [([0], [0]), ([0], [1]), ([1], [0]), ([1], [1])] ) def test_matmul(a_shape, b_shape, a_format, b_format, a_comp_axes, b_comp_axes): if a_format == "coo" or len(a_shape) == 1: a_comp_axes = None if b_format == "coo" or len(b_shape) == 1: b_comp_axes = None sa = sparse.random( a_shape, density=0.5, format=a_format, compressed_axes=a_comp_axes ) sb = sparse.random( b_shape, density=0.5, format=b_format, compressed_axes=b_comp_axes ) a = sa.todense() b = sb.todense() assert_eq(np.matmul(a, b), sparse.matmul(sa, sb)) assert_eq(sparse.matmul(sa, b), sparse.matmul(a, sb)) assert_eq(np.matmul(a, b), sparse.matmul(sa, sb)) if a.ndim == 2 or b.ndim == 2: assert_eq( np.matmul(a, b), sparse.matmul( scipy.sparse.coo_matrix(a) if a.ndim == 2 else sa, scipy.sparse.coo_matrix(b) if b.ndim == 2 else sb, ), ) if hasattr(operator, "matmul"): assert_eq(operator.matmul(a, b), operator.matmul(sa, sb)) def test_matmul_errors(): with pytest.raises(ValueError): sa = sparse.random((3, 4, 5, 6), 0.5) sb = sparse.random((3, 6, 5, 6), 0.5) sparse.matmul(sa, sb) @pytest.mark.parametrize( "a_shape, b_shape", [ ((1, 4, 5), (3, 5, 6)), ((3, 4, 5), (1, 5, 6)), ((3, 4, 5), (3, 5, 6)), ((3, 4, 5), (5, 6)), ((4, 5), (5, 6)), ((5,), (5, 6)), ((4, 5), (5,)), ((5,), (5,)), ], ) @pytest.mark.parametrize( "a_format, b_format", [("coo", "coo"), ("coo", "gcxs"), ("gcxs", "coo"), ("gcxs", "gcxs")], ) @pytest.mark.parametrize( "a_comp_axes, 
b_comp_axes", [([0], [0]), ([0], [1]), ([1], [0]), ([1], [1])] ) def test_dot(a_shape, b_shape, a_format, b_format, a_comp_axes, b_comp_axes): if a_format == "coo" or len(a_shape) == 1: a_comp_axes = None if b_format == "coo" or len(b_shape) == 1: b_comp_axes = None sa = sparse.random( a_shape, density=0.5, format=a_format, compressed_axes=a_comp_axes ) sb = sparse.random( b_shape, density=0.5, format=b_format, compressed_axes=b_comp_axes ) a = sa.todense() b = sb.todense() assert_eq(a.dot(b), sa.dot(sb)) assert_eq(np.dot(a, b), sparse.dot(sa, sb)) assert_eq(sparse.dot(sa, b), sparse.dot(a, sb)) assert_eq(np.dot(a, b), sparse.dot(sa, sb)) # Basic equivalences assert_eq(operator.matmul(a, b), operator.matmul(sa, sb)) # Test that COO's and np.array's combine correctly # Not possible due to https://github.com/numpy/numpy/issues/9028 # assert_eq(eval("a @ sb"), eval("sa @ b")) @pytest.mark.parametrize( "a_dense, b_dense, o_type", [ (False, False, sparse.SparseArray), (False, True, np.ndarray), (True, False, np.ndarray), ], ) def test_dot_type(a_dense, b_dense, o_type): a = sparse.random((3, 4), density=0.8) b = sparse.random((4, 5), density=0.8) if a_dense: a = a.todense() if b_dense: b = b.todense() assert isinstance(sparse.dot(a, b), o_type) @pytest.mark.xfail def test_dot_nocoercion(): sa = sparse.random((3, 4, 5), density=0.5) sb = sparse.random((5, 6), density=0.5) a = sa.todense() b = sb.todense() la = a.tolist() lb = b.tolist() if hasattr(operator, "matmul"): # Operations with naive collection (list) assert_eq(operator.matmul(la, b), operator.matmul(la, sb)) assert_eq(operator.matmul(a, lb), operator.matmul(sa, lb)) dot_formats = [ lambda x: x.asformat("coo"), lambda x: x.asformat("gcxs"), lambda x: x.todense(), ] @pytest.mark.parametrize("format1", dot_formats) @pytest.mark.parametrize("format2", dot_formats) def test_small_values(format1, format2): s1 = format1(sparse.COO(coords=[[0, 10]], data=[3.6e-100, 7.2e-009], shape=(20,))) s2 = format2( sparse.COO(coords=[[0, 0], [4, 28]], data=[3.8e-25, 4.5e-225], shape=(20, 50)) ) dense_convertor = lambda x: x.todense() if isinstance(x, sparse.SparseArray) else x x1, x2 = dense_convertor(s1), dense_convertor(s2) assert_eq(x1 @ x2, s1 @ s2) dot_dtypes = [np.complex64, np.complex128] @pytest.mark.parametrize("dtype1", dot_dtypes) @pytest.mark.parametrize("dtype2", dot_dtypes) @pytest.mark.parametrize("format1", dot_formats) @pytest.mark.parametrize("format2", dot_formats) def test_complex(dtype1, dtype2, format1, format2): s1 = format1(sparse.random((20,), density=0.5).astype(dtype1)) s2 = format2(sparse.random((20,), density=0.5).astype(dtype2)) dense_convertor = lambda x: x.todense() if isinstance(x, sparse.SparseArray) else x x1, x2 = dense_convertor(s1), dense_convertor(s2) assert_eq(x1 @ x2, s1 @ s2) sparse-0.12.0/sparse/tests/test_elemwise.py000066400000000000000000000455671402510130100207550ustar00rootroot00000000000000import numpy as np import sparse import pytest import operator from sparse import COO, DOK from sparse._compressed import GCXS from sparse._utils import assert_eq, random_value_array @pytest.mark.parametrize( "func", [ np.expm1, np.log1p, np.sin, np.tan, np.sinh, np.tanh, np.floor, np.ceil, np.sqrt, np.conj, np.round, np.rint, lambda x: x.astype("int32"), np.conjugate, np.conj, lambda x: x.round(decimals=2), abs, ], ) @pytest.mark.parametrize("format", [COO, GCXS, DOK]) def test_elemwise(func, format): s = sparse.random((2, 3, 4), density=0.5, format=format) x = s.todense() fs = func(s) assert isinstance(fs, format) assert 
fs.nnz <= s.nnz assert_eq(func(x), fs) @pytest.mark.parametrize( "func", [ np.expm1, np.log1p, np.sin, np.tan, np.sinh, np.tanh, np.floor, np.ceil, np.sqrt, np.conj, np.round, np.rint, np.conjugate, np.conj, lambda x, out: x.round(decimals=2, out=out), ], ) @pytest.mark.parametrize("format", [COO, GCXS, DOK]) def test_elemwise_inplace(func, format): s = sparse.random((2, 3, 4), density=0.5, format=format) x = s.todense() func(s, out=s) func(x, out=x) assert isinstance(s, format) assert_eq(x, s) @pytest.mark.parametrize( "shape1, shape2", [ ((2, 3, 4), (3, 4)), ((3, 4), (2, 3, 4)), ((3, 1, 4), (3, 2, 4)), ((1, 3, 4), (3, 4)), ((3, 4, 1), (3, 4, 2)), ((1, 5), (5, 1)), ((3, 1), (3, 4)), ((3, 1), (1, 4)), ((1, 4), (3, 4)), ((2, 2, 2), (1, 1, 1)), ], ) @pytest.mark.parametrize("format", [COO, GCXS, DOK]) def test_elemwise_mixed(shape1, shape2, format): s1 = sparse.random(shape1, density=0.5, format=format) x2 = np.random.rand(*shape2) x1 = s1.todense() assert_eq(s1 * x2, x1 * x2) @pytest.mark.parametrize("format", [COO, GCXS, DOK]) def test_elemwise_mixed_empty(format): s1 = sparse.random((2, 0, 4), density=0.5, format=format) x2 = np.random.rand(2, 0, 4) x1 = s1.todense() assert_eq(s1 * x2, x1 * x2) @pytest.mark.parametrize("format", [COO, GCXS, DOK]) def test_elemwise_unsupported(format): class A: pass s1 = sparse.random((2, 3, 4), density=0.5, format=format) x2 = A() with pytest.raises(TypeError): s1 + x2 assert sparse.elemwise(operator.add, s1, x2) is NotImplemented @pytest.mark.parametrize("format", [COO, GCXS, DOK]) def test_elemwise_mixed_broadcast(format): s1 = sparse.random((2, 3, 4), density=0.5, format=format) s2 = sparse.random(4, density=0.5) x3 = np.random.rand(3, 4) x1 = s1.todense() x2 = s2.todense() def func(x1, x2, x3): return x1 * x2 * x3 assert_eq(sparse.elemwise(func, s1, s2, x3), func(x1, x2, x3)) @pytest.mark.parametrize( "func", [operator.mul, operator.add, operator.sub, operator.gt, operator.lt, operator.ne], ) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]) @pytest.mark.parametrize("format", [COO, GCXS, DOK]) def test_elemwise_binary(func, shape, format): xs = sparse.random(shape, density=0.5, format=format) ys = sparse.random(shape, density=0.5, format=format) x = xs.todense() y = ys.todense() assert_eq(func(xs, ys), func(x, y)) @pytest.mark.parametrize("func", [operator.imul, operator.iadd, operator.isub]) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]) @pytest.mark.parametrize("format", [COO, GCXS, DOK]) def test_elemwise_binary_inplace(func, shape, format): xs = sparse.random(shape, density=0.5, format=format) ys = sparse.random(shape, density=0.5, format=format) x = xs.todense() y = ys.todense() xs = func(xs, ys) x = func(x, y) assert_eq(xs, x) @pytest.mark.parametrize( "func", [ lambda x, y, z: x + y + z, lambda x, y, z: x * y * z, lambda x, y, z: x + y * z, lambda x, y, z: (x + y) * z, ], ) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]) @pytest.mark.parametrize( "formats", [ [COO, COO, COO], [GCXS, GCXS, GCXS], [COO, GCXS, GCXS], ], ) def test_elemwise_trinary(func, shape, formats): xs = sparse.random(shape, density=0.5, format=formats[0]) ys = sparse.random(shape, density=0.5, format=formats[1]) zs = sparse.random(shape, density=0.5, format=formats[2]) x = xs.todense() y = ys.todense() z = zs.todense() fs = sparse.elemwise(func, xs, ys, zs) assert_eq(fs, func(x, y, z)) @pytest.mark.parametrize("func", [operator.add, operator.mul]) @pytest.mark.parametrize( "shape1,shape2", [ ((2, 
3, 4), (3, 4)), ((3, 4), (2, 3, 4)), ((3, 1, 4), (3, 2, 4)), ((1, 3, 4), (3, 4)), ((3, 4, 1), (3, 4, 2)), ((1, 5), (5, 1)), ((3, 1), (3, 4)), ((3, 1), (1, 4)), ((1, 4), (3, 4)), ((2, 2, 2), (1, 1, 1)), ], ) def test_binary_broadcasting(func, shape1, shape2): density1 = 1 if np.prod(shape1) == 1 else 0.5 density2 = 1 if np.prod(shape2) == 1 else 0.5 xs = sparse.random(shape1, density=density1) x = xs.todense() ys = sparse.random(shape2, density=density2) y = ys.todense() expected = func(x, y) actual = func(xs, ys) assert isinstance(actual, COO) assert_eq(expected, actual) assert np.count_nonzero(expected) == actual.nnz @pytest.mark.parametrize( "shape1,shape2", [((3, 4), (2, 3, 4)), ((3, 1, 4), (3, 2, 4)), ((3, 4, 1), (3, 4, 2))], ) def test_broadcast_to(shape1, shape2): a = sparse.random(shape1, density=0.5) x = a.todense() assert_eq(np.broadcast_to(x, shape2), a.broadcast_to(shape2)) @pytest.mark.parametrize( "shapes", [ [(2,), (3, 2), (4, 3, 2)], [(3,), (2, 3), (2, 2, 3)], [(2,), (2, 2), (2, 2, 2)], [(4,), (4, 4), (4, 4, 4)], [(4,), (4, 4), (4, 4, 4)], [(4,), (4, 4), (4, 4, 4)], [(1, 1, 2), (1, 3, 1), (4, 1, 1)], [(2,), (2, 1), (2, 1, 1)], ], ) @pytest.mark.parametrize( "func", [ lambda x, y, z: (x + y) * z, lambda x, y, z: x * (y + z), lambda x, y, z: x * y * z, lambda x, y, z: x + y + z, lambda x, y, z: x + y - z, lambda x, y, z: x - y + z, ], ) def test_trinary_broadcasting(shapes, func): args = [sparse.random(s, density=0.5) for s in shapes] dense_args = [arg.todense() for arg in args] fs = sparse.elemwise(func, *args) assert isinstance(fs, COO) assert_eq(fs, func(*dense_args)) @pytest.mark.parametrize( "shapes, func", [ ([(2,), (3, 2), (4, 3, 2)], lambda x, y, z: (x + y) * z), ([(3,), (2, 3), (2, 2, 3)], lambda x, y, z: x * (y + z)), ([(2,), (2, 2), (2, 2, 2)], lambda x, y, z: x * y * z), ([(4,), (4, 4), (4, 4, 4)], lambda x, y, z: x + y + z), ], ) @pytest.mark.parametrize("value", [np.nan, np.inf, -np.inf]) @pytest.mark.parametrize("fraction", [0.25, 0.5, 0.75, 1.0]) @pytest.mark.filterwarnings("ignore:invalid value") def test_trinary_broadcasting_pathological(shapes, func, value, fraction): args = [ sparse.random(s, density=0.5, data_rvs=random_value_array(value, fraction)) for s in shapes ] dense_args = [arg.todense() for arg in args] fs = sparse.elemwise(func, *args) assert isinstance(fs, COO) assert_eq(fs, func(*dense_args)) def test_sparse_broadcasting(monkeypatch): orig_unmatch_coo = sparse._umath._Elemwise._get_func_coords_data state = {"num_matches": 0} xs = sparse.random((3, 4), density=0.5) ys = sparse.random((3, 4), density=0.5) def mock_unmatch_coo(*args, **kwargs): result = orig_unmatch_coo(*args, **kwargs) if result is not None: state["num_matches"] += 1 return result monkeypatch.setattr( sparse._umath._Elemwise, "_get_func_coords_data", mock_unmatch_coo ) xs * ys # Less than in case there's absolutely no overlap in some cases. assert state["num_matches"] <= 1 def test_dense_broadcasting(monkeypatch): orig_unmatch_coo = sparse._umath._Elemwise._get_func_coords_data state = {"num_matches": 0} xs = sparse.random((3, 4), density=0.5) ys = sparse.random((3, 4), density=0.5) def mock_unmatch_coo(*args, **kwargs): result = orig_unmatch_coo(*args, **kwargs) if result is not None: state["num_matches"] += 1 return result monkeypatch.setattr( sparse._umath._Elemwise, "_get_func_coords_data", mock_unmatch_coo ) xs + ys # Less than in case there's absolutely no overlap in some cases. 
assert state["num_matches"] <= 3 @pytest.mark.parametrize("format", ["coo", "dok", "gcxs"]) def test_sparsearray_elemwise(format): xs = sparse.random((3, 4), density=0.5, format=format) ys = sparse.random((3, 4), density=0.5, format=format) x = xs.todense() y = ys.todense() fs = sparse.elemwise(operator.add, xs, ys) if format == "gcxs": assert isinstance(fs, GCXS) elif format == "dok": assert isinstance(fs, DOK) else: assert isinstance(fs, COO) assert_eq(fs, x + y) def test_ndarray_densification_fails(): xs = sparse.random((2, 3, 4), density=0.5) y = np.random.rand(3, 4) with pytest.raises(ValueError): xs + y def test_elemwise_noargs(): def func(): return np.float_(5.0) assert_eq(sparse.elemwise(func), func()) @pytest.mark.parametrize( "func", [ operator.pow, operator.truediv, operator.floordiv, operator.ge, operator.le, operator.eq, operator.mod, ], ) @pytest.mark.filterwarnings("ignore:divide by zero") @pytest.mark.filterwarnings("ignore:invalid value") @pytest.mark.parametrize("format", [COO, GCXS, DOK]) def test_nonzero_outout_fv_ufunc(func, format): xs = sparse.random((2, 3, 4), density=0.5, format=format) ys = sparse.random((2, 3, 4), density=0.5, format=format) x = xs.todense() y = ys.todense() f = func(x, y) fs = func(xs, ys) assert isinstance(fs, format) assert_eq(f, fs) @pytest.mark.parametrize( "func, scalar", [ (operator.mul, 5), (operator.add, 0), (operator.sub, 0), (operator.pow, 5), (operator.truediv, 3), (operator.floordiv, 4), (operator.gt, 5), (operator.lt, -5), (operator.ne, 0), (operator.ge, 5), (operator.le, -3), (operator.eq, 1), (operator.mod, 5), ], ) @pytest.mark.parametrize("convert_to_np_number", [True, False]) @pytest.mark.parametrize("format", [COO, GCXS, DOK]) def test_elemwise_scalar(func, scalar, convert_to_np_number, format): xs = sparse.random((2, 3, 4), density=0.5, format=format) if convert_to_np_number: scalar = np.float32(scalar) y = scalar x = xs.todense() fs = func(xs, y) assert isinstance(fs, format) assert xs.nnz >= fs.nnz assert_eq(fs, func(x, y)) @pytest.mark.parametrize( "func, scalar", [ (operator.mul, 5), (operator.add, 0), (operator.sub, 0), (operator.gt, -5), (operator.lt, 5), (operator.ne, 0), (operator.ge, -5), (operator.le, 3), (operator.eq, 1), ], ) @pytest.mark.parametrize("convert_to_np_number", [True, False]) def test_leftside_elemwise_scalar(func, scalar, convert_to_np_number): xs = sparse.random((2, 3, 4), density=0.5) if convert_to_np_number: scalar = np.float32(scalar) y = scalar x = xs.todense() fs = func(y, xs) assert isinstance(fs, COO) assert xs.nnz >= fs.nnz assert_eq(fs, func(y, x)) @pytest.mark.parametrize( "func, scalar", [ (operator.add, 5), (operator.sub, -5), (operator.pow, -3), (operator.truediv, 0), (operator.floordiv, 0), (operator.gt, -5), (operator.lt, 5), (operator.ne, 1), (operator.ge, -3), (operator.le, 3), (operator.eq, 0), ], ) @pytest.mark.filterwarnings("ignore:divide by zero") @pytest.mark.filterwarnings("ignore:invalid value") def test_scalar_output_nonzero_fv(func, scalar): xs = sparse.random((2, 3, 4), density=0.5) y = scalar x = xs.todense() f = func(x, y) fs = func(xs, y) assert isinstance(fs, COO) assert fs.nnz <= xs.nnz assert_eq(f, fs) @pytest.mark.parametrize("func", [operator.and_, operator.or_, operator.xor]) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]) @pytest.mark.parametrize("format", [COO, GCXS, DOK]) def test_bitwise_binary(func, shape, format): # Small arrays need high density to have nnz entries # Casting floats to int will result in all zeros, hence the * 100 
xs = (sparse.random(shape, density=0.5, format=format) * 100).astype(np.int_) ys = (sparse.random(shape, density=0.5, format=format) * 100).astype(np.int_) x = xs.todense() y = ys.todense() assert_eq(func(xs, ys), func(x, y)) @pytest.mark.parametrize("func", [operator.iand, operator.ior, operator.ixor]) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]) @pytest.mark.parametrize("format", [COO, GCXS, DOK]) def test_bitwise_binary_inplace(func, shape, format): # Small arrays need high density to have nnz entries # Casting floats to int will result in all zeros, hence the * 100 xs = (sparse.random(shape, density=0.5, format=format) * 100).astype(np.int_) ys = (sparse.random(shape, density=0.5, format=format) * 100).astype(np.int_) x = xs.todense() y = ys.todense() xs = func(xs, ys) x = func(x, y) assert_eq(xs, x) @pytest.mark.parametrize("func", [operator.lshift, operator.rshift]) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]) def test_bitshift_binary(func, shape): # Small arrays need high density to have nnz entries # Casting floats to int will result in all zeros, hence the * 100 xs = (sparse.random(shape, density=0.5) * 100).astype(np.int_) # Can't merge into test_bitwise_binary because left/right shifting # with something >= 64 isn't defined. ys = (sparse.random(shape, density=0.5) * 64).astype(np.int_) x = xs.todense() y = ys.todense() assert_eq(func(xs, ys), func(x, y)) @pytest.mark.parametrize("func", [operator.ilshift, operator.irshift]) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]) def test_bitshift_binary_inplace(func, shape): # Small arrays need high density to have nnz entries # Casting floats to int will result in all zeros, hence the * 100 xs = (sparse.random(shape, density=0.5) * 100).astype(np.int_) # Can't merge into test_bitwise_binary because left/right shifting # with something >= 64 isn't defined. ys = (sparse.random(shape, density=0.5) * 64).astype(np.int_) x = xs.todense() y = ys.todense() xs = func(xs, ys) x = func(x, y) assert_eq(xs, x) @pytest.mark.parametrize("func", [operator.and_]) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]) def test_bitwise_scalar(func, shape): # Small arrays need high density to have nnz entries # Casting floats to int will result in all zeros, hence the * 100 xs = (sparse.random(shape, density=0.5) * 100).astype(np.int_) y = np.random.randint(100) x = xs.todense() assert_eq(func(xs, y), func(x, y)) assert_eq(func(y, xs), func(y, x)) @pytest.mark.parametrize("func", [operator.lshift, operator.rshift]) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]) def test_bitshift_scalar(func, shape): # Small arrays need high density to have nnz entries # Casting floats to int will result in all zeros, hence the * 100 xs = (sparse.random(shape, density=0.5) * 100).astype(np.int_) # Can't merge into test_bitwise_binary because left/right shifting # with something >= 64 isn't defined. 
y = np.random.randint(64) x = xs.todense() assert_eq(func(xs, y), func(x, y)) @pytest.mark.parametrize("func", [operator.invert]) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]) def test_unary_bitwise_nonzero_output_fv(func, shape): # Small arrays need high density to have nnz entries # Casting floats to int will result in all zeros, hence the * 100 xs = (sparse.random(shape, density=0.5) * 100).astype(np.int_) x = xs.todense() f = func(x) fs = func(xs) assert isinstance(fs, COO) assert fs.nnz <= xs.nnz assert_eq(f, fs) @pytest.mark.parametrize("func", [operator.or_, operator.xor]) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]) def test_binary_bitwise_nonzero_output_fv(func, shape): # Small arrays need high density to have nnz entries # Casting floats to int will result in all zeros, hence the * 100 xs = (sparse.random(shape, density=0.5) * 100).astype(np.int_) y = np.random.randint(1, 100) x = xs.todense() f = func(x, y) fs = func(xs, y) assert isinstance(fs, COO) assert fs.nnz <= xs.nnz assert_eq(f, fs) @pytest.mark.parametrize( "func", [operator.mul, operator.add, operator.sub, operator.gt, operator.lt, operator.ne], ) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]) def test_elemwise_nonzero_input_fv(func, shape): xs = sparse.random(shape, density=0.5, fill_value=np.random.rand()) ys = sparse.random(shape, density=0.5, fill_value=np.random.rand()) x = xs.todense() y = ys.todense() assert_eq(func(xs, ys), func(x, y)) @pytest.mark.parametrize("func", [operator.lshift, operator.rshift]) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]) def test_binary_bitshift_densification_fails(func, shape): # Small arrays need high density to have nnz entries # Casting floats to int will result in all zeros, hence the * 100 x = np.random.randint(1, 100) ys = (sparse.random(shape, density=0.5) * 64).astype(np.int_) y = ys.todense() f = func(x, y) fs = func(x, ys) assert isinstance(fs, COO) assert fs.nnz <= ys.nnz assert_eq(f, fs) @pytest.mark.parametrize("func", [operator.and_, operator.or_, operator.xor]) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]) def test_bitwise_binary_bool(func, shape): # Small arrays need high density to have nnz entries xs = sparse.random(shape, density=0.5).astype(bool) ys = sparse.random(shape, density=0.5).astype(bool) x = xs.todense() y = ys.todense() assert_eq(func(xs, ys), func(x, y)) def test_elemwise_binary_empty(): x = COO({}, shape=(10, 10)) y = sparse.random((10, 10), density=0.5) for z in [x * y, y * x]: assert z.nnz == 0 assert z.coords.shape == (2, 0) assert z.data.shape == (0,) sparse-0.12.0/sparse/tests/test_io.py000066400000000000000000000013541402510130100175340ustar00rootroot00000000000000import pytest import numpy as np import sparse from sparse import save_npz, load_npz from sparse._utils import assert_eq @pytest.mark.parametrize("compression", [True, False]) @pytest.mark.parametrize("format", ["coo", "gcxs"]) def test_save_load_npz_file(tmp_path, compression, format): x = sparse.random((2, 3, 4, 5), density=0.25, format=format) y = x.todense() filename = tmp_path / "mat.npz" save_npz(filename, x, compressed=compression) z = load_npz(filename) assert_eq(x, z) assert_eq(y, z.todense()) def test_load_wrong_format_exception(tmp_path): x = np.array([1, 2, 3]) filename = tmp_path / "mat.npz" np.savez(filename, x) with pytest.raises(RuntimeError): load_npz(filename) 
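# ---------------------------------------------------------------------------
# Editor's addition: a hedged, self-contained usage sketch (not part of the
# upstream test suite) of the save_npz / load_npz round trip exercised by
# test_save_load_npz_file above.  The "example.npz" path is an illustrative
# placeholder; every API call used here already appears in these tests.
# ---------------------------------------------------------------------------
def _npz_roundtrip_sketch(path="example.npz"):
    import sparse
    from sparse import save_npz, load_npz

    x = sparse.random((10, 10), density=0.1, format="gcxs")
    save_npz(path, x, compressed=True)  # compressed=False is equally valid
    y = load_npz(path)
    # The round trip should preserve both the values and the sparsity pattern.
    assert (x.todense() == y.todense()).all()
    return y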
sparse-0.12.0/tox.ini000066400000000000000000000001361402510130100143650ustar00rootroot00000000000000[tox] envlist = py36, py37 [testenv] commands= pytest {posargs} extras= tests tox sparse-0.12.0/versioneer.py000066400000000000000000002062171402510130100156150ustar00rootroot00000000000000# Version: 0.18 """The Versioneer - like a rocketeer, but for versions. The Versioneer ============== * like a rocketeer, but for versions! * https://github.com/warner/python-versioneer * Brian Warner * License: Public Domain * Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy * [![Latest Version] (https://pypip.in/version/versioneer/badge.svg?style=flat) ](https://pypi.python.org/pypi/versioneer/) * [![Build Status] (https://travis-ci.org/warner/python-versioneer.png?branch=master) ](https://travis-ci.org/warner/python-versioneer) This is a tool for managing a recorded version number in distutils-based python projects. The goal is to remove the tedious and error-prone "update the embedded version string" step from your release process. Making a new release should be as easy as recording a new tag in your version-control system, and maybe making new tarballs. ## Quick Install * `pip install versioneer` to somewhere to your $PATH * add a `[versioneer]` section to your setup.cfg (see below) * run `versioneer install` in your source tree, commit the results ## Version Identifiers Source trees come from a variety of places: * a version-control system checkout (mostly used by developers) * a nightly tarball, produced by build automation * a snapshot tarball, produced by a web-based VCS browser, like github's "tarball from tag" feature * a release tarball, produced by "setup.py sdist", distributed through PyPI Within each source tree, the version identifier (either a string or a number, this tool is format-agnostic) can come from a variety of places: * ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows about recent "tags" and an absolute revision-id * the name of the directory into which the tarball was unpacked * an expanded VCS keyword ($Id$, etc) * a `_version.py` created by some earlier build step For released software, the version identifier is closely related to a VCS tag. Some projects use tag names that include more than just the version string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool needs to strip the tag prefix to extract the version identifier. For unreleased software (between tags), the version identifier should provide enough information to help developers recreate the same tree, while also giving them an idea of roughly how old the tree is (after version 1.2, before version 1.3). Many VCS systems can report a description that captures this, for example `git describe --tags --dirty --always` reports things like "0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the 0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has uncommitted changes. The version identifier is used for multiple purposes: * to allow the module to self-identify its version: `myproject.__version__` * to choose a name and prefix for a 'setup.py sdist' tarball ## Theory of Operation Versioneer works by adding a special `_version.py` file into your source tree, where your `__init__.py` can import it. This `_version.py` knows how to dynamically ask the VCS tool for version information at import time. 
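As a concrete illustration (a hedged sketch; the concrete values shown are
illustrative, not necessarily this project's exact configuration), the
`[versioneer]` section of `setup.cfg` that drives this machinery, and that
`get_config_from_root()` later in this file parses, typically looks like:

    [versioneer]
    VCS = git
    style = pep440
    versionfile_source = sparse/_version.py
    versionfile_build = sparse/_version.py
    tag_prefix =
    parentdir_prefix = sparse-

Only `VCS` is mandatory; the remaining keys map directly onto the
`VersioneerConfig` attributes that `get_config_from_root()` fills in.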
`_version.py` also contains `$Revision$` markers, and the installation process marks `_version.py` to have this marker rewritten with a tag name during the `git archive` command. As a result, generated tarballs will contain enough information to get the proper version. To allow `setup.py` to compute a version too, a `versioneer.py` is added to the top level of your source tree, next to `setup.py` and the `setup.cfg` that configures it. This overrides several distutils/setuptools commands to compute the version when invoked, and changes `setup.py build` and `setup.py sdist` to replace `_version.py` with a small static file that contains just the generated version data. ## Installation See [INSTALL.md](./INSTALL.md) for detailed installation instructions. ## Version-String Flavors Code which uses Versioneer can learn about its version string at runtime by importing `_version` from your main `__init__.py` file and running the `get_versions()` function. From the "outside" (e.g. in `setup.py`), you can import the top-level `versioneer.py` and run `get_versions()`. Both functions return a dictionary with different flavors of version information: * `['version']`: A condensed version string, rendered using the selected style. This is the most commonly used value for the project's version string. The default "pep440" style yields strings like `0.11`, `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section below for alternative styles. * `['full-revisionid']`: detailed revision identifier. For Git, this is the full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac". * `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the commit date in ISO 8601 format. This will be None if the date is not available. * `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that this is only accurate if run in a VCS checkout, otherwise it is likely to be False or None * `['error']`: if the version string could not be computed, this will be set to a string describing the problem, otherwise it will be None. It may be useful to throw an exception in setup.py if this is set, to avoid e.g. creating tarballs with a version string of "unknown". Some variants are more useful than others. Including `full-revisionid` in a bug report should allow developers to reconstruct the exact code being tested (or indicate the presence of local changes that should be shared with the developers). `version` is suitable for display in an "about" box or a CLI `--version` output: it can be easily compared against release notes and lists of bugs fixed in various releases. The installer adds the following text to your `__init__.py` to place a basic version in `YOURPROJECT.__version__`: from ._version import get_versions __version__ = get_versions()['version'] del get_versions ## Styles The setup.cfg `style=` configuration controls how the VCS information is rendered into a version string. The default style, "pep440", produces a PEP440-compliant string, equal to the un-prefixed tag name for actual releases, and containing an additional "local version" section with more detail for in-between builds. For Git, this is TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags --dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and that this commit is two revisions ("+2") beyond the "0.11" tag. 
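As a hedged worked example of that default style (the hash and distance are
illustrative, following the `render_pep440()` logic later in this file): a
tree that is three commits past the `0.12.0` tag at short hash `1a2b3c4`
renders as `0.12.0+3.g1a2b3c4`, and the same tree with uncommitted changes
renders as `0.12.0+3.g1a2b3c4.dirty`.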
For released software (exactly equal to a known tag), the identifier will only contain the stripped tag, e.g. "0.11". Other styles are available. See [details.md](details.md) in the Versioneer source tree for descriptions. ## Debugging Versioneer tries to avoid fatal errors: if something goes wrong, it will tend to return a version of "0+unknown". To investigate the problem, run `setup.py version`, which will run the version-lookup code in a verbose mode, and will display the full contents of `get_versions()` (including the `error` string, which may help identify what went wrong). ## Known Limitations Some situations are known to cause problems for Versioneer. This details the most significant ones. More can be found on Github [issues page](https://github.com/warner/python-versioneer/issues). ### Subprojects Versioneer has limited support for source trees in which `setup.py` is not in the root directory (e.g. `setup.py` and `.git/` are *not* siblings). The are two common reasons why `setup.py` might not be in the root: * Source trees which contain multiple subprojects, such as [Buildbot](https://github.com/buildbot/buildbot), which contains both "master" and "slave" subprojects, each with their own `setup.py`, `setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI distributions (and upload multiple independently-installable tarballs). * Source trees whose main purpose is to contain a C library, but which also provide bindings to Python (and perhaps other langauges) in subdirectories. Versioneer will look for `.git` in parent directories, and most operations should get the right version string. However `pip` and `setuptools` have bugs and implementation details which frequently cause `pip install .` from a subproject directory to fail to find a correct version string (so it usually defaults to `0+unknown`). `pip install --editable .` should work correctly. `setup.py install` might work too. Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in some later version. [Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking this issue. The discussion in [PR #61](https://github.com/warner/python-versioneer/pull/61) describes the issue from the Versioneer side in more detail. [pip PR#3176](https://github.com/pypa/pip/pull/3176) and [pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve pip to let Versioneer work correctly. Versioneer-0.16 and earlier only looked for a `.git` directory next to the `setup.cfg`, so subprojects were completely unsupported with those releases. ### Editable installs with setuptools <= 18.5 `setup.py develop` and `pip install --editable .` allow you to install a project into a virtualenv once, then continue editing the source code (and test) without re-installing after every change. "Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a convenient way to specify executable scripts that should be installed along with the python package. These both work as expected when using modern setuptools. When using setuptools-18.5 or earlier, however, certain operations will cause `pkg_resources.DistributionNotFound` errors when running the entrypoint script, which must be resolved by re-installing the package. This happens when the install happens with one version, then the egg_info data is regenerated while a different version is checked out. 
Many setup.py commands cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into a different virtualenv), so this can be surprising. [Bug #83](https://github.com/warner/python-versioneer/issues/83) describes this one, but upgrading to a newer version of setuptools should probably resolve it. ### Unicode version strings While Versioneer works (and is continually tested) with both Python 2 and Python 3, it is not entirely consistent with bytes-vs-unicode distinctions. Newer releases probably generate unicode version strings on py2. It's not clear that this is wrong, but it may be surprising for applications when then write these strings to a network connection or include them in bytes-oriented APIs like cryptographic checksums. [Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates this question. ## Updating Versioneer To upgrade your project to a new release of Versioneer, do the following: * install the new Versioneer (`pip install -U versioneer` or equivalent) * edit `setup.cfg`, if necessary, to include any new configuration settings indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details. * re-run `versioneer install` in your source tree, to replace `SRC/_version.py` * commit any changed files ## Future Directions This tool is designed to make it easily extended to other version-control systems: all VCS-specific components are in separate directories like src/git/ . The top-level `versioneer.py` script is assembled from these components by running make-versioneer.py . In the future, make-versioneer.py will take a VCS name as an argument, and will construct a version of `versioneer.py` that is specific to the given VCS. It might also take the configuration arguments that are currently provided manually during installation by editing setup.py . Alternatively, it might go the other direction and include code from all supported VCS systems, reducing the number of intermediate scripts. ## License To make Versioneer easier to embed, all its code is dedicated to the public domain. The `_version.py` that it creates is also in the public domain. Specifically, both are released under the Creative Commons "Public Domain Dedication" license (CC0-1.0), as described in https://creativecommons.org/publicdomain/zero/1.0/ . """ from __future__ import print_function try: import configparser except ImportError: import ConfigParser as configparser import errno import json import os import re import subprocess import sys class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_root(): """Get the project root directory. We require that all commands are run from the project root, i.e. the directory that contains setup.py, setup.cfg, and versioneer.py . """ root = os.path.realpath(os.path.abspath(os.getcwd())) setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): # allow 'python path/to/setup.py COMMAND' root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0]))) setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): err = ( "Versioneer was unable to run the project root directory. 
" "Versioneer requires setup.py to be executed from " "its immediate directory (like 'python setup.py COMMAND'), " "or in a way that lets it use sys.argv[0] to find the root " "(like 'python path/to/setup.py COMMAND')." ) raise VersioneerBadRootError(err) try: # Certain runtime workflows (setup.py install/develop in a setuptools # tree) execute all dependencies in a single python process, so # "versioneer" may be imported multiple times, and python's shared # module-import table will cache the first one. So we can't use # os.path.dirname(__file__), as that will find whichever # versioneer.py was first imported, even in later projects. me = os.path.realpath(os.path.abspath(__file__)) me_dir = os.path.normcase(os.path.splitext(me)[0]) vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0]) if me_dir != vsr_dir: print( "Warning: build in %s is using versioneer.py from %s" % (os.path.dirname(me), versioneer_py) ) except NameError: pass return root def get_config_from_root(root): """Read the project setup.cfg file to determine Versioneer config.""" # This might raise EnvironmentError (if setup.cfg is missing), or # configparser.NoSectionError (if it lacks a [versioneer] section), or # configparser.NoOptionError (if it lacks "VCS="). See the docstring at # the top of versioneer.py for instructions on writing your setup.cfg . setup_cfg = os.path.join(root, "setup.cfg") parser = configparser.SafeConfigParser() with open(setup_cfg, "r") as f: parser.readfp(f) VCS = parser.get("versioneer", "VCS") # mandatory def get(parser, name): if parser.has_option("versioneer", name): return parser.get("versioneer", name) return None cfg = VersioneerConfig() cfg.VCS = VCS cfg.style = get(parser, "style") or "" cfg.versionfile_source = get(parser, "versionfile_source") cfg.versionfile_build = get(parser, "versionfile_build") cfg.tag_prefix = get(parser, "tag_prefix") if cfg.tag_prefix in ("''", '""'): cfg.tag_prefix = "" cfg.parentdir_prefix = get(parser, "parentdir_prefix") cfg.verbose = get(parser, "verbose") return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" # these dictionaries contain VCS-specific tools LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen( [c] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None), ) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %s" % dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %s" % (commands,)) return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) print("stdout was %s" % stdout) return None, p.returncode return stdout, p.returncode LONG_VERSION_PY[ "git" ] = ''' # This file helps to compute a version number in source trees obtained from # git-archive tarball (such 
as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. Generated by # versioneer-0.18 (https://github.com/warner/python-versioneer) """Git implementation of _version.py.""" import errno import os import re import subprocess import sys def get_keywords(): """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s" keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} return keywords class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_config(): """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "%(STYLE)s" cfg.tag_prefix = "%(TAG_PREFIX)s" cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s" cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s" cfg.verbose = False return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %%s" %% dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %%s" %% (commands,)) return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %%s (error)" %% dispcmd) print("stdout was %%s" %% stdout) return None, p.returncode return stdout, p.returncode def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. 
We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %%s but none started with prefix %%s" %% (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if not keywords: raise NotThisMethod("no keywords at all, weird") date = keywords.get("date") if date is not None: # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %%d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: print("discarding '%%s', no digits" %% ",".join(refs - tags)) if verbose: print("likely tags: %%s" %% ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. 
"2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %%s" %% r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %%s not under git control" %% root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%%s*" %% tag_prefix], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? pieces["error"] = ("unable to parse git-describe output: '%%s'" %% describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%%s' doesn't start with prefix '%%s'" print(fmt %% (full_tag, tag_prefix)) pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'" %% (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"], cwd=root)[0].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." 
return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): """TAG[.post.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 0.post.devDISTANCE """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post.dev%%d" %% pieces["distance"] else: # exception #1 rendered = "0.post.dev%%d" %% pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%%s" %% pieces["short"] else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%%s" %% pieces["short"] return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Eexceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. Exceptions: 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%%s'" %% style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date")} def get_versions(): """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. for i in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree", "date": None} try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None} ''' @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. 
keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if not keywords: raise NotThisMethod("no keywords at all, weird") date = keywords.get("date") if date is not None: # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = set([r for r in refs if re.search(r"\d", r)]) if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix) :] if verbose: print("picking %s" % r) return { "version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date, } # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return { "version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None, } @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. 
""" GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %s not under git control" % root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command( GITS, [ "describe", "--tags", "--dirty", "--always", "--long", "--match", "%s*" % tag_prefix, ], cwd=root, ) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[: git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % ( full_tag, tag_prefix, ) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix) :] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[ 0 ].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def do_vcs_install(manifest_in, versionfile_source, ipy): """Git-specific installation logic for Versioneer. For Git, this means creating/changing .gitattributes to mark _version.py for export-subst keyword substitution. 
""" GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] files = [manifest_in, versionfile_source] if ipy: files.append(ipy) try: me = __file__ if me.endswith(".pyc") or me.endswith(".pyo"): me = os.path.splitext(me)[0] + ".py" versioneer_file = os.path.relpath(me) except NameError: versioneer_file = "versioneer.py" files.append(versioneer_file) present = False try: f = open(".gitattributes", "r") for line in f.readlines(): if line.strip().startswith(versionfile_source): if "export-subst" in line.strip().split()[1:]: present = True f.close() except EnvironmentError: pass if not present: f = open(".gitattributes", "a+") f.write("%s export-subst\n" % versionfile_source) f.close() files.append(".gitattributes") run_command(GITS, ["add", "--"] + files) def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return { "version": dirname[len(parentdir_prefix) :], "full-revisionid": None, "dirty": False, "error": None, "date": None, } else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print( "Tried directories %s but none started with prefix %s" % (str(rootdirs), parentdir_prefix) ) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") SHORT_VERSION_PY = """ # This file was generated by 'versioneer.py' (0.18) from # revision-control system data, or from the parent directory name of an # unpacked source archive. Distribution tarballs contain a pre-generated copy # of this file. import json version_json = ''' %s ''' # END VERSION_JSON def get_versions(): return json.loads(version_json) """ def versions_from_file(filename): """Try to determine the version from _version.py if present.""" try: with open(filename) as f: contents = f.read() except EnvironmentError: raise NotThisMethod("unable to read _version.py") mo = re.search( r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S ) if not mo: mo = re.search( r"version_json = '''\r\n(.*)''' # END VERSION_JSON", contents, re.M | re.S ) if not mo: raise NotThisMethod("no version_json in _version.py") return json.loads(mo.group(1)) def write_to_version_file(filename, versions): """Write the given version number to the given _version.py file.""" os.unlink(filename) contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": ")) with open(filename, "w") as f: f.write(SHORT_VERSION_PY % contents) print("set %s to '%s'" % (filename, versions["version"])) def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 
    0+untagged.DISTANCE.gHEX[.dirty]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += plus_or_dot(pieces)
            rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
            if pieces["dirty"]:
                rendered += ".dirty"
    else:
        # exception #1
        rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered


def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"]:
            rendered += ".post.dev%d" % pieces["distance"]
    else:
        # exception #1
        rendered = "0.post.dev%d" % pieces["distance"]
    return rendered


def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
            rendered += plus_or_dot(pieces)
            rendered += "g%s" % pieces["short"]
    else:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        rendered += "+g%s" % pieces["short"]
    return rendered


def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
    else:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
    return rendered


def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"]:
            rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered


def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always --long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags.
HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return { "version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None, } if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%s'" % style) return { "version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date"), } class VersioneerBadRootError(Exception): """The project root directory is unknown or missing key files.""" def get_versions(verbose=False): """Get the project version from whatever source is available. Returns dict with two keys: 'version' and 'full'. """ if "versioneer" in sys.modules: # see the discussion in cmdclass.py:get_cmdclass() del sys.modules["versioneer"] root = get_root() cfg = get_config_from_root(root) assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg" handlers = HANDLERS.get(cfg.VCS) assert handlers, "unrecognized VCS '%s'" % cfg.VCS verbose = verbose or cfg.verbose assert ( cfg.versionfile_source is not None ), "please set versioneer.versionfile_source" assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" versionfile_abs = os.path.join(root, cfg.versionfile_source) # extract version from first of: _version.py, VCS command (e.g. 'git # describe'), parentdir. This is meant to work for developers using a # source checkout, for users of a tarball created by 'setup.py sdist', # and for users of a tarball/zipball created by 'git archive' or github's # download-from-tag feature or the equivalent in other VCSes. 
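    # Whichever strategy succeeds, the result has the same shape; an
    # illustrative (made-up) example:
    #     {"version": "0.12.0+3.gabc1234", "full-revisionid": "<40-hex sha>",
    #      "dirty": False, "error": None, "date": "2021-02-24T16:41:38+0000"}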
    get_keywords_f = handlers.get("get_keywords")
    from_keywords_f = handlers.get("keywords")
    if get_keywords_f and from_keywords_f:
        try:
            keywords = get_keywords_f(versionfile_abs)
            ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
            if verbose:
                print("got version from expanded keyword %s" % ver)
            return ver
        except NotThisMethod:
            pass

    try:
        ver = versions_from_file(versionfile_abs)
        if verbose:
            print("got version from file %s %s" % (versionfile_abs, ver))
        return ver
    except NotThisMethod:
        pass

    from_vcs_f = handlers.get("pieces_from_vcs")
    if from_vcs_f:
        try:
            pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
            ver = render(pieces, cfg.style)
            if verbose:
                print("got version from VCS %s" % ver)
            return ver
        except NotThisMethod:
            pass

    try:
        if cfg.parentdir_prefix:
            ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
            if verbose:
                print("got version from parentdir %s" % ver)
            return ver
    except NotThisMethod:
        pass

    if verbose:
        print("unable to compute version")

    return {
        "version": "0+unknown",
        "full-revisionid": None,
        "dirty": None,
        "error": "unable to compute version",
        "date": None,
    }


def get_version():
    """Get the short version string for this project."""
    return get_versions()["version"]


def get_cmdclass():
    """Get the custom setuptools/distutils subclasses used by Versioneer."""
    if "versioneer" in sys.modules:
        del sys.modules["versioneer"]
        # this fixes the "python setup.py develop" case (also 'install' and
        # 'easy_install .'), in which subdependencies of the main project are
        # built (using setup.py bdist_egg) in the same python process. Assume
        # a main project A and a dependency B, which use different versions
        # of Versioneer. A's setup.py imports A's Versioneer, leaving it in
        # sys.modules by the time B's setup.py is executed, causing B to run
        # with the wrong versioneer. Setuptools wraps the sub-dep builds in a
        # sandbox that restores sys.modules to its pre-build state, so the
        # parent is protected against the child's "import versioneer". By
        # removing ourselves from sys.modules here, before the child build
        # happens, we protect the child from the parent's versioneer too.
        # Also see https://github.com/warner/python-versioneer/issues/52

    cmds = {}

    # we add "version" to both distutils and setuptools
    from distutils.core import Command

    class cmd_version(Command):
        description = "report generated version string"
        user_options = []
        boolean_options = []

        def initialize_options(self):
            pass

        def finalize_options(self):
            pass

        def run(self):
            vers = get_versions(verbose=True)
            print("Version: %s" % vers["version"])
            print(" full-revisionid: %s" % vers.get("full-revisionid"))
            print(" dirty: %s" % vers.get("dirty"))
            print(" date: %s" % vers.get("date"))
            if vers["error"]:
                print(" error: %s" % vers["error"])

    cmds["version"] = cmd_version

    # we override "build_py" in both distutils and setuptools
    #
    # most invocation pathways end up running build_py:
    #  distutils/build -> build_py
    #  distutils/install -> distutils/build ->..
    #  setuptools/bdist_wheel -> distutils/install ->..
    #  setuptools/bdist_egg -> distutils/install_lib -> build_py
    #  setuptools/install -> bdist_egg ->..
    #  setuptools/develop -> ?
    #  pip install:
    #   copies source tree to a tempdir before running egg_info/etc
    #   if .git isn't copied too, 'git describe' will fail
    #   then does setup.py bdist_wheel, or sometimes setup.py install
    #  setup.py egg_info -> ?
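    # (By the end of this function, cmds maps command names to the subclasses
    # defined below -- typically {"version": ..., "build_py": ..., "sdist": ...};
    # when cx_Freeze is in sys.modules, "build_exe" replaces "build_py", and
    # when py2exe is in sys.modules a "py2exe" command is added as well.)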
# we override different "build_py" commands for both environments if "setuptools" in sys.modules: from setuptools.command.build_py import build_py as _build_py else: from distutils.command.build_py import build_py as _build_py class cmd_build_py(_build_py): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() _build_py.run(self) # now locate _version.py in the new build/ directory and replace # it with an updated value if cfg.versionfile_build: target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) cmds["build_py"] = cmd_build_py if "cx_Freeze" in sys.modules: # cx_freeze enabled? from cx_Freeze.dist import build_exe as _build_exe # nczeczulin reports that py2exe won't like the pep440-style string # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g. # setup(console=[{ # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION # "product_version": versioneer.get_version(), # ... class cmd_build_exe(_build_exe): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() target_versionfile = cfg.versionfile_source print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) _build_exe.run(self) os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write( LONG % { "DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, } ) cmds["build_exe"] = cmd_build_exe del cmds["build_py"] if "py2exe" in sys.modules: # py2exe enabled? try: from py2exe.distutils_buildexe import py2exe as _py2exe # py3 except ImportError: from py2exe.build_exe import py2exe as _py2exe # py2 class cmd_py2exe(_py2exe): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() target_versionfile = cfg.versionfile_source print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) _py2exe.run(self) os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write( LONG % { "DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, } ) cmds["py2exe"] = cmd_py2exe # we override different "sdist" commands for both environments if "setuptools" in sys.modules: from setuptools.command.sdist import sdist as _sdist else: from distutils.command.sdist import sdist as _sdist class cmd_sdist(_sdist): def run(self): versions = get_versions() self._versioneer_generated_versions = versions # unless we update this, the command will keep using the old # version self.distribution.metadata.version = versions["version"] return _sdist.run(self) def make_release_tree(self, base_dir, files): root = get_root() cfg = get_config_from_root(root) _sdist.make_release_tree(self, base_dir, files) # now locate _version.py in the new base_dir directory # (remembering that it may be a hardlink) and replace it with an # updated value target_versionfile = os.path.join(base_dir, cfg.versionfile_source) print("UPDATING %s" % target_versionfile) write_to_version_file( target_versionfile, self._versioneer_generated_versions ) cmds["sdist"] = cmd_sdist return cmds CONFIG_ERROR = """ setup.cfg is missing the necessary Versioneer configuration. 
You need a section like: [versioneer] VCS = git style = pep440 versionfile_source = src/myproject/_version.py versionfile_build = myproject/_version.py tag_prefix = parentdir_prefix = myproject- You will also need to edit your setup.py to use the results: import versioneer setup(version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), ...) Please read the docstring in ./versioneer.py for configuration instructions, edit setup.cfg, and re-run the installer or 'python versioneer.py setup'. """ SAMPLE_CONFIG = """ # See the docstring in versioneer.py for instructions. Note that you must # re-run 'versioneer.py setup' after changing this section, and commit the # resulting files. [versioneer] #VCS = git #style = pep440 #versionfile_source = #versionfile_build = #tag_prefix = #parentdir_prefix = """ INIT_PY_SNIPPET = """ from ._version import get_versions __version__ = get_versions()['version'] del get_versions """ def do_setup(): """Main VCS-independent setup function for installing Versioneer.""" root = get_root() try: cfg = get_config_from_root(root) except ( EnvironmentError, configparser.NoSectionError, configparser.NoOptionError, ) as e: if isinstance(e, (EnvironmentError, configparser.NoSectionError)): print("Adding sample versioneer config to setup.cfg", file=sys.stderr) with open(os.path.join(root, "setup.cfg"), "a") as f: f.write(SAMPLE_CONFIG) print(CONFIG_ERROR, file=sys.stderr) return 1 print(" creating %s" % cfg.versionfile_source) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write( LONG % { "DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, } ) ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py") if os.path.exists(ipy): try: with open(ipy, "r") as f: old = f.read() except EnvironmentError: old = "" if INIT_PY_SNIPPET not in old: print(" appending to %s" % ipy) with open(ipy, "a") as f: f.write(INIT_PY_SNIPPET) else: print(" %s unmodified" % ipy) else: print(" %s doesn't exist, ok" % ipy) ipy = None # Make sure both the top-level "versioneer.py" and versionfile_source # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so # they'll be copied into source distributions. Pip won't be able to # install the package without this. manifest_in = os.path.join(root, "MANIFEST.in") simple_includes = set() try: with open(manifest_in, "r") as f: for line in f: if line.startswith("include "): for include in line.split()[1:]: simple_includes.add(include) except EnvironmentError: pass # That doesn't cover everything MANIFEST.in can do # (http://docs.python.org/2/distutils/sourcedist.html#commands), so # it might give some false negatives. Appending redundant 'include' # lines is safe, though. if "versioneer.py" not in simple_includes: print(" appending 'versioneer.py' to MANIFEST.in") with open(manifest_in, "a") as f: f.write("include versioneer.py\n") else: print(" 'versioneer.py' already in MANIFEST.in") if cfg.versionfile_source not in simple_includes: print( " appending versionfile_source ('%s') to MANIFEST.in" % cfg.versionfile_source ) with open(manifest_in, "a") as f: f.write("include %s\n" % cfg.versionfile_source) else: print(" versionfile_source already in MANIFEST.in") # Make VCS-specific changes. For git, this means creating/changing # .gitattributes to mark _version.py for export-subst keyword # substitution. 
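    # Illustrative note (an assumption based on do_vcs_install() above, not a
    # literal output): the line appended to .gitattributes looks like
    #     PKG/_version.py export-subst
    # where the path is whatever versionfile_source is configured to, so that
    # 'git archive' expands the $Format$ placeholders that the "keywords"
    # handlers read back out of an exported tarball.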
do_vcs_install(manifest_in, cfg.versionfile_source, ipy) return 0 def scan_setup_py(): """Validate the contents of setup.py against Versioneer's expectations.""" found = set() setters = False errors = 0 with open("setup.py", "r") as f: for line in f.readlines(): if "import versioneer" in line: found.add("import") if "versioneer.get_cmdclass()" in line: found.add("cmdclass") if "versioneer.get_version()" in line: found.add("get_version") if "versioneer.VCS" in line: setters = True if "versioneer.versionfile_source" in line: setters = True if len(found) != 3: print("") print("Your setup.py appears to be missing some important items") print("(but I might be wrong). Please make sure it has something") print("roughly like the following:") print("") print(" import versioneer") print(" setup( version=versioneer.get_version(),") print(" cmdclass=versioneer.get_cmdclass(), ...)") print("") errors += 1 if setters: print("You should remove lines like 'versioneer.VCS = ' and") print("'versioneer.versionfile_source = ' . This configuration") print("now lives in setup.cfg, and should be removed from setup.py") print("") errors += 1 return errors if __name__ == "__main__": cmd = sys.argv[1] if cmd == "setup": errors = do_setup() errors += scan_setup_py() if errors: sys.exit(1)
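# ---------------------------------------------------------------------------
# Illustrative usage sketch (assumes a setuptools-based project; "myproject"
# and the version numbers are placeholders).  A setup.py wired in the way
# scan_setup_py() above checks for looks roughly like:
#
#     import versioneer
#     from setuptools import setup
#
#     setup(
#         name="myproject",
#         version=versioneer.get_version(),
#         cmdclass=versioneer.get_cmdclass(),
#     )
#
# With "style = pep440" and an empty tag_prefix in setup.cfg, a checkout that
# is 3 commits past tag "0.12.0" with local modifications would be reported
# by render_pep440() roughly as "0.12.0+3.gabc1234.dirty" (hash is a
# placeholder).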