pax_global_header00006660000000000000000000000064146347550150014524gustar00rootroot0000000000000052 comment=de312ea3abf91ca31a914fa60706d5e3450dd176 sparse-0.16.0a9/000077500000000000000000000000001463475501500133375ustar00rootroot00000000000000sparse-0.16.0a9/.codecov.yml000066400000000000000000000004741463475501500155670ustar00rootroot00000000000000comment: false coverage: status: project: default: # Total project must be 95% target: '100%' threshold: '5%' patch: default: # Patch coverage must be 92% target: '100%' threshold: '8%' precision: 2 round: down range: 80...98 sparse-0.16.0a9/.coveragerc000066400000000000000000000003231463475501500154560ustar00rootroot00000000000000[run] source= sparse/ omit= sparse/_version.py sparse/tests/* sparse/numba_backend/tests/* [report] exclude_lines = pragma: no cover return NotImplemented raise NotImplementedError sparse-0.16.0a9/.gitattributes000066400000000000000000000000401463475501500162240ustar00rootroot00000000000000sparse/_version.py export-subst sparse-0.16.0a9/.github/000077500000000000000000000000001463475501500146775ustar00rootroot00000000000000sparse-0.16.0a9/.github/CODE_OF_CONDUCT.md000066400000000000000000000001041463475501500174710ustar00rootroot00000000000000# Code of Conduct Please see [`docs/conduct.md`](docs/conduct.rst) sparse-0.16.0a9/.github/FUNDING.yml000066400000000000000000000012251463475501500165140ustar00rootroot00000000000000# These are supported funding model platforms github: [Quansight, Quansight-Labs] patreon: # Replace with a single Patreon username open_collective: # Replace with a single Open Collective username ko_fi: # Replace with a single Ko-fi username tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry liberapay: # Replace with a single Liberapay username issuehunt: # Replace with a single IssueHunt username otechie: # Replace with a single Otechie username custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] sparse-0.16.0a9/.github/ISSUE_TEMPLATE/000077500000000000000000000000001463475501500170625ustar00rootroot00000000000000sparse-0.16.0a9/.github/ISSUE_TEMPLATE/bug_report.md000066400000000000000000000010561463475501500215560ustar00rootroot00000000000000--- name: Bug report about: Create a report to help us improve title: '' labels: type:bug assignees: '' --- **Describe the bug** A clear and concise description of what the bug is. **To Reproduce** Steps to reproduce the behavior. **Expected behavior** A clear and concise description of what you expected to happen. **System** - OS and version: [e.g. Windows 10] - `sparse` version (`sparse.__version__`) - NumPy version (`np.__version__`) - Numba version (`numba.__version__`) **Additional context** Add any other context about the problem here. sparse-0.16.0a9/.github/ISSUE_TEMPLATE/feature_request.md000066400000000000000000000010741463475501500226110ustar00rootroot00000000000000--- name: Feature request about: Suggest an idea for this project title: '' labels: type:enhancement assignees: '' --- **Is your feature request related to a problem? Please describe.** A clear and concise description of what the problem is. **Describe the solution you'd like** A clear and concise description of what you want to happen. **Describe alternatives you've considered** A clear and concise description of any alternative solutions or features you've considered. 
**Additional context** Add any other context or screenshots about the feature request here. sparse-0.16.0a9/.github/ISSUE_TEMPLATE/question-support.md000066400000000000000000000005011463475501500227610ustar00rootroot00000000000000--- name: Question/Support about: A question about how to use this library. title: '' labels: type:support assignees: '' --- **Description** Provide a description of what you'd like to do. **Example Code** Syntactically valid Python code that shows what you want to do, possibly with placeholder functions or methods. sparse-0.16.0a9/.github/workflows/000077500000000000000000000000001463475501500167345ustar00rootroot00000000000000sparse-0.16.0a9/.github/workflows/ci.yml000066400000000000000000000112031463475501500200470ustar00rootroot00000000000000defaults: run: shell: bash -leo pipefail {0} concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true jobs: test: strategy: matrix: os: [ubuntu-latest] python: ['3.10', '3.11', '3.12'] pip_opts: [''] numba_boundscheck: [0] include: - os: macos-latest python: '3.10' - os: windows-latest python: '3.10' - os: ubuntu-latest python: '3.10' numba_boundscheck: 1 - os: ubuntu-latest python: '3.10' pip_opts: 'numpy<2' fail-fast: false runs-on: ${{ matrix.os }} env: PYTHON_VERSION: ${{ matrix.python }} NUMBA_BOUNDSCHECK: ${{ matrix.numba_boundscheck }} PYTHONFAULTHANDLER: '${{ github.workspace }}/faulthandler.log' steps: - name: Checkout Repo uses: actions/checkout@v4 - name: Set up Python uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} cache: 'pip' - name: Install package run: | pip install -e .[tests] if [ "${{ matrix.pip_opts }}" != "" ]; then pip install "${{ matrix.pip_opts }}" numba fi - name: Run tests run: | SPARSE_BACKEND=Numba pytest --pyargs sparse --cov-report=xml:coverage_Numba.xml -n 4 -vvv SPARSE_BACKEND=Finch pytest --pyargs sparse/tests --cov-report=xml:coverage_Finch.xml -n 4 -vvv - uses: codecov/codecov-action@v4 if: always() with: files: ./**/coverage*.xml docs: runs-on: ubuntu-latest steps: - name: Checkout Repo uses: actions/checkout@v4 - name: Set up Python uses: actions/setup-python@v5 with: python-version: '3.10' cache: 'pip' - name: Install package run: | pip install -e .[docs] - name: Run tests run: | sphinx-build -W -b html docs/ _build/html - uses: actions/upload-artifact@v4 with: name: Documentation path: _build/html benchmarks: runs-on: ubuntu-latest steps: - name: Checkout Repo uses: actions/checkout@v4 - name: Set up Python uses: actions/setup-python@v5 with: python-version: '3.10' cache: 'pip' - name: Install asv run: | pip install asv asv machine --yes - name: Run benchmarks run: | asv run --quick examples: runs-on: ubuntu-latest steps: - name: Checkout Repo uses: actions/checkout@v4 - name: Set up Python uses: actions/setup-python@v5 with: python-version: '3.11' cache: 'pip' - name: Build and install Sparse run: | pip install -U setuptools wheel python -m pip install '.[finch]' scipy - name: Run examples run: | source ci/test_examples.sh array_api_tests: strategy: matrix: backend: ['Numba', 'Finch'] fail-fast: false runs-on: ubuntu-latest steps: - name: Checkout Repo uses: actions/checkout@v4 - name: Checkout array-api-tests uses: actions/checkout@v4 with: repository: data-apis/array-api-tests ref: '33f2d2ea2f3dd2b3ceeeb4519d55e08096184149' # Latest commit as of 2024-05-29 submodules: 'true' path: 'array-api-tests' - name: Set up Python uses: actions/setup-python@v5 with: python-version: '3.11' cache: 'pip' - 
name: Install build and test dependencies from PyPI run: | python -m pip install pytest-xdist -r array-api-tests/requirements.txt - name: Build and install Sparse run: | python -m pip install '.[finch]' - name: Run the test suite env: ARRAY_API_TESTS_MODULE: sparse SPARSE_BACKEND: ${{ matrix.backend }} run: | cd ${GITHUB_WORKSPACE}/array-api-tests pytest array_api_tests -v -c pytest.ini -n 4 --max-examples=2 --derandomize --disable-deadline -o xfail_strict=True --xfails-file ${GITHUB_WORKSPACE}/ci/${{ matrix.backend }}-array-api-xfails.txt --skips-file ${GITHUB_WORKSPACE}/ci/${{ matrix.backend }}-array-api-skips.txt on: # Trigger the workflow on push or pull request, # but only for the main branch push: branches: - main - vnext pull_request: branches: - main - vnext # Also trigger on page_build, as well as release created events page_build: release: types: # This configuration does not affect the page_build event above - created sparse-0.16.0a9/.gitignore000066400000000000000000000017131463475501500153310ustar00rootroot00000000000000#####=== Python ===##### # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so # Distribution / packaging .Python env/ build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ *.egg-info/ .installed.cfg *.egg # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *,cover .pytest_cache/ test_results/ junit/ .hypothesis/ # Airspeed velocity .asv/ # Translations *.mo *.pot # Django stuff: *.log # Sphinx documentation docs/_build/ _build/ # PyBuilder target/ # IDE .idea/ .vscode/ default.profraw # Sandbox sandbox.py # macOS **/.DS_Store # Version file sparse/_version.py # Benchmark Results results/ sparse-0.16.0a9/.pre-commit-config.yaml000066400000000000000000000012741463475501500176240ustar00rootroot00000000000000repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.6.0 hooks: - id: check-yaml - id: end-of-file-fixer - id: trailing-whitespace - id: fix-byte-order-marker - id: destroyed-symlinks - id: fix-encoding-pragma args: ["--remove"] - id: mixed-line-ending - id: name-tests-test args: ["--pytest-test-first"] - id: no-commit-to-branch - id: pretty-format-json args: ["--autofix", "--no-ensure-ascii"] - repo: https://github.com/astral-sh/ruff-pre-commit rev: v0.4.9 hooks: - id: ruff args: ["--fix"] types_or: [ python, pyi, jupyter ] - id: ruff-format types_or: [ python, pyi, jupyter ] sparse-0.16.0a9/.readthedocs.yml000066400000000000000000000002761463475501500164320ustar00rootroot00000000000000version: 2 build: os: ubuntu-22.04 tools: python: "3.10" sphinx: configuration: docs/conf.py python: install: - method: pip path: . extra_requirements: - docs sparse-0.16.0a9/LICENSE000066400000000000000000000027551463475501500143550ustar00rootroot00000000000000BSD 3-Clause License Copyright (c) 2018, Sparse developers All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. sparse-0.16.0a9/README.rst000066400000000000000000000016011463475501500150240ustar00rootroot00000000000000Sparse Multidimensional Arrays ============================== |Build Status| |Docs Status| |Coverage| This library provides multi-dimensional sparse arrays. * `Documentation `_ * `Contributing `_ * `Bug Reports/Feature Requests `_ .. |Build Status| image:: https://github.com/pydata/sparse/actions/workflows/ci.yml/badge.svg :target: https://github.com/pydata/sparse/actions/workflows/ci.yml :alt: Build status .. |Docs Status| image:: https://readthedocs.org/projects/sparse-nd/badge/?version=latest :target: http://sparse.pydata.org/en/latest/?badge=latest :alt: Documentation Status .. |Coverage| image:: https://codecov.io/gh/pydata/sparse/branch/main/graph/badge.svg :target: https://codecov.io/gh/pydata/sparse :alt: Coverage Report sparse-0.16.0a9/asv.conf.json000066400000000000000000000010471463475501500157510ustar00rootroot00000000000000{ "branches": [ "HEAD" ], "build_command": [], "environment_type": "virtualenv", "install_command": [ "pip install --no-deps ." 
], "matrix": { "env": { "SPARSE_BACKEND": [ "Numba" ] }, "env_nobuild": {}, "req": { "numba": [ "" ], "numpy": [ "" ] } }, "project": "sparse", "project_url": "https://sparse.pydata.org/", "pythons": [ "3.10" ], "repo": ".", "uninstall_command": [ "pip uninstall sparse --yes" ], "version": 1 } sparse-0.16.0a9/benchmarks/000077500000000000000000000000001463475501500154545ustar00rootroot00000000000000sparse-0.16.0a9/benchmarks/__init__.py000066400000000000000000000000001463475501500175530ustar00rootroot00000000000000sparse-0.16.0a9/benchmarks/benchmark_coo.py000066400000000000000000000034311463475501500206210ustar00rootroot00000000000000import sparse import numpy as np class MatrixMultiplySuite: def setup(self): rng = np.random.default_rng(0) self.x = sparse.random((100, 100), density=0.01, random_state=rng) self.y = sparse.random((100, 100), density=0.01, random_state=rng) self.x @ self.y # Numba compilation def time_matmul(self): self.x @ self.y class ElemwiseSuite: def setup(self): rng = np.random.default_rng(0) self.x = sparse.random((100, 100, 100), density=0.01, random_state=rng) self.y = sparse.random((100, 100, 100), density=0.01, random_state=rng) self.x + self.y # Numba compilation self.x * self.y # Numba compilation def time_add(self): self.x + self.y def time_mul(self): self.x * self.y class ElemwiseBroadcastingSuite: def setup(self): rng = np.random.default_rng(0) self.x = sparse.random((100, 1, 100), density=0.01, random_state=rng) self.y = sparse.random((100, 100), density=0.01, random_state=rng) self.x + self.y # Numba compilation self.x * self.y # Numba compilation def time_add(self): self.x + self.y def time_mul(self): self.x * self.y class IndexingSuite: def setup(self): rng = np.random.default_rng(0) self.index = rng.integers(0, 100, 50) self.x = sparse.random((100, 100, 100), density=0.01, random_state=rng) # Numba compilation self.x[5] self.x[self.index] def time_index_scalar(self): self.x[5, 5, 5] def time_index_slice(self): self.x[:50] def time_index_slice2(self): self.x[:50, :50] def time_index_slice3(self): self.x[:50, :50, :50] def time_index_fancy(self): self.x[self.index] sparse-0.16.0a9/benchmarks/benchmark_gcxs.py000066400000000000000000000046471463475501500210170ustar00rootroot00000000000000import sparse import numpy as np class MatrixMultiplySuite: def setup(self): rng = np.random.default_rng(0) self.x = sparse.random((100, 100), density=0.01, format="gcxs", random_state=rng) self.y = sparse.random((100, 100), density=0.01, format="gcxs", random_state=rng) self.x @ self.y # Numba compilation def time_matmul(self): self.x @ self.y class ElemwiseSuite: def setup(self): rng = np.random.default_rng(0) self.x = sparse.random((100, 100, 100), density=0.01, format="gcxs", random_state=rng) self.y = sparse.random((100, 100, 100), density=0.01, format="gcxs", random_state=rng) self.x + self.y # Numba compilation def time_add(self): self.x + self.y def time_mul(self): self.x * self.y class ElemwiseBroadcastingSuite: def setup(self): rng = np.random.default_rng(0) self.x = sparse.random((100, 1, 100), density=0.01, format="gcxs", random_state=rng) self.y = sparse.random((100, 100), density=0.01, format="gcxs", random_state=rng) def time_add(self): self.x + self.y def time_mul(self): self.x * self.y class IndexingSuite: def setup(self): rng = np.random.default_rng(0) self.index = rng.integers(0, 100, 50) self.x = sparse.random((100, 100, 100), density=0.01, format="gcxs", random_state=rng) # Numba compilation self.x[5] self.x[self.index] def time_index_scalar(self): 
self.x[5, 5, 5] def time_index_slice(self): self.x[:50] def time_index_slice2(self): self.x[:50, :50] def time_index_slice3(self): self.x[:50, :50, :50] def time_index_fancy(self): self.x[self.index] class DenseMultiplySuite: params = ([0, 1], [1, 20, 100]) param_names = ["compressed axis", "n_vectors"] def setup(self, compressed_axis, n_vecs): rng = np.random.default_rng(1337) n = 10000 x = sparse.random((n, n), density=0.001, format="gcxs", random_state=rng).change_compressed_axes( (compressed_axis,) ) self.x = x self.t = rng.random((n, n_vecs)) self.u = rng.random((n_vecs, n)) # Numba compilation self.x @ self.t self.u @ self.x def time_gcxs_dot_ndarray(self, *args): self.x @ self.t def time_ndarray_dot_gcxs(self, *args): self.u @ self.x sparse-0.16.0a9/benchmarks/benchmark_matmul.py000066400000000000000000000010671463475501500213430ustar00rootroot00000000000000import sparse import numpy as np class Matmul_Sparse: params = (["coo", "gcxs"], [0, 1, None]) def setup(self, p, dens_arg): rng = np.random.default_rng(0) self.x = sparse.random((100, 100), density=0.01, format=p, random_state=rng) self.y = sparse.random((100, 100), density=0.01, format=p, random_state=rng) if dens_arg == 0: self.x = self.x.todense() elif dens_arg == 1: self.y = self.y.todense() self.x @ self.y def time_matmul(self, p, dens_arg): self.x @ self.y sparse-0.16.0a9/benchmarks/benchmark_tensordot.py000066400000000000000000000032561463475501500220670ustar00rootroot00000000000000import sparse import numpy as np class TensordotSuiteDenseSparse: """ Performance comparison for returntype=COO vs returntype=np.ndarray. tensordot(np.ndarray, COO) """ def setup(self): rng = np.random.default_rng(0) self.n = rng.random((100, 100)) self.s = sparse.random((100, 100, 100, 100), density=0.01, random_state=rng) def time_dense(self): sparse.tensordot(self.n, self.s, axes=([0, 1], [0, 2])) def time_sparse(self): sparse.tensordot(self.n, self.s, axes=([0, 1], [0, 2]), return_type=sparse.COO) class TensordotSuiteSparseSparse: """ Performance comparison for returntype=COO vs returntype=np.ndarray. tensordot(COO, COO) """ def setup(self): rng = np.random.default_rng(0) self.s1 = sparse.random((100, 100), density=0.01, random_state=rng) self.s2 = sparse.random((100, 100, 100, 100), density=0.01, random_state=rng) def time_dense(self): sparse.tensordot(self.s1, self.s2, axes=([0, 1], [0, 2]), return_type=np.ndarray) def time_sparse(self): sparse.tensordot(self.s1, self.s2, axes=([0, 1], [0, 2])) class TensordotSuiteSparseDense: """ Performance comparison for returntype=COO vs returntype=np.ndarray. 
tensordot(COO, np.ndarray) """ def setup(self): rng = np.random.default_rng(0) self.s = sparse.random((100, 100, 100, 100), density=0.01, random_state=rng) self.n = rng.random((100, 100)) def time_dense(self): sparse.tensordot(self.s, self.n, axes=([0, 1], [0, 1])) def time_sparse(self): sparse.tensordot(self.s, self.n, axes=([0, 1], [0, 1]), return_type=sparse.COO) sparse-0.16.0a9/ci/000077500000000000000000000000001463475501500137325ustar00rootroot00000000000000sparse-0.16.0a9/ci/Finch-array-api-skips.txt000066400000000000000000000004741463475501500205410ustar00rootroot00000000000000# `test_nonzero` name conflict array_api_tests/test_searching_functions.py::test_nonzero_zerodim_error # flaky test array_api_tests/test_special_cases.py::test_unary[sign((x_i is -0 or x_i == +0)) -> 0] # `broadcast_to` is not defined in Finch, hangs as xfail array_api_tests/test_searching_functions.py::test_where sparse-0.16.0a9/ci/Finch-array-api-xfails.txt000066400000000000000000000407011463475501500206730ustar00rootroot00000000000000# test_signatures # not implemented # stats functions array_api_tests/test_signatures.py::test_func_signature[mean] array_api_tests/test_signatures.py::test_func_signature[std] array_api_tests/test_signatures.py::test_func_signature[var] # set functions array_api_tests/test_signatures.py::test_func_signature[unique_all] array_api_tests/test_signatures.py::test_func_signature[unique_counts] array_api_tests/test_signatures.py::test_func_signature[unique_inverse] array_api_tests/test_signatures.py::test_func_signature[unique_values] # creation functions array_api_tests/test_signatures.py::test_func_signature[meshgrid] array_api_tests/test_signatures.py::test_func_signature[tril] array_api_tests/test_signatures.py::test_func_signature[triu] # inspection functions array_api_tests/test_signatures.py::test_func_signature[isdtype] array_api_tests/test_signatures.py::test_func_signature[result_type] # other functions array_api_tests/test_signatures.py::test_func_signature[concat] array_api_tests/test_signatures.py::test_func_signature[argsort] array_api_tests/test_signatures.py::test_func_signature[sort] array_api_tests/test_signatures.py::test_func_signature[broadcast_arrays] array_api_tests/test_signatures.py::test_func_signature[broadcast_to] array_api_tests/test_signatures.py::test_func_signature[expand_dims] array_api_tests/test_signatures.py::test_func_signature[flip] array_api_tests/test_signatures.py::test_func_signature[roll] array_api_tests/test_signatures.py::test_func_signature[squeeze] array_api_tests/test_signatures.py::test_func_signature[stack] array_api_tests/test_signatures.py::test_func_signature[matrix_transpose] array_api_tests/test_signatures.py::test_func_signature[vecdot] array_api_tests/test_signatures.py::test_func_signature[take] array_api_tests/test_signatures.py::test_func_signature[argmax] array_api_tests/test_signatures.py::test_func_signature[argmin] array_api_tests/test_signatures.py::test_func_signature[from_dlpack] array_api_tests/test_signatures.py::test_func_signature[cumulative_sum] array_api_tests/test_signatures.py::test_func_signature[searchsorted] array_api_tests/test_signatures.py::test_func_signature[moveaxis] array_api_tests/test_signatures.py::test_func_signature[repeat] array_api_tests/test_signatures.py::test_func_signature[tile] array_api_tests/test_signatures.py::test_func_signature[unstack] array_api_tests/test_signatures.py::test_func_signature[clip] array_api_tests/test_signatures.py::test_func_signature[copysign] 
array_api_tests/test_signatures.py::test_func_signature[hypot] array_api_tests/test_signatures.py::test_func_signature[logical_not] array_api_tests/test_signatures.py::test_func_signature[maximum] array_api_tests/test_signatures.py::test_func_signature[minimum] array_api_tests/test_signatures.py::test_func_signature[signbit] # linalg namespace array_api_tests/test_signatures.py::test_extension_func_signature[linalg.cross] array_api_tests/test_signatures.py::test_extension_func_signature[linalg.matmul] array_api_tests/test_signatures.py::test_extension_func_signature[linalg.cholesky] array_api_tests/test_signatures.py::test_extension_func_signature[linalg.matrix_norm] array_api_tests/test_signatures.py::test_extension_func_signature[linalg.matrix_rank] array_api_tests/test_signatures.py::test_extension_func_signature[linalg.matrix_transpose] array_api_tests/test_signatures.py::test_extension_func_signature[linalg.outer] array_api_tests/test_signatures.py::test_extension_func_signature[linalg.pinv] array_api_tests/test_signatures.py::test_extension_func_signature[linalg.svdvals] array_api_tests/test_signatures.py::test_extension_func_signature[linalg.tensordot] array_api_tests/test_signatures.py::test_extension_func_signature[linalg.vecdot] array_api_tests/test_signatures.py::test_extension_func_signature[linalg.vector_norm] # Array object namespace array_api_tests/test_signatures.py::test_array_method_signature[__dlpack__] array_api_tests/test_signatures.py::test_array_method_signature[__dlpack_device__] array_api_tests/test_signatures.py::test_array_method_signature[__setitem__] # test_creation_functions # not implemented array_api_tests/test_creation_functions.py::test_meshgrid # test_array_object array_api_tests/test_array_object.py::test_getitem array_api_tests/test_array_object.py::test_setitem array_api_tests/test_array_object.py::test_getitem_masking array_api_tests/test_array_object.py::test_setitem_masking # test_operators_and_elementwise_functions # throws for x < 1 instead of NaN array_api_tests/test_operators_and_elementwise_functions.py::test_acosh # not implemented array_api_tests/test_operators_and_elementwise_functions.py::test_logical_not # test_data_type_functions # not implemented array_api_tests/test_data_type_functions.py::test_broadcast_arrays array_api_tests/test_data_type_functions.py::test_broadcast_to array_api_tests/test_data_type_functions.py::test_isdtype array_api_tests/test_data_type_functions.py::test_result_type array_api_tests/test_data_type_functions.py::test_finfo[Float32] # test_has_names array_api_tests/test_has_names.py::test_has_names[linalg-cholesky] array_api_tests/test_has_names.py::test_has_names[linalg-cross] array_api_tests/test_has_names.py::test_has_names[linalg-det] array_api_tests/test_has_names.py::test_has_names[linalg-diagonal] array_api_tests/test_has_names.py::test_has_names[linalg-eigh] array_api_tests/test_has_names.py::test_has_names[linalg-eigvalsh] array_api_tests/test_has_names.py::test_has_names[linalg-inv] array_api_tests/test_has_names.py::test_has_names[linalg-matmul] array_api_tests/test_has_names.py::test_has_names[linalg-matrix_norm] array_api_tests/test_has_names.py::test_has_names[linalg-matrix_power] array_api_tests/test_has_names.py::test_has_names[linalg-matrix_rank] array_api_tests/test_has_names.py::test_has_names[linalg-matrix_transpose] array_api_tests/test_has_names.py::test_has_names[linalg-outer] array_api_tests/test_has_names.py::test_has_names[linalg-pinv] 
array_api_tests/test_has_names.py::test_has_names[linalg-qr] array_api_tests/test_has_names.py::test_has_names[linalg-slogdet] array_api_tests/test_has_names.py::test_has_names[linalg-solve] array_api_tests/test_has_names.py::test_has_names[linalg-svd] array_api_tests/test_has_names.py::test_has_names[linalg-svdvals] array_api_tests/test_has_names.py::test_has_names[linalg-tensordot] array_api_tests/test_has_names.py::test_has_names[linalg-trace] array_api_tests/test_has_names.py::test_has_names[linalg-vecdot] array_api_tests/test_has_names.py::test_has_names[linalg-vector_norm] array_api_tests/test_has_names.py::test_has_names[statistical-cumulative_sum] array_api_tests/test_has_names.py::test_has_names[statistical-mean] array_api_tests/test_has_names.py::test_has_names[statistical-std] array_api_tests/test_has_names.py::test_has_names[statistical-var] array_api_tests/test_has_names.py::test_has_names[set-unique_all] array_api_tests/test_has_names.py::test_has_names[set-unique_counts] array_api_tests/test_has_names.py::test_has_names[set-unique_inverse] array_api_tests/test_has_names.py::test_has_names[set-unique_values] array_api_tests/test_has_names.py::test_has_names[searching-argmax] array_api_tests/test_has_names.py::test_has_names[searching-argmin] array_api_tests/test_has_names.py::test_has_names[searching-searchsorted] array_api_tests/test_has_names.py::test_has_names[creation-from_dlpack] array_api_tests/test_has_names.py::test_has_names[creation-meshgrid] array_api_tests/test_has_names.py::test_has_names[creation-tril] array_api_tests/test_has_names.py::test_has_names[creation-triu] array_api_tests/test_has_names.py::test_has_names[manipulation-broadcast_arrays] array_api_tests/test_has_names.py::test_has_names[manipulation-broadcast_to] array_api_tests/test_has_names.py::test_has_names[manipulation-concat] array_api_tests/test_has_names.py::test_has_names[manipulation-expand_dims] array_api_tests/test_has_names.py::test_has_names[manipulation-flip] array_api_tests/test_has_names.py::test_has_names[manipulation-moveaxis] array_api_tests/test_has_names.py::test_has_names[manipulation-repeat] array_api_tests/test_has_names.py::test_has_names[manipulation-roll] array_api_tests/test_has_names.py::test_has_names[manipulation-squeeze] array_api_tests/test_has_names.py::test_has_names[manipulation-stack] array_api_tests/test_has_names.py::test_has_names[manipulation-tile] array_api_tests/test_has_names.py::test_has_names[manipulation-unstack] array_api_tests/test_has_names.py::test_has_names[sorting-argsort] array_api_tests/test_has_names.py::test_has_names[sorting-sort] array_api_tests/test_has_names.py::test_has_names[data_type-isdtype] array_api_tests/test_has_names.py::test_has_names[data_type-result_type] array_api_tests/test_has_names.py::test_has_names[elementwise-clip] array_api_tests/test_has_names.py::test_has_names[elementwise-copysign] array_api_tests/test_has_names.py::test_has_names[elementwise-hypot] array_api_tests/test_has_names.py::test_has_names[elementwise-logical_not] array_api_tests/test_has_names.py::test_has_names[elementwise-maximum] array_api_tests/test_has_names.py::test_has_names[elementwise-minimum] array_api_tests/test_has_names.py::test_has_names[elementwise-signbit] array_api_tests/test_has_names.py::test_has_names[linear_algebra-matrix_transpose] array_api_tests/test_has_names.py::test_has_names[linear_algebra-vecdot] array_api_tests/test_has_names.py::test_has_names[indexing-take] 
array_api_tests/test_has_names.py::test_has_names[array_method-__dlpack__] array_api_tests/test_has_names.py::test_has_names[array_method-__dlpack_device__] array_api_tests/test_has_names.py::test_has_names[array_method-__setitem__] array_api_tests/test_has_names.py::test_has_names[array_attribute-T] array_api_tests/test_has_names.py::test_has_names[array_attribute-mT] # test_indexing_functions # not implemented array_api_tests/test_indexing_functions.py::test_take # test_linalg # not implemented array_api_tests/test_linalg.py::test_matrix_transpose array_api_tests/test_linalg.py::test_vecdot # test suite draws two scalars array_api_tests/test_linalg.py::test_tensordot # test_manipulation_functions # not implemented array_api_tests/test_manipulation_functions.py::test_concat array_api_tests/test_manipulation_functions.py::test_expand_dims array_api_tests/test_manipulation_functions.py::test_squeeze array_api_tests/test_manipulation_functions.py::test_flip array_api_tests/test_manipulation_functions.py::test_roll array_api_tests/test_manipulation_functions.py::test_stack # test_searching_functions # not implemented array_api_tests/test_searching_functions.py::test_argmax array_api_tests/test_searching_functions.py::test_argmin # 0D issue array_api_tests/test_searching_functions.py::test_nonzero # test_set_functions # not implemented array_api_tests/test_set_functions.py::test_unique_all array_api_tests/test_set_functions.py::test_unique_counts array_api_tests/test_set_functions.py::test_unique_inverse array_api_tests/test_set_functions.py::test_unique_values # test_sorting_functions # not implemented array_api_tests/test_sorting_functions.py::test_argsort array_api_tests/test_sorting_functions.py::test_sort # test_special_cases array_api_tests/test_special_cases.py::test_unary[acos(x_i > 1) -> NaN] array_api_tests/test_special_cases.py::test_unary[acos(x_i < -1) -> NaN] array_api_tests/test_special_cases.py::test_unary[acosh(x_i is NaN) -> NaN] array_api_tests/test_special_cases.py::test_unary[acosh(x_i < 1) -> NaN] array_api_tests/test_special_cases.py::test_unary[acosh(x_i is 1) -> +0] array_api_tests/test_special_cases.py::test_unary[acosh(x_i is +infinity) -> +infinity] array_api_tests/test_special_cases.py::test_unary[asin(x_i > 1) -> NaN] array_api_tests/test_special_cases.py::test_unary[asin(x_i < -1) -> NaN] array_api_tests/test_special_cases.py::test_unary[atanh(x_i < -1) -> NaN] array_api_tests/test_special_cases.py::test_unary[atanh(x_i > 1) -> NaN] array_api_tests/test_special_cases.py::test_unary[cos(x_i is +infinity) -> NaN] array_api_tests/test_special_cases.py::test_unary[cos(x_i is -infinity) -> NaN] array_api_tests/test_special_cases.py::test_unary[log(x_i < 0) -> NaN] array_api_tests/test_special_cases.py::test_unary[log1p(x_i < -1) -> NaN] array_api_tests/test_special_cases.py::test_unary[log2(x_i < 0) -> NaN] array_api_tests/test_special_cases.py::test_unary[log10(x_i < 0) -> NaN] array_api_tests/test_special_cases.py::test_unary[signbit(x_i is +0) -> False] array_api_tests/test_special_cases.py::test_unary[signbit(x_i is -0) -> True] array_api_tests/test_special_cases.py::test_unary[signbit(x_i is +infinity) -> False] array_api_tests/test_special_cases.py::test_unary[signbit(x_i is -infinity) -> True] array_api_tests/test_special_cases.py::test_unary[signbit(isfinite(x_i) and x_i > 0) -> False] array_api_tests/test_special_cases.py::test_unary[signbit(isfinite(x_i) and x_i < 0) -> True] array_api_tests/test_special_cases.py::test_unary[signbit(x_i is NaN) -> False] 
array_api_tests/test_special_cases.py::test_unary[signbit(x_i is NaN) -> True] array_api_tests/test_special_cases.py::test_unary[sin((x_i is +infinity or x_i == -infinity)) -> NaN] array_api_tests/test_special_cases.py::test_unary[sqrt(x_i < 0) -> NaN] array_api_tests/test_special_cases.py::test_unary[tan((x_i is +infinity or x_i == -infinity)) -> NaN] array_api_tests/test_special_cases.py::test_binary[copysign(x2_i < 0) -> NaN] array_api_tests/test_special_cases.py::test_binary[copysign(x2_i is -0) -> NaN] array_api_tests/test_special_cases.py::test_binary[copysign(x2_i is +0) -> NaN] array_api_tests/test_special_cases.py::test_binary[copysign(x2_i > 0) -> NaN] array_api_tests/test_special_cases.py::test_binary[maximum(x1_i is NaN or x2_i is NaN) -> NaN] array_api_tests/test_special_cases.py::test_binary[minimum(x1_i is NaN or x2_i is NaN) -> NaN] array_api_tests/test_special_cases.py::test_binary[pow(x1_i is -infinity and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +infinity] array_api_tests/test_special_cases.py::test_binary[pow(x1_i is -infinity and x2_i < 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -0] array_api_tests/test_special_cases.py::test_binary[pow(x1_i is -infinity and x2_i < 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +0] array_api_tests/test_special_cases.py::test_binary[pow(x1_i is -0 and x2_i > 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -0] array_api_tests/test_special_cases.py::test_binary[pow(x1_i < 0 and isfinite(x1_i) and isfinite(x2_i) and not x2_i.is_integer()) -> NaN] array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -infinity and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +infinity] array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -infinity and x2_i < 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -0] array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -infinity and x2_i < 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +0] array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -0 and x2_i > 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -0] array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i < 0 and isfinite(x1_i) and isfinite(x2_i) and not x2_i.is_integer()) -> NaN] array_api_tests/test_special_cases.py::test_iop[__ipow__(x1_i is -infinity and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +infinity] array_api_tests/test_special_cases.py::test_iop[__ipow__(x1_i is -infinity and x2_i < 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -0] array_api_tests/test_special_cases.py::test_iop[__ipow__(x1_i is -infinity and x2_i < 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +0] array_api_tests/test_special_cases.py::test_iop[__ipow__(x1_i is -0 and x2_i > 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -0] array_api_tests/test_special_cases.py::test_iop[__ipow__(x1_i < 0 and isfinite(x1_i) and isfinite(x2_i) and not x2_i.is_integer()) -> NaN] array_api_tests/test_special_cases.py::test_empty_arrays[mean] array_api_tests/test_special_cases.py::test_empty_arrays[std] array_api_tests/test_special_cases.py::test_empty_arrays[var] array_api_tests/test_special_cases.py::test_nan_propagation[cumulative_sum] array_api_tests/test_special_cases.py::test_nan_propagation[max] array_api_tests/test_special_cases.py::test_nan_propagation[mean] array_api_tests/test_special_cases.py::test_nan_propagation[min] array_api_tests/test_special_cases.py::test_nan_propagation[prod] array_api_tests/test_special_cases.py::test_nan_propagation[std] 
array_api_tests/test_special_cases.py::test_nan_propagation[sum] array_api_tests/test_special_cases.py::test_nan_propagation[var] # test_statistical_functions # not implemented array_api_tests/test_statistical_functions.py::test_mean sparse-0.16.0a9/ci/Numba-array-api-skips.txt000066400000000000000000000001101463475501500205370ustar00rootroot00000000000000array_api_tests/test_operators_and_elementwise_functions.py::test_floor sparse-0.16.0a9/ci/Numba-array-api-xfails.txt000066400000000000000000000105461463475501500207120ustar00rootroot00000000000000array_api_tests/test_array_object.py::test_setitem array_api_tests/test_array_object.py::test_getitem_masking array_api_tests/test_array_object.py::test_setitem_masking array_api_tests/test_creation_functions.py::test_arange array_api_tests/test_creation_functions.py::test_linspace array_api_tests/test_creation_functions.py::test_meshgrid array_api_tests/test_data_type_functions.py::test_finfo[float32] array_api_tests/test_data_type_functions.py::test_isdtype array_api_tests/test_has_names.py::test_has_names[linalg-cholesky] array_api_tests/test_has_names.py::test_has_names[linalg-cross] array_api_tests/test_has_names.py::test_has_names[linalg-det] array_api_tests/test_has_names.py::test_has_names[linalg-diagonal] array_api_tests/test_has_names.py::test_has_names[linalg-eigh] array_api_tests/test_has_names.py::test_has_names[linalg-eigvalsh] array_api_tests/test_has_names.py::test_has_names[linalg-inv] array_api_tests/test_has_names.py::test_has_names[linalg-matmul] array_api_tests/test_has_names.py::test_has_names[linalg-matrix_norm] array_api_tests/test_has_names.py::test_has_names[linalg-matrix_power] array_api_tests/test_has_names.py::test_has_names[linalg-matrix_rank] array_api_tests/test_has_names.py::test_has_names[linalg-matrix_transpose] array_api_tests/test_has_names.py::test_has_names[linalg-outer] array_api_tests/test_has_names.py::test_has_names[linalg-pinv] array_api_tests/test_has_names.py::test_has_names[linalg-qr] array_api_tests/test_has_names.py::test_has_names[linalg-slogdet] array_api_tests/test_has_names.py::test_has_names[linalg-solve] array_api_tests/test_has_names.py::test_has_names[linalg-svd] array_api_tests/test_has_names.py::test_has_names[linalg-svdvals] array_api_tests/test_has_names.py::test_has_names[linalg-tensordot] array_api_tests/test_has_names.py::test_has_names[linalg-trace] array_api_tests/test_has_names.py::test_has_names[linalg-vecdot] array_api_tests/test_has_names.py::test_has_names[linalg-vector_norm] array_api_tests/test_has_names.py::test_has_names[set-unique_all] array_api_tests/test_has_names.py::test_has_names[set-unique_inverse] array_api_tests/test_has_names.py::test_has_names[creation-arange] array_api_tests/test_has_names.py::test_has_names[creation-from_dlpack] array_api_tests/test_has_names.py::test_has_names[creation-linspace] array_api_tests/test_has_names.py::test_has_names[creation-meshgrid] array_api_tests/test_has_names.py::test_has_names[sorting-argsort] array_api_tests/test_has_names.py::test_has_names[data_type-isdtype] array_api_tests/test_has_names.py::test_has_names[array_method-__dlpack__] array_api_tests/test_has_names.py::test_has_names[array_method-__dlpack_device__] array_api_tests/test_has_names.py::test_has_names[array_method-__setitem__] array_api_tests/test_indexing_functions.py::test_take array_api_tests/test_linalg.py::test_vecdot array_api_tests/test_operators_and_elementwise_functions.py::test_ceil 
array_api_tests/test_operators_and_elementwise_functions.py::test_trunc array_api_tests/test_set_functions.py::test_unique_all array_api_tests/test_set_functions.py::test_unique_inverse array_api_tests/test_signatures.py::test_func_signature[unique_all] array_api_tests/test_signatures.py::test_func_signature[unique_inverse] array_api_tests/test_signatures.py::test_func_signature[arange] array_api_tests/test_signatures.py::test_func_signature[from_dlpack] array_api_tests/test_signatures.py::test_func_signature[linspace] array_api_tests/test_signatures.py::test_func_signature[meshgrid] array_api_tests/test_signatures.py::test_func_signature[argsort] array_api_tests/test_signatures.py::test_func_signature[isdtype] array_api_tests/test_signatures.py::test_array_method_signature[__dlpack__] array_api_tests/test_signatures.py::test_array_method_signature[__dlpack_device__] array_api_tests/test_signatures.py::test_array_method_signature[__setitem__] array_api_tests/test_sorting_functions.py::test_argsort array_api_tests/test_sorting_functions.py::test_sort array_api_tests/test_special_cases.py::test_nan_propagation[max] array_api_tests/test_special_cases.py::test_nan_propagation[mean] array_api_tests/test_special_cases.py::test_nan_propagation[min] array_api_tests/test_special_cases.py::test_nan_propagation[prod] array_api_tests/test_special_cases.py::test_nan_propagation[std] array_api_tests/test_special_cases.py::test_nan_propagation[sum] array_api_tests/test_special_cases.py::test_nan_propagation[var] sparse-0.16.0a9/ci/environment.yml000066400000000000000000000001141463475501500170150ustar00rootroot00000000000000name: sparse-dev channels: - conda-forge dependencies: - python - pip sparse-0.16.0a9/ci/test_examples.sh000066400000000000000000000001121463475501500171350ustar00rootroot00000000000000for example in $(find ./examples/ -iname *.py); do python $example done sparse-0.16.0a9/docs/000077500000000000000000000000001463475501500142675ustar00rootroot00000000000000sparse-0.16.0a9/docs/_templates/000077500000000000000000000000001463475501500164245ustar00rootroot00000000000000sparse-0.16.0a9/docs/_templates/autosummary/000077500000000000000000000000001463475501500210125ustar00rootroot00000000000000sparse-0.16.0a9/docs/_templates/autosummary/base.rst000066400000000000000000000001511463475501500224530ustar00rootroot00000000000000{{ objname | escape | underline}} .. currentmodule:: {{ module }} .. auto{{ objtype }}:: {{ objname }} sparse-0.16.0a9/docs/_templates/autosummary/class.rst000066400000000000000000000010171463475501500226500ustar00rootroot00000000000000{{ objname | escape | underline}} .. currentmodule:: {{ module }} .. autoclass:: {{ objname }} {% block attributes %} {% if attributes %} .. rubric:: Attributes .. autosummary:: :toctree: {% for item in attributes %} {{ name }}.{{ item }} {% endfor %} {% endif %} {% endblock %} {% block methods %} {% if methods %} .. rubric:: Methods .. autosummary:: :toctree: {% for item in methods %} {{ name }}.{{ item }} {% endfor %} {% endif %} {% endblock %} sparse-0.16.0a9/docs/_templates/autosummary/module.rst000066400000000000000000000006541463475501500230360ustar00rootroot00000000000000{{ fullname | escape | underline }} .. rubric:: Description .. automodule:: {{ fullname }} .. currentmodule:: {{ fullname }} {% if classes %} .. rubric:: Classes .. autosummary:: :toctree: {% for class in classes %} {{ class }} {% endfor %} {% endif %} {% if functions %} .. rubric:: Functions .. 
autosummary:: :toctree: {% for function in functions %} {{ function }} {% endfor %} {% endif %} sparse-0.16.0a9/docs/changelog.rst000066400000000000000000000356221463475501500167600ustar00rootroot00000000000000Changelog ========= .. currentmodule:: sparse 0.15.1 / 2024-01-10 ------------------- * Fix regression where with XArray by supporting all API functions via the Array API standard. (:pr:`622` thanks :ghuser:`hameerabbasi`) 0.15.0 / 2024-01-09 ------------------- * Fix regression where :obj:`DeprecationWarning`s were being fired unexpectedly. (:pr:`581` thanks :ghuser:`hameerabbasi`) * Extended :obj:`sparse.einsum` support (:pr:`579` thanks :ghuser:`HadrienNU`) * General code clean-up (:pr:`586` thanks :ghuser:`MHRasmy`, :pr:`598` thanks :ghuser:`jamestwebber`) * Bug fixes with respect to NumPy compatibility (:pr:`598` thanks :ghuser:`hameerabbasi`, :pr:`609` thanks :ghuser:`Illviljan`, :pr:`620` thanks :ghuser:`mtsokol`) * Bug fixes with respect to GCXS (:pr:`611` thanks :ghuser:`EuGig`, :pr:`601` thanks :ghuser:`jamestwebber`) * `Array API standard `_ support (:pr:`612`, :pr:`613`, :pr:`614`, :pr:`615`, :pr:`619`, :pr:`620` thanks :ghuser:`mtsokol`) * ``matrepr`` support for display of sparse data (:pr:`605`, :pr:`606` thanks :ghuser:`alugowski`). * Larger code clean-up with Ruff formatter and linter (:pr:`617`, :pr:`621` thanks :ghuser:`hameerabbasi`) * Packaging and maintenance (:pr:`616`, :commit:`b5954e68d3d6e35a62f7401d1d4fb84ea04414dd`, :commit:`dda93d3ea9521881c721c3ba875c769c9c5a79d4` thanks :ghuser:`hameerabbasi`) 0.14.0 / 2023-02-24 ------------------- * :obj:`sparse.einsum` support (:pr:`564` thanks :ghuser:`jcmgray`) * Some bug-fixes (:pr:`524`, :pr:`527`, :pr:`555` thanks :ghuser:`hameerabbasi`, :pr:`569`, thanks :ghuser:`jamestwebber`, :pr:`534`, thanks :ghuser:`sarveshbhatnagar`) * Some performance improvements (:pr:`570`, thanks :ghuser:`jamestwebber`, :pr:`540`, thanks :ghuser:`smldub`). * Miscellaneous maintenance fixes. 0.13.0 / 2021-08-28 ------------------- * GCXS improvements and changes. (:pr:`448`, :pr:`450`, :pr:`455`, thanks :ghuser:`sayandip18`). * Maintainence fixes (:pr:`462`, :pr:`466`, :commit:`1ccb85da581be65a0345b399e00fd3c325700d95`, :commit:`5547b4e92dc8d61492e9dc10ba00175c1a6637fa` :commit:`00c0e5514de2aab8b9a0be16b5da470b091d9eb9`, :commit:`fcd3020dd08c7022a44f709173fe23969d3e8f7c`, thanks :ghuser:`hameerabbasi`) * :obj:`sparse.DOK.from_scipy_sparse` method (:pr:`464`, :issue:`463`, thanks :ghuser:`hameerabbasi`). * Black re-formatting (:pr:`471`, :pr:`484`, thanks :ghuser:`GenevieveBuckley`, :ghuser:`sayandip18`) * Add :obj:`sparse.pad` (:pr:`474`, :issue:`438`, thanks :ghuser:`H4R5H1T-007`) * Switch to GitHub Actions (:compare:`5547b4e92dc8d61492e9dc10ba00175c1a6637fa..a332f22c96a96e5ab9b4384342df67e8f3966f85`) * Fix a number of bugs in format conversion. (:pr:`504`, :issue:`503`, thanks :ghuser:`hameerabbasi`) * Fix bug in :obj:`sparse.matmul` for higher-dimensional arrays. (:pr:`508`, :issue:`506`, thanks :ghuser:`sayandip18`). * Fix scalar conversion to COO (:issue:`510`, :pr:`511`, thanks :ghuser:`hameerabbasi`) * Fix OOB memory accesses (:issue:`515`, :commit:`1e24a7e29786e888dee4c02153309986ae4b5dde` thanks :ghuser:`hameerabbasi`) * Fixes element-wise ops with scalar COO array. (:issue:`505`, :commit:`5211441ec685233657ab7156f99eb67e660cee86`, thanks :ghuser:`hameerabbasi`) * Fix scalar broadcast_to with ``nnz==0``. 
(:issue:`513`, :commit:`bfabaa0805e811884e79c4bdbfd14316986d65e4`, thanks :ghuser:`hameerabbasi`) * Add order parameter to ``{zero, ones, full}[_like]``. (:issue:`514`, :commit:`37de1d0141c4375962ecdf18337c2dd0f667b60c`, thanks :ghuser:`hameerabbasi`) * Fix tensordot typing bugs. (:issue:`493`, :issue:`499`, :commit:`37de1d0141c4375962ecdf18337c2dd0f667b60c`, thanks :ghuser:`hameerabbasi`). 0.12.0 / 2021-03-19 ------------------- There are a number of large changes in this release. For example, we have implemented the :obj:`GCXS` type, and its specializations :obj:`CSR` and :obj:`CSC`. We plan on gradually improving the performance of these. * A number of :obj:`GCXS` fixes and additions (:pr:`409`, :pr:`407`, :pr:`414`, :pr:`417`, :pr:`419` thanks :ghuser:`daletovar`) * Ability to change the index dtype for better storage characteristics. (:pr:`441`, thanks :ghuser:`daletovar`) * Some work on :obj:`DOK` arrays to bring them closer to the other formats (:pr:`435`, :pr:`437`, :pr:`439`, :pr:`440`, thanks :ghuser:`DragaDoncila`) * :obj:`CSR` and :obj:`CSC` specializations of :obj:`GCXS` (:pr:`442`, thanks :ghuser:`ivirshup`) For now, this is experimental undocumented API, and subject to change. * Fix a number of bugs (:pr:`407`, :issue:`406`) * Add ``nnz`` parameter to :obj:`sparse.random` (:pr:`410`, thanks :ghuser:`emilmelnikov`) 0.11.2 / 2020-09-04 ------------------- * Fix :obj:`TypingError` on :obj:`sparse.dot` with complex dtypes. (:issue:`403`, :pr:`404`) 0.11.1 / 2020-08-31 ------------------- * Fix :obj:`ValueError` on :obj:`sparse.dot` with extremely small values. (:issue:`398`, :pr:`399`) 0.11.0 / 2020-08-18 ------------------- * Improve the performance of :obj:`sparse.dot`. (:issue:`331`, :pr:`389`, thanks :ghuser:`daletovar`) * Added the :obj:`COO.swapaxes` method. (:pr:`344`, thanks :ghuser:`lueckem`) * Added multi-axis 1-D indexing support. (:pr:`343`, thanks :ghuser:`mikeymezher`) * Fix :obj:`outer` for arrays that weren't one-dimensional. (:issue:`346`, :pr:`347`) * Add ``casting`` kwarg to :obj:`COO.astype`. (:issue:`391`, :pr:`392`) * Fix for :obj:`COO` constructor accepting invalid inputs. (:issue:`385`, :pr:`386`) 0.10.0 / 2020-05-13 ------------------- * Fixed a bug where converting an empty DOK array to COO leads to an incorrect dtype. (:issue:`314`, :pr:`315`) * Change code formatter to black. (:pr:`284`) * Add :obj:`COO.flatten` and :obj:`sparse.outer`. (:issue:`316`, :pr:`317`). * Remove broadcasting restriction between sparse arrays and dense arrays. (:issue:`306`, :pr:`318`) * Implement deterministic dask tokenization. (:issue:`300`, :pr:`320`, thanks :ghuser:`danielballan`) * Improve testing around densification (:pr:`321`, thanks :ghuser:`danielballan`) * Simplify Numba extension. (:pr:`324`, thanks :ghuser:`eric-wieser`). * Respect ``copy=False`` in ``astype`` (:pr:`328`, thanks :ghuser:`eric-wieser`). * Replace linear_loc with ravel_multi_index, which is 3x faster. (:pr:`330`, thanks :ghuser:`eric-wieser`). * Add error msg to tensordot operation when ``ndim==0`` (:issue:`332`, :pr:`333`, thanks :ghuser:`guilhermeleobas`). * Maintainence fixes for Sphinx 3.0 and Numba 0.49, and dropping support for Python 3.5. (:pr:`337`). * Fixed signature for :obj:`numpy.clip`. 0.9.1 / 2020-01-23 ------------------ * Fixed a bug where indexing with an empty list could lead to issues. (:issue:`281`, :pr:`282`) * Change code formatter to black. (:pr:`284`) * Add the :obj:`diagonal` and :obj:`diagonalize` functions. 
(:issue:`288`, :pr:`289`, thanks :ghuser:`pettni`) * Add HTML repr for notebooks. (:pr:`283`, thanks :ghuser:`daletovar`) * Avoid making copy of ``coords`` when making a new :obj:`COO` array. * Add stack and concatenate for GCXS. (:issue:`301`, :pr:`303`, thanks :ghuser:`daletovar`). * Fix issue where functions dispatching to an attribute access wouldn't work with ``__array_function__``. (:issue:`308`, :pr:`309`). * Add partial support for constructing and mirroring :obj:`COO` objects to Numba. 0.8.0 / 2019-08-26 ------------------ This release switches to Numba's new typed lists, a lot of back-end work with the CI infrastructure, so Linux, macOS and Windows are officially tested. It also includes bug fixes. It also adds in-progress, not yet public support for the GCXS format, which is a generalisation of CSR/CSC. (huge thanks to :ghuser:`daletovar`) * Fixed a bug where an array with size == 1 and nnz == 0 could not be broadcast. (:issue:`242`, :pr:`243`) * Add ``std`` and ``var``. (:pr:`244`) * Move to Azure Pipelines with CI for Windows, macOS and Linux. (:pr:`245`, :pr:`246`, :pr:`247`, :pr:`248`) * Add ``resize``, and change ``reshape`` so it raises a ``ValueError`` on shapes that don't correspond to the same size. (:issue:`241`, :issue:`250`, :pr:`256` thanks, :ghuser:`daletovar`) * Add ``isposinf`` and ``isneginf``. (:issue:`252`, :pr:`253`) * Fix ``tensordot`` when nnz = 0. (:issue:`255`, :pr:`256`) * Modifications to ``__array_function__`` to allow for sparse XArrays. (:pr:`261`, thanks :ghuser:`nvictus`) * Add not-yet-public support for GCXS. (:pr:`258`, thanks :ghuser:`daletovar`) * Improvements to ``__array_function__``. (:pr:`267`, :pr:`272`, thanks :ghuser:`crusaderky`) * Convert all Numba lists to typed lists. (:pr:`264`) * Why write code when it exists elsewhere? (:pr:`277`) * Fix some element-wise operations with scalars. (:pr:`278`) * Private modules should be private, and tests should be in the package. (:pr:`280`) 0.7.0 / 2019-03-14 ------------------ This is a release that adds compatibility with NumPy's new ``__array_function__`` protocol, for details refer to `NEP-18 `_. The other big change is that we dropped compatibility with Python 2. Users on Python 2 should use version 0.6.0. There are also some bug-fixes relating to fill-values. This was mainly a contributor-driven release. The full list of changes can be found below: * Fixed a bug where going between :obj:`sparse.DOK` and :obj:`sparse.COO` caused fill-values to be lost. (:issue:`225`, :pr:`226`). * Fixed warning for a matrix that was incorrectly considered too dense. (:issue:`228`, :pr:`229`) * Fixed some warnings in Python 3.7, the fix was needed. in preparation for Python 3.8. (:pr:`233`, thanks :ghuser:`nils-werner`) * Drop support for Python 2.7 (:issue:`234`, :pr:`235`, thanks :ghuser:`hugovk`) * Clearer error messages (:issue:`230`, :issue:`231`, :pr:`232`) * Restructure requirements.txt files. (:pr:`236`) * Support fill-value in reductions in specific cases. (:issue:`237`, :pr:`238`) * Add ``__array_function__`` support. (:pr:`239`, thanks, :ghuser:`pentschev`) * Cleaner code! (:pr:`240`) 0.6.0 / 2018-12-19 ------------------ This release breaks backward-compatibility. Previously, if arrays were fed into NumPy functions, an attempt would be made to densify the array and apply the NumPy function. This was unintended behaviour in most cases, with the array filling up memory before raising a ``MemoryError`` if the array was too large. 
We have now changed this behaviour so that a ``RuntimeError`` is now raised if an attempt is made to automatically densify an array. To densify, use the explicit ``.todense()`` method. * Fixed a bug where ``np.matrix`` could sometimes fail to convert to a ``COO``. (:issue:`199`, :pr:`200`). * Make sure that ``sparse @ sparse`` returns a sparse array. (:issue:`201`, :pr:`203`) * Bring ``operator.matmul`` behaviour in line with NumPy for ``ndim > 2``. (:issue:`202`, :pr:`204`, :pr:`217`) * Make sure ``dtype`` is preserved with the ``out`` kwarg. (:issue:`205`, :pr:`206`) * Fix integer overflow in ``reduce`` on Windows. (:issue:`207`, :pr:`208`) * Disallow auto-densification. (:issue:`218`, :pr:`220`) * Add auto-densification configuration, and a configurable warning for checking if the array is too dense. (:pr:`210`, :pr:`213`) * Add pruning of fill-values to COO constructor. (:pr:`221`) 0.5.0 / 2018-10-12 ------------------ * Added :code:`COO.real`, :code:`COO.imag`, and :code:`COO.conj` (:pr:`196`). * Added :code:`sparse.kron` function (:pr:`194`, :pr:`195`). * Added :code:`order` parameter to :code:`COO.reshape` to make it work with :code:`np.reshape` (:pr:`193`). * Added :code:`COO.mean` and :code:`sparse.nanmean` (:pr:`190`). * Added :code:`sparse.full` and :code:`sparse.full_like` (:pr:`189`). * Added :code:`COO.clip` method (:pr:`185`). * Added :code:`COO.copy` method, and changed pickle of :code:`COO` to not include its cache (:pr:`184`). * Added :code:`sparse.eye`, :code:`sparse.zeros`, :code:`sparse.zeros_like`, :code:`sparse.ones`, and :code:`sparse.ones_like` (:pr:`183`). 0.4.1 / 2018-09-12 ------------------ * Allow mixed :code:`ndarray`-:code:`COO` operations if the result is sparse (:issue:`124`, via :pr:`182`). * Allow specifying a fill-value when converting from NumPy arrays (:issue:`179`, via :pr:`180`). * Added :code:`COO.any` and :code:`COO.all` methods (:pr:`175`). * Indexing for :code:`COO` now accepts a single one-dimensional array index (:pr:`172`). * The fill-value can now be something other than zero or :code:`False` (:pr:`165`). * Added a :code:`sparse.roll` function (:pr:`160`). * Numba code now releases the GIL. This leads to better multi-threaded performance in Dask (:pr:`159`). * A number of bugs occurred, so to resolve them, :code:`COO.coords.dtype` is always :code:`np.int64`. :code:`COO`, therefore, uses more memory than before (:pr:`158`). * Add support for saving and loading :code:`COO` files from disk (:issue:`153`, via :pr:`154`). * Support :code:`COO.nonzero` and :code:`np.argwhere` (:issue:`145`, via :pr:`148`). * Allow faux in-place operations (:issue:`80`, via :pr:`146`). * :code:`COO` is now always canonical (:pr:`141`). * Improve indexing performance (:pr:`128`). * Improve element-wise performance (:pr:`127`). * Reductions now support a negative axis (:issue:`117`, via :pr:`118`). * Match behaviour of :code:`ufunc.reduce` from NumPy (:issue:`107`, via :pr:`108`). 0.3.1 / 2018-04-12 ------------------ * Fix packaging error (:pr:`138`). 0.3.0 / 2018-02-22 ------------------ * Add NaN-skipping aggregations (:pr:`102`). * Add equivalent to :code:`np.where` (:pr:`102`). * N-input universal functions now work (:pr:`98`). * Make :code:`dot` more consistent with NumPy (:pr:`96`). * Create a base class :code:`SparseArray` (:pr:`92`). * Minimum NumPy version is now 1.13 (:pr:`90`). * Fix a bug where setting a :code:`DOK` element to zero did nothing (:issue:`93`, via :pr:`94`). 
0.2.0 / 2018-01-25 ------------------ * Support faster :code:`np.array(COO)` (:pr:`87`). * Add :code:`DOK` type (:pr:`85`). * Fix sum for large arrays (:issue:`82`, via :pr:`83`). * Support :code:`.size` and :code:`.density` (:pr:`69`). * Documentation added for the package (:pr:`43`). * Minimum required SciPy version is now 0.19 (:pr:`70`). * :code:`len(COO)` now works (:pr:`68`). * :code:`scalar op COO` now works for all operators (:pr:`67`). * Validate axes for :code:`.transpose()` (:pr:`61`). * Extend indexing support (:pr:`57`). * Add :code:`random` function for generating random sparse arrays (:pr:`41`). * :code:`COO(COO)` now copies the original object (:pr:`55`). * NumPy universal functions and reductions now work on :code:`COO` arrays (:pr:`49`). * Fix concatenate and stack for large arrays (:issue:`32`, via :pr:`51`). * Fix :code:`nnz` for scalars (:issue:`47`, via :pr:`48`). * Support more operators and remove all special cases (:pr:`46`). * Add support for :code:`triu` and :code:`tril` (:pr:`40`). * Add support for Ellipsis (:code:`...`) and :code:`None` when indexing (:pr:`37`). * Add support for bitwise bindary operations like :code:`&` and :code:`|` (:pr:`38`). * Support broadcasting in element-wise operations (:pr:`35`). sparse-0.16.0a9/docs/conduct.rst000066400000000000000000000124511463475501500164630ustar00rootroot00000000000000Contributor Covenant Code of Conduct ==================================== Our Pledge ---------- We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. Our Standards ------------- Examples of behavior that contributes to a positive environment for our community include: - Demonstrating empathy and kindness toward other people - Being respectful of differing opinions, viewpoints, and experiences - Giving and gracefully accepting constructive feedback - Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience - Focusing on what is best not just for us as individuals, but for the overall community Examples of unacceptable behavior include: - The use of sexualized language or imagery, and sexual attention or advances of any kind - Trolling, insulting or derogatory comments, and personal or political attacks - Public or private harassment - Publishing others' private information, such as a physical or email address, without their explicit permission - Other conduct which could reasonably be considered inappropriate in a professional setting Enforcement Responsibilities ---------------------------- Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. 
Scope ----- This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Enforcement ----------- Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at `hameerabbasi@yahoo.com `_. All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the reporter of any incident. Enforcement Guidelines ---------------------- Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: 1. Correction ~~~~~~~~~~~~~ **Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. **Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. 2. Warning ~~~~~~~~~~ **Community Impact**: A violation through a single incident or series of actions. **Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. 3. Temporary Ban ~~~~~~~~~~~~~~~~ **Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. **Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. 4. Permanent Ban ~~~~~~~~~~~~~~~~ **Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. **Consequence**: A permanent ban from any sort of public interaction within the community. Attribution ----------- This Code of Conduct is adapted from the `Contributor Covenant `__, version 2.0, available at https://www.contributor-covenant.org/version/2/0/code\_of\_conduct.html. Community Impact Guidelines were inspired by `Mozilla's code of conduct enforcement ladder <:ghuser:`mozilla/diversity`>`__. For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations. sparse-0.16.0a9/docs/conf.py000066400000000000000000000142061463475501500155710ustar00rootroot00000000000000#!/usr/bin/env python3 # # sparse documentation build configuration file, created by # sphinx-quickstart on Fri Dec 29 20:58:03 2017. # # This file is execfile()d with the current directory set to its # containing dir. 
# # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys sys.path.insert(0, os.path.abspath("..")) from sparse import __version__ # noqa: E402 # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.doctest", "sphinx.ext.intersphinx", "sphinx.ext.coverage", "sphinx.ext.mathjax", "sphinx.ext.napoleon", "sphinx.ext.viewcode", "sphinx.ext.autosummary", "sphinx.ext.inheritance_diagram", "sphinx.ext.extlinks", ] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] mathjax_path = "https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML" # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = ".rst" # The main toctree document. root_doc = "index" # General information about the project. project = "sparse" copyright = "2018, Sparse developers" author = "Sparse Developers" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = __version__ # The full version, including alpha/beta/rc tags. release = __version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = "en" # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ["_build", "**tests**", "**setup**", "**extern**", "**data**"] # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False autosummary_generate = True autosummary_generate_overwrite = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = "sphinx_rtd_theme" html_logo = "logo.svg" html_favicon = "logo.png" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
# html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # This is required for the alabaster theme # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars # html_sidebars = { # '**': [ # 'relations.html', # needs 'show_related': True theme option to display # 'searchbox.html', # ] # } # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = "sparsedoc" # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [(root_doc, "sparse.tex", "sparse Documentation", "Sparse Developers", "manual")] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [(root_doc, "sparse", "sparse Documentation", [author], 1)] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( root_doc, "sparse", "sparse Documentation", author, "sparse", "One line description of project.", "Miscellaneous", ) ] # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = { "python": ("https://docs.python.org/3", None), "numpy": ("https://docs.scipy.org/doc/numpy", None), "scipy": ("https://docs.scipy.org/doc/scipy", None), } extlinks = { "issue": ("https://github.com/pydata/sparse/issues/%s", "Issue #%s"), "pr": ("https://github.com/pydata/sparse/pull/%s", "PR #%s"), "ghuser": ("https://github.com/%s", "@%s"), "commit": ("https://github.com/pydata/sparse/commit/%s", "%s"), "compare": ("https://github.com/pydata/sparse/commit/%s", "%s"), } sparse-0.16.0a9/docs/construct.rst000066400000000000000000000201731463475501500170500ustar00rootroot00000000000000.. currentmodule:: sparse Construct Sparse Arrays ======================= From coordinates and data ------------------------- You can construct :obj:`COO` arrays from coordinates and value data. The :code:`coords` parameter contains the indices where the data is nonzero, and the :code:`data` parameter contains the data corresponding to those indices. For example, the following code will generate a :math:`5 \times 5` diagonal matrix: .. code-block:: python >>> import sparse >>> coords = [[0, 1, 2, 3, 4], ... [0, 1, 2, 3, 4]] >>> data = [10, 20, 30, 40, 50] >>> s = sparse.COO(coords, data, shape=(5, 5)) >>> s 0 1 2 3 4 ┌ ┐ 0 │ 10 │ 1 │ 20 │ 2 │ 30 │ 3 │ 40 │ 4 │ 50 │ └ ┘ In general :code:`coords` should be a :code:`(ndim, nnz)` shaped array. Each row of :code:`coords` contains one dimension of the desired sparse array, and each column contains the index corresponding to that nonzero element. :code:`data` contains the nonzero elements of the array corresponding to the indices in :code:`coords`. Its shape should be :code:`(nnz,)`.
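For example, here is a minimal sketch of a three-dimensional construction (the indices and values below are arbitrary and only chosen to make the shapes concrete): with three stored elements and ``ndim == 3``, ``coords`` has shape ``(3, 3)`` and ``data`` has shape ``(3,)``.

.. code-block:: python

    >>> import numpy as np
    >>> import sparse
    >>> coords = np.array([[0, 1, 2],   # indices along axis 0
    ...                    [1, 0, 2],   # indices along axis 1
    ...                    [2, 2, 0]])  # indices along axis 2
    >>> data = np.array([1.5, 2.0, 3.0])
    >>> s = sparse.COO(coords, data, shape=(3, 3, 3))
    >>> s.shape
    (3, 3, 3)
    >>> s.nnz
    3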
If ``data`` is the same across all the coordinates, it can be passed in as a scalar. For example, the following produces the :math:`4 \times 4` identity matrix: .. code-block:: python >>> import sparse >>> coords = [[0, 1, 2, 3], ... [0, 1, 2, 3]] >>> data = 1 >>> s = sparse.COO(coords, data, shape=(4, 4)) >>> s 0 1 2 3 ┌ ┐ 0 │ 1 │ 1 │ 1 │ 2 │ 1 │ 3 │ 1 │ └ ┘ You can, and should, pass in :obj:`numpy.ndarray` objects for :code:`coords` and :code:`data`. In this case, the shape of the resulting array is determined from the maximum index in each dimension. If the array extends beyond the maximum index in :code:`coords`, you should supply a shape explicitly. For example, if we did the following without the :code:`shape` keyword argument, it would result in a :math:`4 \times 5` matrix, but maybe we wanted one that was actually :math:`5 \times 5`. .. code-block:: python >>> coords = [[0, 3, 2, 1], [4, 1, 2, 0]] >>> data = [1, 4, 2, 1] >>> s = COO(coords, data, shape=(5, 5)) >>> s 0 1 2 3 4 ┌ ┐ 0 │ 1 │ 1 │ 1 │ 2 │ 2 │ 3 │ 4 │ 4 │ │ └ ┘ :obj:`COO` arrays support arbitrary fill values. Fill values are the "default" value, or value to not store. This can be given a value other than zero. For example, the following builds a (bad) representation of a :math:`2 \times 2` identity matrix. Note that not all operations are supported for operations with nonzero fill values. .. code-block:: python >>> coords = [[0, 1], [1, 0]] >>> data = [0, 0] >>> s = COO(coords, data, fill_value=1) >>> s 0 1 ┌ ┐ 0 │ 0 │ 1 │ 0 │ └ ┘ From :std:doc:`Scipy sparse matrices ` --------------------------------------------------------------------------------------- To construct a :obj:`COO` array from :obj:`spmatrix ` objects, you can use the :obj:`COO.from_scipy_sparse` method. As an example, if :code:`x` is a :obj:`scipy.sparse.spmatrix`, you can do the following to get an equivalent :obj:`COO` array: .. code-block:: python s = COO.from_scipy_sparse(x) From :doc:`Numpy arrays ` ------------------------------------------------------------------ To construct :obj:`COO` arrays from :obj:`numpy.ndarray` objects, you can use the :obj:`COO.from_numpy` method. As an example, if :code:`x` is a :obj:`numpy.ndarray`, you can do the following to get an equivalent :obj:`COO` array: .. code-block:: python s = COO.from_numpy(x) Generating random :obj:`COO` objects ------------------------------------ The :obj:`sparse.random` method can be used to create random :obj:`COO` arrays. For example, the following will generate a :math:`10 \times 10` matrix with :math:`10` nonzero entries, each in the interval :math:`[0, 1)`. .. code-block:: python s = sparse.random((10, 10), density=0.1) Building :obj:`COO` Arrays from :obj:`DOK` Arrays ------------------------------------------------- It's possible to build :obj:`COO` arrays from :obj:`DOK` arrays, if it is not easy to construct the :code:`coords` and :code:`data` directly. :obj:`DOK` arrays provide a simple builder interface to build :obj:`COO` arrays, but at this time, they can do little else. You can get started by defining the shape (and optionally, datatype) of the :obj:`DOK` array. If you do not specify a dtype, it is inferred from the value dictionary or is set to :code:`dtype('float64')` if that is not present. .. code-block:: python s = DOK((6, 5, 2)) s2 = DOK((2, 3, 4), dtype=np.uint8) After this, you can build the array by assigning arrays or scalars to elements or slices of the original array. Broadcasting rules are followed. ..
code-block:: python s[1:3, 3:1:-1] = [[6, 5]] DOK arrays also support fancy indexing assignment if and only if all dimensions are indexed. .. code-block:: python s[[0, 2], [2, 1], [0, 1]] = 5 s[[0, 3], [0, 4], [0, 1]] = [1, 5] Alongside indexing assignment and retrieval, :obj:`DOK` arrays support any arbitrary broadcasting function to any number of arguments where the arguments can be :obj:`SparseArray` objects, :obj:`scipy.sparse.spmatrix` objects, or :obj:`numpy.ndarray` objects. .. code-block:: python x = sparse.random((10, 10), 0.5, format="dok") y = sparse.random((10, 10), 0.5, format="dok") sparse.elemwise(np.add, x, y) :obj:`DOK` arrays also support standard ufuncs and operators, including comparison operators, in combination with other objects implementing the `numpy` `ndarray.__array_ufunc__` method. For example, the following code will perform elementwise equality comparison on the two arrays and return a new boolean :obj:`DOK` array. .. code-block:: python x = sparse.random((10, 10), 0.5, format="dok") y = np.random.random((10, 10)) x == y :obj:`DOK` arrays are returned from elemwise functions and standard ufuncs if and only if all :obj:`SparseArray` objects are :obj:`DOK` arrays. Otherwise, a :obj:`COO` array or a dense array is returned. At the end, you can convert the :obj:`DOK` array to a :obj:`COO` array. .. code-block:: python s3 = COO(s) In addition, it is possible to access single elements and slices of the :obj:`DOK` array using normal Numpy indexing, as well as fancy indexing if and only if all dimensions are indexed. Slicing and fancy indexing will always return a new DOK array. .. code-block:: python s[1, 2, 1] # 5 s[5, 1, 1] # 0 s[[0, 3], [0, 4], [0, 1]] # .. _converting: Converting :obj:`COO` objects to other Formats ---------------------------------------------- :obj:`COO` arrays can be converted to :doc:`Numpy arrays `, or to some :obj:`spmatrix ` subclasses via the following methods: * :obj:`COO.todense`: Converts to a :obj:`numpy.ndarray` unconditionally. * :obj:`COO.maybe_densify`: Converts to a :obj:`numpy.ndarray` based on certain constraints. * :obj:`COO.to_scipy_sparse`: Converts to a :obj:`scipy.sparse.coo_matrix` if the array is two dimensional. * :obj:`COO.tocsr`: Converts to a :obj:`scipy.sparse.csr_matrix` if the array is two dimensional. * :obj:`COO.tocsc`: Converts to a :obj:`scipy.sparse.csc_matrix` if the array is two dimensional. sparse-0.16.0a9/docs/contributing.rst000066400000000000000000000110541463475501500175310ustar00rootroot00000000000000Contributing ============ General Guidelines ------------------ sparse is a community-driven project on GitHub. You can find our `repository on GitHub <:ghuser:`pydata/sparse`>`_. Feel free to open issues for new features or bugs, or open a pull request to fix a bug or add a new feature. If you haven't contributed to open-source before, we recommend you read `this excellent guide by GitHub on how to contribute to open source `_. The guide is long, so you can gloss over things you're familiar with. If you're not already familiar with it, we follow the `fork and pull model `_ on GitHub. Filing Issues ------------- If you find a bug or would like a new feature, you might want to `consider filing a new issue on GitHub <:ghuser:`pydata/sparse/issues`>`_. Before you open a new issue, please make sure of the following: * This should go without saying, but make sure what you are requesting is within the scope of this project. * The bug/feature is still present/missing on the ``main`` branch on GitHub.
* A similar issue or pull request isn't already open. If one already is, it's better to contribute to the discussion there. Contributing Code ----------------- This project has a number of requirements for all code contributed. * We use ``pre-commit`` to automatically lint the code and maintain code style. * We use Numpy-style docstrings. * It's ideal if user-facing API changes or new features have documentation added. * 100% code coverage is recommended for all new code in any submitted PR. Doctests count toward coverage. * Performance optimizations should have benchmarks added in ``benchmarks``. Setting up Your Development Environment --------------------------------------- The following bash script is all you need to set up your development environment, after forking and cloning the repository: .. code-block:: bash pip install -e .[all] Running/Adding Unit Tests ------------------------- It is best if all new functionality and/or bug fixes have unit tests added with each use-case. We use `pytest `_ as our unit testing framework, with the ``pytest-cov`` extension to check code coverage and ``pytest-flake8`` to check code style. You don't need to configure these extensions yourself. Once you've configured your environment, you can just ``cd`` to the root of your repository and run .. code-block:: bash pytest --pyargs sparse This automatically checks code style and functionality, and prints code coverage, even though it doesn't fail on low coverage. Unit tests are automatically run on Travis CI for pull requests. Coverage -------- The ``pytest`` script automatically reports coverage, both on the terminal for missing line numbers, and in annotated HTML form in ``htmlcov/index.html``. Coverage is automatically checked on CodeCov for pull requests. Adding/Building the Documentation --------------------------------- If a feature is stable and relatively finalized, it is time to add it to the documentation. If you are adding any private/public functions, it is best to add docstrings, to aid in reviewing code and also for the API reference. We use `Numpy style docstrings `_ and `Sphinx `_ to document this library. Sphinx, in turn, uses `reStructuredText `_ as its markup language for adding code. We use the `Sphinx Autosummary extension `_ to generate API references. In particular, you may want to look at the :code:`docs/generated` directory to see how these files look and where to add new functions, classes or modules. For example, if you add a new function to the :code:`sparse.COO` class, you would open up :code:`docs/generated/sparse.COO.rst`, and add in the name of the function where appropriate. To build the documentation, you can :code:`cd` into the :code:`docs` directory and run .. code-block:: bash sphinx-build -W -b html . _build/html After this, you can find an HTML version of the documentation in :code:`docs/_build/html/index.html`. Documentation for pull requests is automatically built on CircleCI and can be found in the build artifacts. Adding and Running Benchmarks ----------------------------- We use `Airspeed Velocity `_ to run benchmarks. We have it set up to use ``conda``, but you can edit the configuration locally if you so wish.
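As a quick local check (a sketch, assuming ``asv`` is installed and run from the directory containing the benchmark configuration; adjust paths to your setup), the following commands run the suite once and render the results:

.. code-block:: bash

    pip install asv
    asv machine --yes   # record machine information once
    asv run --quick     # run each benchmark once, as a fast smoke test
    asv publish         # build the HTML report
    asv preview         # serve the report locally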
sparse-0.16.0a9/docs/gen_logo.py000066400000000000000000000062711463475501500164400ustar00rootroot00000000000000import xml.etree.ElementTree as et import numpy as np def transform(a, b, c, d, e, f): return f"matrix({a},{b},{c},{d},{e},{f})" def fill(rs): """Generates opacity at random, weighted a bit toward 0 and 1""" x = rs.choice(np.arange(5), p=[0.3, 0.2, 0.0, 0.2, 0.3]) / 4 return f"fill-opacity:{x:.1f}" rs = np.random.RandomState(1) colors = { "orange": "fill:rgb(241,141,59)", "blue": "fill:rgb(69,155,181)", "grey": "fill:rgb(103,124,131)", } s = 10 # face size offset_x = 10 # x margin offset_y = 10 # y margin b = np.tan(np.deg2rad(30)) # constant for transformations # reused attributes for small squares kwargs = {"x": "0", "y": "0", "width": f"{s}", "height": f"{s}", "stroke": "white"} # large white squares for background bg_kwargs = {**kwargs, "width": f"{5*s}", "height": f"{5*s}", "style": "fill:white;"} root = et.Element( "svg", **{ "width": f"{s * 10 + 2 * offset_x}", "height": f"{s * 20 + 2 * offset_y}", "viewbox": f"0 0 {s * 10 + 2 * offset_x} {s * 20 + 2 * offset_y}", "version": "1.1", "style": "fill-rule:evenodd;clip-rule:evenodd;stroke-linejoin:round;stroke-miterlimit:2;", "xmlns": "http://www.w3.org/2000/svg", "xmlns:xlink": "http://www.w3.org/1999/xlink", "xml:space": "preserve", "xmlns:serif": "http://www.serif.com/", "class": "align-center", }, ) # face 1 (left, orange) et.SubElement( root, "rect", transform=transform(1, b, 0, 1, 5 * s + offset_x, offset_y), **bg_kwargs, ) for i, j in np.ndindex(5, 5): et.SubElement( root, "rect", style=f"{colors['orange']};{fill(rs)};", transform=transform(1, b, 0, 1, (i + 5) * s + offset_x, (i * b + j) * s + offset_y), **kwargs, ) # face 2 (top, orange) et.SubElement( root, "rect", transform=transform(1, b, -1, b, 5 * s + offset_x, 5 * s + offset_y), **bg_kwargs, ) for i, j in np.ndindex(5, 5): et.SubElement( root, "rect", style=f"{colors['orange']};{fill(rs)};", transform=transform( 1, b, -1, b, (i - j + 5) * s + offset_x, (i * b + j * b + 5) * s + offset_y, ), **kwargs, ) # face 3 (left, blue) for y2 in (5 + b * 5, 10 + b * 5): et.SubElement( root, "rect", transform=transform(1, b, 0, 1, offset_x, y2 * s + offset_y), **bg_kwargs, ) for i, j in np.ndindex(5, 5): et.SubElement( root, "rect", style=f"{colors['blue']};{fill(rs)};", transform=transform(1, b, 0, 1, i * s + offset_x, (i * b + j + y2) * s + offset_y), **kwargs, ) # face 4 (right, grey) et.SubElement( root, "rect", transform=transform(1, -b, 0, 1, 5 * s + offset_x, (10 * b + 5) * s + offset_y), **bg_kwargs, ) for i, j in np.ndindex(5, 5): et.SubElement( root, "rect", style=f"{colors['grey']};{fill(rs)};", transform=transform(1, -b, 0, 1, (i + 5) * s + offset_x, ((10 - i) * b + j + 5) * s + offset_y), **kwargs, ) et.ElementTree(root).write("logo.svg", encoding="UTF-8") sparse-0.16.0a9/docs/generated/000077500000000000000000000000001463475501500162255ustar00rootroot00000000000000sparse-0.16.0a9/docs/generated/sparse.COO.T.rst000066400000000000000000000001031463475501500210670ustar00rootroot00000000000000COO\.T ====== .. currentmodule:: sparse .. autoattribute:: COO.T sparse-0.16.0a9/docs/generated/sparse.COO.all.rst000066400000000000000000000001041463475501500214350ustar00rootroot00000000000000COO.all ======= .. currentmodule:: sparse .. automethod:: COO.all sparse-0.16.0a9/docs/generated/sparse.COO.any.rst000066400000000000000000000001041463475501500214540ustar00rootroot00000000000000COO.any ======= .. currentmodule:: sparse .. 
automethod:: COO.any sparse-0.16.0a9/docs/generated/sparse.COO.asformat.rst000066400000000000000000000001231463475501500225020ustar00rootroot00000000000000COO.asformat ============ .. currentmodule:: sparse .. automethod:: COO.asformat sparse-0.16.0a9/docs/generated/sparse.COO.astype.rst000066400000000000000000000001171463475501500221760ustar00rootroot00000000000000COO\.astype =========== .. currentmodule:: sparse .. automethod:: COO.astype sparse-0.16.0a9/docs/generated/sparse.COO.broadcast_to.rst000066400000000000000000000001431463475501500233340ustar00rootroot00000000000000COO\.broadcast\_to ================== .. currentmodule:: sparse .. automethod:: COO.broadcast_to sparse-0.16.0a9/docs/generated/sparse.COO.clip.rst000066400000000000000000000001111463475501500216120ustar00rootroot00000000000000COO\.clip ========= .. currentmodule:: sparse .. automethod:: COO.clip sparse-0.16.0a9/docs/generated/sparse.COO.conj.rst000066400000000000000000000001111463475501500216140ustar00rootroot00000000000000COO\.conj ========= .. currentmodule:: sparse .. automethod:: COO.conj sparse-0.16.0a9/docs/generated/sparse.COO.copy.rst000066400000000000000000000001111463475501500216350ustar00rootroot00000000000000COO\.copy ========= .. currentmodule:: sparse .. automethod:: COO.copy sparse-0.16.0a9/docs/generated/sparse.COO.density.rst000066400000000000000000000001251463475501500223470ustar00rootroot00000000000000COO\.density ============ .. currentmodule:: sparse .. autoattribute:: COO.density sparse-0.16.0a9/docs/generated/sparse.COO.dot.rst000066400000000000000000000001061463475501500214550ustar00rootroot00000000000000COO\.dot ======== .. currentmodule:: sparse .. automethod:: COO.dot sparse-0.16.0a9/docs/generated/sparse.COO.dtype.rst000066400000000000000000000001171463475501500220160ustar00rootroot00000000000000COO\.dtype ========== .. currentmodule:: sparse .. autoattribute:: COO.dtype sparse-0.16.0a9/docs/generated/sparse.COO.enable_caching.rst000066400000000000000000000001511463475501500235710ustar00rootroot00000000000000COO\.enable\_caching ==================== .. currentmodule:: sparse .. automethod:: COO.enable_caching sparse-0.16.0a9/docs/generated/sparse.COO.flatten.rst000066400000000000000000000001201463475501500223200ustar00rootroot00000000000000COO.flatten =========== .. currentmodule:: sparse .. automethod:: COO.flatten sparse-0.16.0a9/docs/generated/sparse.COO.from_iter.rst000066400000000000000000000001301463475501500226520ustar00rootroot00000000000000COO.from\_iter ============== .. currentmodule:: sparse .. automethod:: COO.from_iter sparse-0.16.0a9/docs/generated/sparse.COO.from_numpy.rst000066400000000000000000000001351463475501500230640ustar00rootroot00000000000000COO\.from\_numpy ================ .. currentmodule:: sparse .. automethod:: COO.from_numpy sparse-0.16.0a9/docs/generated/sparse.COO.from_scipy_sparse.rst000066400000000000000000000001641463475501500244220ustar00rootroot00000000000000COO\.from\_scipy\_sparse ======================== .. currentmodule:: sparse .. automethod:: COO.from_scipy_sparse sparse-0.16.0a9/docs/generated/sparse.COO.imag.rst000066400000000000000000000001141463475501500216030ustar00rootroot00000000000000COO\.imag ========= .. currentmodule:: sparse .. autoattribute:: COO.imag sparse-0.16.0a9/docs/generated/sparse.COO.linear_loc.rst000066400000000000000000000001351463475501500230000ustar00rootroot00000000000000COO\.linear\_loc ================ .. currentmodule:: sparse .. 
automethod:: COO.linear_loc sparse-0.16.0a9/docs/generated/sparse.COO.max.rst000066400000000000000000000001061463475501500214540ustar00rootroot00000000000000COO\.max ======== .. currentmodule:: sparse .. automethod:: COO.max sparse-0.16.0a9/docs/generated/sparse.COO.maybe_densify.rst000066400000000000000000000001461463475501500235110ustar00rootroot00000000000000COO\.maybe\_densify =================== .. currentmodule:: sparse .. automethod:: COO.maybe_densify sparse-0.16.0a9/docs/generated/sparse.COO.mean.rst000066400000000000000000000001111463475501500216030ustar00rootroot00000000000000COO\.mean ========= .. currentmodule:: sparse .. automethod:: COO.mean sparse-0.16.0a9/docs/generated/sparse.COO.min.rst000066400000000000000000000001061463475501500214520ustar00rootroot00000000000000COO\.min ======== .. currentmodule:: sparse .. automethod:: COO.min sparse-0.16.0a9/docs/generated/sparse.COO.nbytes.rst000066400000000000000000000001221463475501500221710ustar00rootroot00000000000000COO\.nbytes =========== .. currentmodule:: sparse .. autoattribute:: COO.nbytes sparse-0.16.0a9/docs/generated/sparse.COO.ndim.rst000066400000000000000000000001141463475501500216150ustar00rootroot00000000000000COO\.ndim ========= .. currentmodule:: sparse .. autoattribute:: COO.ndim sparse-0.16.0a9/docs/generated/sparse.COO.nnz.rst000066400000000000000000000001111463475501500214700ustar00rootroot00000000000000COO\.nnz ======== .. currentmodule:: sparse .. autoattribute:: COO.nnz sparse-0.16.0a9/docs/generated/sparse.COO.nonzero.rst000066400000000000000000000001201463475501500223550ustar00rootroot00000000000000COO.nonzero =========== .. currentmodule:: sparse .. automethod:: COO.nonzero sparse-0.16.0a9/docs/generated/sparse.COO.prod.rst000066400000000000000000000001111463475501500216270ustar00rootroot00000000000000COO\.prod ========= .. currentmodule:: sparse .. automethod:: COO.prod sparse-0.16.0a9/docs/generated/sparse.COO.real.rst000066400000000000000000000001141463475501500216110ustar00rootroot00000000000000COO\.real ========= .. currentmodule:: sparse .. autoattribute:: COO.real sparse-0.16.0a9/docs/generated/sparse.COO.reduce.rst000066400000000000000000000001171463475501500221400ustar00rootroot00000000000000COO\.reduce =========== .. currentmodule:: sparse .. automethod:: COO.reduce sparse-0.16.0a9/docs/generated/sparse.COO.reshape.rst000066400000000000000000000001221463475501500223140ustar00rootroot00000000000000COO\.reshape ============ .. currentmodule:: sparse .. automethod:: COO.reshape sparse-0.16.0a9/docs/generated/sparse.COO.resize.rst000066400000000000000000000001151463475501500221700ustar00rootroot00000000000000COO.resize ========== .. currentmodule:: sparse .. automethod:: COO.resize sparse-0.16.0a9/docs/generated/sparse.COO.round.rst000066400000000000000000000001141463475501500220150ustar00rootroot00000000000000COO\.round ========== .. currentmodule:: sparse .. automethod:: COO.round sparse-0.16.0a9/docs/generated/sparse.COO.rst000066400000000000000000000030341463475501500206730ustar00rootroot00000000000000COO === .. currentmodule:: sparse .. autoclass:: COO .. note:: :obj:`COO` objects also support :ref:`operators ` and :ref:`indexing ` .. rubric:: Attributes .. autosummary:: :toctree: COO.T COO.dtype COO.nbytes COO.ndim COO.nnz COO.size COO.density COO.imag COO.real .. rubric:: :doc:`Constructing COO objects <../construct>` .. autosummary:: :toctree: COO.from_iter COO.from_numpy COO.from_scipy_sparse .. rubric:: :ref:`Element-wise operations ` .. 
autosummary:: :toctree: COO.astype COO.conj COO.clip COO.round .. rubric:: :ref:`Reductions ` .. autosummary:: :toctree: COO.reduce COO.sum COO.prod COO.min COO.max COO.any COO.all COO.mean COO.std COO.var .. rubric:: :ref:`Converting to other formats ` .. autosummary:: :toctree: COO.asformat COO.todense COO.maybe_densify COO.to_scipy_sparse COO.tocsc COO.tocsr .. rubric:: :ref:`Other operations ` .. autosummary:: :toctree: COO.copy COO.dot COO.flatten COO.reshape COO.resize COO.transpose COO.swapaxes COO.nonzero .. rubric:: Utility functions .. autosummary:: :toctree: COO.broadcast_to COO.enable_caching COO.linear_loc sparse-0.16.0a9/docs/generated/sparse.COO.size.rst000066400000000000000000000001141463475501500216400ustar00rootroot00000000000000COO\.size ========= .. currentmodule:: sparse .. autoattribute:: COO.size sparse-0.16.0a9/docs/generated/sparse.COO.std.rst000066400000000000000000000001041463475501500214570ustar00rootroot00000000000000COO.std ======= .. currentmodule:: sparse .. automethod:: COO.std sparse-0.16.0a9/docs/generated/sparse.COO.sum.rst000066400000000000000000000001061463475501500214730ustar00rootroot00000000000000COO\.sum ======== .. currentmodule:: sparse .. automethod:: COO.sum sparse-0.16.0a9/docs/generated/sparse.COO.swapaxes.rst000066400000000000000000000001231463475501500225210ustar00rootroot00000000000000COO.swapaxes ============ .. currentmodule:: sparse .. automethod:: COO.swapaxes sparse-0.16.0a9/docs/generated/sparse.COO.to_scipy_sparse.rst000066400000000000000000000001561463475501500241020ustar00rootroot00000000000000COO\.to\_scipy\_sparse ====================== .. currentmodule:: sparse .. automethod:: COO.to_scipy_sparse sparse-0.16.0a9/docs/generated/sparse.COO.tocsc.rst000066400000000000000000000001141463475501500220010ustar00rootroot00000000000000COO\.tocsc ========== .. currentmodule:: sparse .. automethod:: COO.tocsc sparse-0.16.0a9/docs/generated/sparse.COO.tocsr.rst000066400000000000000000000001141463475501500220200ustar00rootroot00000000000000COO\.tocsr ========== .. currentmodule:: sparse .. automethod:: COO.tocsr sparse-0.16.0a9/docs/generated/sparse.COO.todense.rst000066400000000000000000000001221463475501500223260ustar00rootroot00000000000000COO\.todense ============ .. currentmodule:: sparse .. automethod:: COO.todense sparse-0.16.0a9/docs/generated/sparse.COO.transpose.rst000066400000000000000000000001301463475501500227020ustar00rootroot00000000000000COO\.transpose ============== .. currentmodule:: sparse .. automethod:: COO.transpose sparse-0.16.0a9/docs/generated/sparse.COO.var.rst000066400000000000000000000001041463475501500214550ustar00rootroot00000000000000COO.var ======= .. currentmodule:: sparse .. automethod:: COO.var sparse-0.16.0a9/docs/generated/sparse.DOK.asformat.rst000066400000000000000000000001231463475501500224770ustar00rootroot00000000000000DOK.asformat ============ .. currentmodule:: sparse .. automethod:: DOK.asformat sparse-0.16.0a9/docs/generated/sparse.DOK.density.rst000066400000000000000000000001251463475501500223440ustar00rootroot00000000000000DOK\.density ============ .. currentmodule:: sparse .. autoattribute:: DOK.density sparse-0.16.0a9/docs/generated/sparse.DOK.from_coo.rst000066400000000000000000000001271463475501500224720ustar00rootroot00000000000000DOK\.from\_coo ============== .. currentmodule:: sparse .. automethod:: DOK.from_coo sparse-0.16.0a9/docs/generated/sparse.DOK.from_numpy.rst000066400000000000000000000001351463475501500230610ustar00rootroot00000000000000DOK\.from\_numpy ================ .. 
currentmodule:: sparse .. automethod:: DOK.from_numpy sparse-0.16.0a9/docs/generated/sparse.DOK.from_scipy_sparse.rst000066400000000000000000000001621463475501500244150ustar00rootroot00000000000000DOK.from\_scipy\_sparse ======================= .. currentmodule:: sparse .. automethod:: DOK.from_scipy_sparse sparse-0.16.0a9/docs/generated/sparse.DOK.ndim.rst000066400000000000000000000001141463475501500216120ustar00rootroot00000000000000DOK\.ndim ========= .. currentmodule:: sparse .. autoattribute:: DOK.ndim sparse-0.16.0a9/docs/generated/sparse.DOK.nnz.rst000066400000000000000000000001111463475501500214650ustar00rootroot00000000000000DOK\.nnz ======== .. currentmodule:: sparse .. autoattribute:: DOK.nnz sparse-0.16.0a9/docs/generated/sparse.DOK.rst000066400000000000000000000005761463475501500207000ustar00rootroot00000000000000DOK === .. currentmodule:: sparse .. autoclass:: DOK .. rubric:: Attributes .. autosummary:: :toctree: DOK.density DOK.ndim DOK.nnz DOK.size .. rubric:: Methods .. autosummary:: :toctree: DOK.asformat DOK.from_coo DOK.from_numpy DOK.from_scipy_sparse DOK.to_coo DOK.todense sparse-0.16.0a9/docs/generated/sparse.DOK.size.rst000066400000000000000000000001141463475501500216350ustar00rootroot00000000000000DOK\.size ========= .. currentmodule:: sparse .. autoattribute:: DOK.size sparse-0.16.0a9/docs/generated/sparse.DOK.to_coo.rst000066400000000000000000000001211463475501500221430ustar00rootroot00000000000000DOK\.to\_coo ============ .. currentmodule:: sparse .. automethod:: DOK.to_coo sparse-0.16.0a9/docs/generated/sparse.DOK.todense.rst000066400000000000000000000001221463475501500223230ustar00rootroot00000000000000DOK\.todense ============ .. currentmodule:: sparse .. automethod:: DOK.todense sparse-0.16.0a9/docs/generated/sparse.GCXS.T.rst000066400000000000000000000001031463475501500212130ustar00rootroot00000000000000GCXS.T ====== .. currentmodule:: sparse .. autoproperty:: GCXS.T sparse-0.16.0a9/docs/generated/sparse.GCXS.__init__.rst000066400000000000000000000001361463475501500225550ustar00rootroot00000000000000GCXS.\_\_init\_\_ ================= .. currentmodule:: sparse .. automethod:: GCXS.__init__ sparse-0.16.0a9/docs/generated/sparse.GCXS.all.rst000066400000000000000000000001071463475501500215640ustar00rootroot00000000000000GCXS.all ======== .. currentmodule:: sparse .. automethod:: GCXS.all sparse-0.16.0a9/docs/generated/sparse.GCXS.amax.rst000066400000000000000000000001121463475501500217360ustar00rootroot00000000000000GCXS.amax ========= .. currentmodule:: sparse .. automethod:: GCXS.amax sparse-0.16.0a9/docs/generated/sparse.GCXS.amin.rst000066400000000000000000000001121463475501500217340ustar00rootroot00000000000000GCXS.amin ========= .. currentmodule:: sparse .. automethod:: GCXS.amin sparse-0.16.0a9/docs/generated/sparse.GCXS.any.rst000066400000000000000000000001071463475501500216030ustar00rootroot00000000000000GCXS.any ======== .. currentmodule:: sparse .. automethod:: GCXS.any sparse-0.16.0a9/docs/generated/sparse.GCXS.asformat.rst000066400000000000000000000001261463475501500226310ustar00rootroot00000000000000GCXS.asformat ============= .. currentmodule:: sparse .. automethod:: GCXS.asformat sparse-0.16.0a9/docs/generated/sparse.GCXS.astype.rst000066400000000000000000000001201463475501500223140ustar00rootroot00000000000000GCXS.astype =========== .. currentmodule:: sparse .. 
automethod:: GCXS.astype sparse-0.16.0a9/docs/generated/sparse.GCXS.change_compressed_axes.rst000066400000000000000000000002041463475501500255030ustar00rootroot00000000000000GCXS.change\_compressed\_axes ============================= .. currentmodule:: sparse .. automethod:: GCXS.change_compressed_axes sparse-0.16.0a9/docs/generated/sparse.GCXS.clip.rst000066400000000000000000000001121463475501500217370ustar00rootroot00000000000000GCXS.clip ========= .. currentmodule:: sparse .. automethod:: GCXS.clip sparse-0.16.0a9/docs/generated/sparse.GCXS.compressed_axes.rst000066400000000000000000000001571463475501500242050ustar00rootroot00000000000000GCXS.compressed\_axes ===================== .. currentmodule:: sparse .. autoproperty:: GCXS.compressed_axes sparse-0.16.0a9/docs/generated/sparse.GCXS.conj.rst000066400000000000000000000001121463475501500217410ustar00rootroot00000000000000GCXS.conj ========= .. currentmodule:: sparse .. automethod:: GCXS.conj sparse-0.16.0a9/docs/generated/sparse.GCXS.copy.rst000066400000000000000000000001121463475501500217620ustar00rootroot00000000000000GCXS.copy ========= .. currentmodule:: sparse .. automethod:: GCXS.copy sparse-0.16.0a9/docs/generated/sparse.GCXS.density.rst000066400000000000000000000001251463475501500224730ustar00rootroot00000000000000GCXS.density ============ .. currentmodule:: sparse .. autoproperty:: GCXS.density sparse-0.16.0a9/docs/generated/sparse.GCXS.dot.rst000066400000000000000000000001071463475501500216020ustar00rootroot00000000000000GCXS.dot ======== .. currentmodule:: sparse .. automethod:: GCXS.dot sparse-0.16.0a9/docs/generated/sparse.GCXS.dtype.rst000066400000000000000000000001171463475501500221420ustar00rootroot00000000000000GCXS.dtype ========== .. currentmodule:: sparse .. autoproperty:: GCXS.dtype sparse-0.16.0a9/docs/generated/sparse.GCXS.flatten.rst000066400000000000000000000001231463475501500224470ustar00rootroot00000000000000GCXS.flatten ============ .. currentmodule:: sparse .. automethod:: GCXS.flatten sparse-0.16.0a9/docs/generated/sparse.GCXS.from_coo.rst000066400000000000000000000001301463475501500226130ustar00rootroot00000000000000GCXS.from\_coo ============== .. currentmodule:: sparse .. automethod:: GCXS.from_coo sparse-0.16.0a9/docs/generated/sparse.GCXS.from_iter.rst000066400000000000000000000001331463475501500230010ustar00rootroot00000000000000GCXS.from\_iter =============== .. currentmodule:: sparse .. automethod:: GCXS.from_iter sparse-0.16.0a9/docs/generated/sparse.GCXS.from_numpy.rst000066400000000000000000000001361463475501500232110ustar00rootroot00000000000000GCXS.from\_numpy ================ .. currentmodule:: sparse .. automethod:: GCXS.from_numpy sparse-0.16.0a9/docs/generated/sparse.GCXS.from_scipy_sparse.rst000066400000000000000000000001651463475501500245470ustar00rootroot00000000000000GCXS.from\_scipy\_sparse ======================== .. currentmodule:: sparse .. automethod:: GCXS.from_scipy_sparse sparse-0.16.0a9/docs/generated/sparse.GCXS.imag.rst000066400000000000000000000001141463475501500217270ustar00rootroot00000000000000GCXS.imag ========= .. currentmodule:: sparse .. autoproperty:: GCXS.imag sparse-0.16.0a9/docs/generated/sparse.GCXS.max.rst000066400000000000000000000001071463475501500216010ustar00rootroot00000000000000GCXS.max ======== .. currentmodule:: sparse .. automethod:: GCXS.max sparse-0.16.0a9/docs/generated/sparse.GCXS.maybe_densify.rst000066400000000000000000000001471463475501500236360ustar00rootroot00000000000000GCXS.maybe\_densify =================== .. currentmodule:: sparse .. 
automethod:: GCXS.maybe_densify sparse-0.16.0a9/docs/generated/sparse.GCXS.mean.rst000066400000000000000000000001121463475501500217300ustar00rootroot00000000000000GCXS.mean ========= .. currentmodule:: sparse .. automethod:: GCXS.mean sparse-0.16.0a9/docs/generated/sparse.GCXS.min.rst000066400000000000000000000001071463475501500215770ustar00rootroot00000000000000GCXS.min ======== .. currentmodule:: sparse .. automethod:: GCXS.min sparse-0.16.0a9/docs/generated/sparse.GCXS.nbytes.rst000066400000000000000000000001221463475501500223150ustar00rootroot00000000000000GCXS.nbytes =========== .. currentmodule:: sparse .. autoproperty:: GCXS.nbytes sparse-0.16.0a9/docs/generated/sparse.GCXS.ndim.rst000066400000000000000000000001141463475501500217410ustar00rootroot00000000000000GCXS.ndim ========= .. currentmodule:: sparse .. autoproperty:: GCXS.ndim sparse-0.16.0a9/docs/generated/sparse.GCXS.nnz.rst000066400000000000000000000001111463475501500216140ustar00rootroot00000000000000GCXS.nnz ======== .. currentmodule:: sparse .. autoproperty:: GCXS.nnz sparse-0.16.0a9/docs/generated/sparse.GCXS.prod.rst000066400000000000000000000001121463475501500217540ustar00rootroot00000000000000GCXS.prod ========= .. currentmodule:: sparse .. automethod:: GCXS.prod sparse-0.16.0a9/docs/generated/sparse.GCXS.real.rst000066400000000000000000000001141463475501500217350ustar00rootroot00000000000000GCXS.real ========= .. currentmodule:: sparse .. autoproperty:: GCXS.real sparse-0.16.0a9/docs/generated/sparse.GCXS.reduce.rst000066400000000000000000000001201463475501500222560ustar00rootroot00000000000000GCXS.reduce =========== .. currentmodule:: sparse .. automethod:: GCXS.reduce sparse-0.16.0a9/docs/generated/sparse.GCXS.reshape.rst000066400000000000000000000001231463475501500224410ustar00rootroot00000000000000GCXS.reshape ============ .. currentmodule:: sparse .. automethod:: GCXS.reshape sparse-0.16.0a9/docs/generated/sparse.GCXS.round.rst000066400000000000000000000001151463475501500221420ustar00rootroot00000000000000GCXS.round ========== .. currentmodule:: sparse .. automethod:: GCXS.round sparse-0.16.0a9/docs/generated/sparse.GCXS.round_.rst000066400000000000000000000001221463475501500222770ustar00rootroot00000000000000GCXS.round\_ ============ .. currentmodule:: sparse .. automethod:: GCXS.round_ sparse-0.16.0a9/docs/generated/sparse.GCXS.rst000066400000000000000000000020161463475501500210160ustar00rootroot00000000000000GCXS ==== .. currentmodule:: sparse .. autoclass:: GCXS .. rubric:: Attributes .. autosummary:: :toctree: GCXS.T GCXS.compressed_axes GCXS.density GCXS.dtype GCXS.imag GCXS.nbytes GCXS.ndim GCXS.nnz GCXS.real GCXS.size .. rubric:: Methods .. autosummary:: :toctree: GCXS.__init__ GCXS.all GCXS.amax GCXS.amin GCXS.any GCXS.asformat GCXS.astype GCXS.change_compressed_axes GCXS.clip GCXS.conj GCXS.copy GCXS.dot GCXS.flatten GCXS.from_coo GCXS.from_iter GCXS.from_numpy GCXS.from_scipy_sparse GCXS.max GCXS.maybe_densify GCXS.mean GCXS.min GCXS.prod GCXS.reduce GCXS.reshape GCXS.round GCXS.round_ GCXS.std GCXS.sum GCXS.to_scipy_sparse GCXS.tocoo GCXS.todense GCXS.todok GCXS.transpose GCXS.var sparse-0.16.0a9/docs/generated/sparse.GCXS.size.rst000066400000000000000000000001141463475501500217640ustar00rootroot00000000000000GCXS.size ========= .. currentmodule:: sparse .. autoproperty:: GCXS.size sparse-0.16.0a9/docs/generated/sparse.GCXS.std.rst000066400000000000000000000001071463475501500216060ustar00rootroot00000000000000GCXS.std ======== .. currentmodule:: sparse .. 
automethod:: GCXS.std sparse-0.16.0a9/docs/generated/sparse.GCXS.sum.rst000066400000000000000000000001071463475501500216200ustar00rootroot00000000000000GCXS.sum ======== .. currentmodule:: sparse .. automethod:: GCXS.sum sparse-0.16.0a9/docs/generated/sparse.GCXS.to_scipy_sparse.rst000066400000000000000000000001571463475501500242270ustar00rootroot00000000000000GCXS.to\_scipy\_sparse ====================== .. currentmodule:: sparse .. automethod:: GCXS.to_scipy_sparse sparse-0.16.0a9/docs/generated/sparse.GCXS.tocoo.rst000066400000000000000000000001151463475501500221360ustar00rootroot00000000000000GCXS.tocoo ========== .. currentmodule:: sparse .. automethod:: GCXS.tocoo sparse-0.16.0a9/docs/generated/sparse.GCXS.todense.rst000066400000000000000000000001231463475501500224530ustar00rootroot00000000000000GCXS.todense ============ .. currentmodule:: sparse .. automethod:: GCXS.todense sparse-0.16.0a9/docs/generated/sparse.GCXS.todok.rst000066400000000000000000000001151463475501500221330ustar00rootroot00000000000000GCXS.todok ========== .. currentmodule:: sparse .. automethod:: GCXS.todok sparse-0.16.0a9/docs/generated/sparse.GCXS.transpose.rst000066400000000000000000000001311463475501500230270ustar00rootroot00000000000000GCXS.transpose ============== .. currentmodule:: sparse .. automethod:: GCXS.transpose sparse-0.16.0a9/docs/generated/sparse.GCXS.var.rst000066400000000000000000000001071463475501500216040ustar00rootroot00000000000000GCXS.var ======== .. currentmodule:: sparse .. automethod:: GCXS.var sparse-0.16.0a9/docs/generated/sparse.SparseArray.asformat.rst000066400000000000000000000001531463475501500243210ustar00rootroot00000000000000SparseArray.asformat ==================== .. currentmodule:: sparse .. automethod:: SparseArray.asformat sparse-0.16.0a9/docs/generated/sparse.SparseArray.density.rst000066400000000000000000000001551463475501500241660ustar00rootroot00000000000000SparseArray\.density ==================== .. currentmodule:: sparse .. autoattribute:: SparseArray.density sparse-0.16.0a9/docs/generated/sparse.SparseArray.ndim.rst000066400000000000000000000001441463475501500234340ustar00rootroot00000000000000SparseArray\.ndim ================= .. currentmodule:: sparse .. autoattribute:: SparseArray.ndim sparse-0.16.0a9/docs/generated/sparse.SparseArray.nnz.rst000066400000000000000000000001411463475501500233070ustar00rootroot00000000000000SparseArray\.nnz ================ .. currentmodule:: sparse .. autoattribute:: SparseArray.nnz sparse-0.16.0a9/docs/generated/sparse.SparseArray.rst000066400000000000000000000005371463475501500225140ustar00rootroot00000000000000SparseArray =========== .. currentmodule:: sparse .. autoclass:: SparseArray .. rubric:: Attributes .. autosummary:: :toctree: SparseArray.density SparseArray.ndim SparseArray.nnz SparseArray.size .. rubric:: Methods .. autosummary:: :toctree: SparseArray.asformat SparseArray.todense sparse-0.16.0a9/docs/generated/sparse.SparseArray.size.rst000066400000000000000000000001441463475501500234570ustar00rootroot00000000000000SparseArray\.size ================= .. currentmodule:: sparse .. autoattribute:: SparseArray.size sparse-0.16.0a9/docs/generated/sparse.SparseArray.todense.rst000066400000000000000000000001501463475501500241430ustar00rootroot00000000000000SparseArray.todense =================== .. currentmodule:: sparse .. automethod:: SparseArray.todense sparse-0.16.0a9/docs/generated/sparse.abs.rst000066400000000000000000000000721463475501500210170ustar00rootroot00000000000000abs === .. currentmodule:: sparse .. 
autofunction:: abs sparse-0.16.0a9/docs/generated/sparse.all.rst000066400000000000000000000000721463475501500210220ustar00rootroot00000000000000all === .. currentmodule:: sparse .. autofunction:: all sparse-0.16.0a9/docs/generated/sparse.any.rst000066400000000000000000000000721463475501500210410ustar00rootroot00000000000000any === .. currentmodule:: sparse .. autofunction:: any sparse-0.16.0a9/docs/generated/sparse.argmax.rst000066400000000000000000000001031463475501500215240ustar00rootroot00000000000000argmax ====== .. currentmodule:: sparse .. autofunction:: argmax sparse-0.16.0a9/docs/generated/sparse.argmin.rst000066400000000000000000000001031463475501500215220ustar00rootroot00000000000000argmin ====== .. currentmodule:: sparse .. autofunction:: argmin sparse-0.16.0a9/docs/generated/sparse.argwhere.rst000066400000000000000000000001111463475501500220500ustar00rootroot00000000000000argwhere ======== .. currentmodule:: sparse .. autofunction:: argwhere sparse-0.16.0a9/docs/generated/sparse.asCOO.rst000066400000000000000000000001001463475501500212060ustar00rootroot00000000000000asCOO ===== .. currentmodule:: sparse .. autofunction:: asCOO sparse-0.16.0a9/docs/generated/sparse.as_coo.rst000066400000000000000000000001051463475501500215120ustar00rootroot00000000000000as\_coo ======= .. currentmodule:: sparse .. autofunction:: as_coo sparse-0.16.0a9/docs/generated/sparse.asarray.rst000066400000000000000000000001061463475501500217120ustar00rootroot00000000000000asarray ======= .. currentmodule:: sparse .. autofunction:: asarray sparse-0.16.0a9/docs/generated/sparse.asnumpy.rst000066400000000000000000000001061463475501500217440ustar00rootroot00000000000000asnumpy ======= .. currentmodule:: sparse .. autofunction:: asnumpy sparse-0.16.0a9/docs/generated/sparse.astype.rst000066400000000000000000000001031463475501500215520ustar00rootroot00000000000000astype ====== .. currentmodule:: sparse .. autofunction:: astype sparse-0.16.0a9/docs/generated/sparse.broadcast_arrays.rst000066400000000000000000000001431463475501500235740ustar00rootroot00000000000000broadcast\_arrays ================= .. currentmodule:: sparse .. autofunction:: broadcast_arrays sparse-0.16.0a9/docs/generated/sparse.broadcast_to.rst000066400000000000000000000001271463475501500227170ustar00rootroot00000000000000broadcast\_to ============= .. currentmodule:: sparse .. autofunction:: broadcast_to sparse-0.16.0a9/docs/generated/sparse.clip.rst000066400000000000000000000000751463475501500212040ustar00rootroot00000000000000clip ==== .. currentmodule:: sparse .. autofunction:: clip sparse-0.16.0a9/docs/generated/sparse.concat.rst000066400000000000000000000001031463475501500215140ustar00rootroot00000000000000concat ====== .. currentmodule:: sparse .. autofunction:: concat sparse-0.16.0a9/docs/generated/sparse.concatenate.rst000066400000000000000000000001221463475501500225320ustar00rootroot00000000000000concatenate =========== .. currentmodule:: sparse .. autofunction:: concatenate sparse-0.16.0a9/docs/generated/sparse.diagonal.rst000066400000000000000000000001161463475501500220270ustar00rootroot00000000000000diagonal ============= .. currentmodule:: sparse .. autofunction:: diagonal sparse-0.16.0a9/docs/generated/sparse.diagonalize.rst000066400000000000000000000001221463475501500225340ustar00rootroot00000000000000diagonalize =========== .. currentmodule:: sparse .. autofunction:: diagonalize sparse-0.16.0a9/docs/generated/sparse.dot.rst000066400000000000000000000000721463475501500210400ustar00rootroot00000000000000dot === .. 
currentmodule:: sparse .. autofunction:: dot sparse-0.16.0a9/docs/generated/sparse.einsum.rst000066400000000000000000000001031463475501500215450ustar00rootroot00000000000000einsum ====== .. currentmodule:: sparse .. autofunction:: einsum sparse-0.16.0a9/docs/generated/sparse.elemwise.rst000066400000000000000000000001111463475501500220560ustar00rootroot00000000000000elemwise ======== .. currentmodule:: sparse .. autofunction:: elemwise sparse-0.16.0a9/docs/generated/sparse.empty.rst000066400000000000000000000001001463475501500214000ustar00rootroot00000000000000empty ===== .. currentmodule:: sparse .. autofunction:: empty sparse-0.16.0a9/docs/generated/sparse.empty_like.rst000066400000000000000000000001211463475501500224070ustar00rootroot00000000000000empty\_like =========== .. currentmodule:: sparse .. autofunction:: empty_like sparse-0.16.0a9/docs/generated/sparse.equal.rst000066400000000000000000000001001463475501500213510ustar00rootroot00000000000000equal ===== .. currentmodule:: sparse .. autofunction:: equal sparse-0.16.0a9/docs/generated/sparse.expand_dims.rst000066400000000000000000000001241463475501500225430ustar00rootroot00000000000000expand\_dims ============ .. currentmodule:: sparse .. autofunction:: expand_dims sparse-0.16.0a9/docs/generated/sparse.eye.rst000066400000000000000000000000721463475501500210340ustar00rootroot00000000000000eye === .. currentmodule:: sparse .. autofunction:: eye sparse-0.16.0a9/docs/generated/sparse.flip.rst000066400000000000000000000000751463475501500212070ustar00rootroot00000000000000flip ==== .. currentmodule:: sparse .. autofunction:: flip sparse-0.16.0a9/docs/generated/sparse.full.rst000066400000000000000000000000751463475501500212170ustar00rootroot00000000000000full ==== .. currentmodule:: sparse .. autofunction:: full sparse-0.16.0a9/docs/generated/sparse.full_like.rst000066400000000000000000000001141463475501500222150ustar00rootroot00000000000000full_like ========= .. currentmodule:: sparse .. autofunction:: full_like sparse-0.16.0a9/docs/generated/sparse.isinf.rst000066400000000000000000000001001463475501500213520ustar00rootroot00000000000000isinf ===== .. currentmodule:: sparse .. autofunction:: isinf sparse-0.16.0a9/docs/generated/sparse.isnan.rst000066400000000000000000000001001463475501500213520ustar00rootroot00000000000000isnan ===== .. currentmodule:: sparse .. autofunction:: isnan sparse-0.16.0a9/docs/generated/sparse.isneginf.rst000066400000000000000000000001111463475501500220460ustar00rootroot00000000000000isneginf ======== .. currentmodule:: sparse .. autofunction:: isneginf sparse-0.16.0a9/docs/generated/sparse.isposinf.rst000066400000000000000000000001111463475501500220760ustar00rootroot00000000000000isposinf ======== .. currentmodule:: sparse .. autofunction:: isposinf sparse-0.16.0a9/docs/generated/sparse.kron.rst000066400000000000000000000000751463475501500212260ustar00rootroot00000000000000kron ==== .. currentmodule:: sparse .. autofunction:: kron sparse-0.16.0a9/docs/generated/sparse.load_npz.rst000066400000000000000000000001131463475501500220540ustar00rootroot00000000000000load\_npz ========= .. currentmodule:: sparse .. autofunction:: load_npz sparse-0.16.0a9/docs/generated/sparse.matmul.rst000066400000000000000000000001031463475501500215440ustar00rootroot00000000000000matmul ====== .. currentmodule:: sparse .. autofunction:: matmul sparse-0.16.0a9/docs/generated/sparse.matrix_transpose.rst000066400000000000000000000001431463475501500236530ustar00rootroot00000000000000matrix\_transpose ================= .. 
currentmodule:: sparse .. autofunction:: matrix_transpose sparse-0.16.0a9/docs/generated/sparse.max.rst000066400000000000000000000000721463475501500210370ustar00rootroot00000000000000max === .. currentmodule:: sparse .. autofunction:: max sparse-0.16.0a9/docs/generated/sparse.mean.rst000066400000000000000000000000751463475501500211750ustar00rootroot00000000000000mean ==== .. currentmodule:: sparse .. autofunction:: mean sparse-0.16.0a9/docs/generated/sparse.min.rst000066400000000000000000000000721463475501500210350ustar00rootroot00000000000000min === .. currentmodule:: sparse .. autofunction:: min sparse-0.16.0a9/docs/generated/sparse.moveaxis.rst000066400000000000000000000001111463475501500220770ustar00rootroot00000000000000moveaxis ======== .. currentmodule:: sparse .. autofunction:: moveaxis sparse-0.16.0a9/docs/generated/sparse.nanmax.rst000066400000000000000000000001031463475501500215270ustar00rootroot00000000000000nanmax ====== .. currentmodule:: sparse .. autofunction:: nanmax sparse-0.16.0a9/docs/generated/sparse.nanmean.rst000066400000000000000000000001061463475501500216650ustar00rootroot00000000000000nanmean ======= .. currentmodule:: sparse .. autofunction:: nanmean sparse-0.16.0a9/docs/generated/sparse.nanmin.rst000066400000000000000000000001031463475501500215250ustar00rootroot00000000000000nanmin ====== .. currentmodule:: sparse .. autofunction:: nanmin sparse-0.16.0a9/docs/generated/sparse.nanprod.rst000066400000000000000000000001061463475501500217110ustar00rootroot00000000000000nanprod ======= .. currentmodule:: sparse .. autofunction:: nanprod sparse-0.16.0a9/docs/generated/sparse.nanreduce.rst000066400000000000000000000001141463475501500222130ustar00rootroot00000000000000nanreduce ========= .. currentmodule:: sparse .. autofunction:: nanreduce sparse-0.16.0a9/docs/generated/sparse.nansum.rst000066400000000000000000000001031463475501500215460ustar00rootroot00000000000000nansum ====== .. currentmodule:: sparse .. autofunction:: nansum sparse-0.16.0a9/docs/generated/sparse.nonzero.rst000066400000000000000000000001061463475501500217420ustar00rootroot00000000000000nonzero ======= .. currentmodule:: sparse .. autofunction:: nonzero sparse-0.16.0a9/docs/generated/sparse.ones.rst000066400000000000000000000000751463475501500212210ustar00rootroot00000000000000ones ==== .. currentmodule:: sparse .. autofunction:: ones sparse-0.16.0a9/docs/generated/sparse.ones_like.rst000066400000000000000000000001141463475501500222170ustar00rootroot00000000000000ones_like ========= .. currentmodule:: sparse .. autofunction:: ones_like sparse-0.16.0a9/docs/generated/sparse.outer.rst000066400000000000000000000001001463475501500214000ustar00rootroot00000000000000outer ===== .. currentmodule:: sparse .. autofunction:: outer sparse-0.16.0a9/docs/generated/sparse.pad.rst000066400000000000000000000000721463475501500210160ustar00rootroot00000000000000pad === .. currentmodule:: sparse .. autofunction:: pad sparse-0.16.0a9/docs/generated/sparse.permute_dims.rst000066400000000000000000000001271463475501500227500ustar00rootroot00000000000000permute\_dims ============= .. currentmodule:: sparse .. autofunction:: permute_dims sparse-0.16.0a9/docs/generated/sparse.prod.rst000066400000000000000000000000751463475501500212210ustar00rootroot00000000000000prod ==== .. currentmodule:: sparse .. autofunction:: prod sparse-0.16.0a9/docs/generated/sparse.random.rst000066400000000000000000000001031463475501500215250ustar00rootroot00000000000000random ====== .. currentmodule:: sparse .. 
autofunction:: random sparse-0.16.0a9/docs/generated/sparse.reshape.rst000066400000000000000000000001061463475501500216770ustar00rootroot00000000000000reshape ======= .. currentmodule:: sparse .. autofunction:: reshape sparse-0.16.0a9/docs/generated/sparse.result_type.rst000066400000000000000000000001221463475501500226250ustar00rootroot00000000000000result_type =========== .. currentmodule:: sparse .. autofunction:: result_type sparse-0.16.0a9/docs/generated/sparse.roll.rst000066400000000000000000000000751463475501500212250ustar00rootroot00000000000000roll ==== .. currentmodule:: sparse .. autofunction:: roll sparse-0.16.0a9/docs/generated/sparse.round.rst000066400000000000000000000001001463475501500213710ustar00rootroot00000000000000round ===== .. currentmodule:: sparse .. autofunction:: round sparse-0.16.0a9/docs/generated/sparse.rst000066400000000000000000000022621463475501500202560ustar00rootroot00000000000000API === .. rubric:: Description .. automodule:: sparse .. currentmodule:: sparse .. rubric:: Classes .. autosummary:: :toctree: COO DOK GCXS SparseArray .. rubric:: Functions .. autosummary:: :toctree: abs all any argmax argmin argwhere asCOO as_coo asarray asnumpy astype broadcast_arrays broadcast_to clip concat concatenate diagonal diagonalize dot einsum elemwise empty empty_like equal expand_dims eye flip full full_like isinf isnan isneginf isposinf kron load_npz matmul matrix_transpose max mean min moveaxis moveaxis nanmax nanmean nanmin nanprod nanreduce nansum nonzero ones ones_like outer pad permute_dims prod random reshape result_type roll round save_npz sort squeeze stack std sum take tensordot tril triu unique_counts unique_values var vecdot where zeros zeros_like sparse-0.16.0a9/docs/generated/sparse.save_npz.rst000066400000000000000000000001131463475501500220730ustar00rootroot00000000000000save\_npz ========= .. currentmodule:: sparse .. autofunction:: save_npz sparse-0.16.0a9/docs/generated/sparse.sort.rst000066400000000000000000000000751463475501500212440ustar00rootroot00000000000000sort ==== .. currentmodule:: sparse .. autofunction:: sort sparse-0.16.0a9/docs/generated/sparse.squeeze.rst000066400000000000000000000001061463475501500217310ustar00rootroot00000000000000squeeze ======= .. currentmodule:: sparse .. autofunction:: squeeze sparse-0.16.0a9/docs/generated/sparse.stack.rst000066400000000000000000000001001463475501500213470ustar00rootroot00000000000000stack ===== .. currentmodule:: sparse .. autofunction:: stack sparse-0.16.0a9/docs/generated/sparse.std.rst000066400000000000000000000000721463475501500210440ustar00rootroot00000000000000std === .. currentmodule:: sparse .. autofunction:: std sparse-0.16.0a9/docs/generated/sparse.sum.rst000066400000000000000000000000721463475501500210560ustar00rootroot00000000000000sum === .. currentmodule:: sparse .. autofunction:: sum sparse-0.16.0a9/docs/generated/sparse.take.rst000066400000000000000000000000751463475501500212010ustar00rootroot00000000000000take ==== .. currentmodule:: sparse .. autofunction:: take sparse-0.16.0a9/docs/generated/sparse.tensordot.rst000066400000000000000000000001141463475501500222700ustar00rootroot00000000000000tensordot ========= .. currentmodule:: sparse .. autofunction:: tensordot sparse-0.16.0a9/docs/generated/sparse.tril.rst000066400000000000000000000000751463475501500212270ustar00rootroot00000000000000tril ==== .. currentmodule:: sparse .. 
autofunction:: tril

sparse-0.16.0a9/docs/generated/sparse.triu.rst
triu
====

.. currentmodule:: sparse

.. autofunction:: triu

sparse-0.16.0a9/docs/generated/sparse.unique_counts.rst
unique\_counts
==============

.. currentmodule:: sparse

.. autofunction:: unique_counts

sparse-0.16.0a9/docs/generated/sparse.unique_values.rst
unique\_values
==============

.. currentmodule:: sparse

.. autofunction:: unique_values

sparse-0.16.0a9/docs/generated/sparse.var.rst
var
===

.. currentmodule:: sparse

.. autofunction:: var

sparse-0.16.0a9/docs/generated/sparse.vecdot.rst
vecdot
======

.. currentmodule:: sparse

.. autofunction:: vecdot

sparse-0.16.0a9/docs/generated/sparse.where.rst
where
=====

.. currentmodule:: sparse

.. autofunction:: where

sparse-0.16.0a9/docs/generated/sparse.zeros.rst
zeros
=====

.. currentmodule:: sparse

.. autofunction:: zeros

sparse-0.16.0a9/docs/generated/sparse.zeros_like.rst
zeros_like
==========

.. currentmodule:: sparse

.. autofunction:: zeros_like

sparse-0.16.0a9/docs/index.rst
Sparse
======

.. raw:: html
   :file: logo.svg

This implements sparse arrays of arbitrary dimension on top of :obj:`numpy` and :obj:`scipy.sparse`. It generalizes the :obj:`scipy.sparse.coo_matrix` and :obj:`scipy.sparse.dok_matrix` layouts, but extends beyond just rows and columns to an arbitrary number of dimensions.

Additionally, this project maintains compatibility with the :obj:`numpy.ndarray` interface rather than the :obj:`numpy.matrix` interface used in :obj:`scipy.sparse`. These differences make this project useful in certain situations where scipy.sparse matrices are not well suited, but it should not be considered a full replacement. The data structures in pydata/sparse complement and can be used in conjunction with the fast linear algebra routines inside scipy.sparse. A format conversion or copy may be required.

Motivation
----------

Sparse arrays, or arrays that are mostly empty or filled with zeros, are common in many scientific applications. To save space we often avoid storing these arrays in traditional dense formats, and instead choose different data structures. Our choice of data structure can significantly affect our storage and computational costs when working with these arrays.

Design
------

The main data structure in this library follows the `Coordinate List (COO) `_ layout for sparse matrices, but extends it to multiple dimensions. The COO layout stores the row index, column index, and value of every element:

=== === ====
row col data
=== === ====
0   0   10
0   2   13
1   3   9
3   8   21
=== === ====

It is straightforward to extend the COO layout to an arbitrary number of dimensions:

==== ==== ==== === ====
dim1 dim2 dim3 ... data
==== ==== ==== === ====
0    0    0    .   10
0    0    3    .   13
0    2    2    .   9
3    1    4    .   21
==== ==== ==== === ====

This makes it easy to *store* a multidimensional sparse array, but we still need to reimplement all of the array operations like transpose, reshape, slicing, tensordot, reductions, etc., which can be challenging in general.

This library also includes several other data structures. Similar to COO, the `Dictionary of Keys (DOK) `_ format for sparse matrices generalizes well to an arbitrary number of dimensions. DOK is well-suited for writing and mutating. Most other operations are not supported for DOK. A common workflow may involve writing an array with DOK and then converting to another format for other operations.

The `Compressed Sparse Row/Column (CSR/CSC) `_ formats are widely used in scientific computing and are now supported by pydata/sparse. The CSR/CSC formats excel at compression and mathematical operations. While these formats are restricted to two dimensions, pydata/sparse supports the GCXS sparse array format, based on `GCRS/GCCS from `_ which generalizes CSR/CSC to n-dimensional arrays. Like their two-dimensional CSR/CSC counterparts, GCXS arrays compress well. Whereas the storage cost of COO depends heavily on the number of dimensions of the array, the number of dimensions only minimally affects the storage cost of GCXS arrays, which results in favorable compression ratios across many use cases.

Together these formats cover a wide array of applications of sparsity. Additionally, with each format complying with the :obj:`numpy.ndarray` interface and following the appropriate dispatching protocols, pydata/sparse arrays can interact with other array libraries and seamlessly take part in pydata-ecosystem-based workflows.

LICENSE
-------

This library is licensed under BSD-3.

.. toctree::
   :maxdepth: 3
   :hidden:

   install
   quickstart
   construct
   operations
   generated/sparse
   roadmap
   contributing
   changelog
   conduct

.. _scipy.sparse: https://docs.scipy.org/doc/scipy/reference/sparse.html

sparse-0.16.0a9/docs/install.rst
.. currentmodule:: sparse

Install
=======

You can install this library with ``pip``:

.. code-block:: bash

   pip install sparse

You can also install from source from GitHub, either by pip installing directly::

   pip install git+https://github.com/pydata/sparse

Or by cloning the repository and installing locally::

   git clone https://github.com/pydata/sparse.git
   cd sparse/
   pip install .

Note that this library is under active development and so some API churn should be expected.

sparse-0.16.0a9/docs/logo.png (binary image data omitted)
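The COO layout shown in the tables above maps directly onto the ``sparse.COO`` constructor, which takes a ``(ndim, nnz)`` array of coordinates and a matching array of values. As a minimal sketch of the row/col/data table from the Design section (the ``shape=(4, 9)`` argument is only an illustrative choice large enough to hold the listed indices, not something fixed by the table):

.. code-block:: python

   import numpy as np
   import sparse

   # One column of coords per stored element: rows (0, 0, 1, 3), cols (0, 2, 3, 8).
   coords = np.array([[0, 0, 1, 3],
                      [0, 2, 3, 8]])
   data = np.array([10, 13, 9, 21])

   # Build the 2-D COO array; shape is an assumed, illustrative value.
   s = sparse.COO(coords, data, shape=(4, 9))

   print(s.nnz)        # 4 stored elements
   print(s.todense())  # dense numpy array with the same values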
É¯¦ºfɽE9¤DDDD"Û `ÙT\ó¹†äøµXö½My+¤DDDD"ç eœÙŸ#9fXvƒ»kEmbÄ•÷jTEDDäMÍ-¤DDDD¢ë •Ì’]ðy’£gƒ±p·/%·è{/†­ûjªk–ÕitEDDä Í-¤DDDD¢ë •®¢âÚ¿#1b&‹Ò–§É/ùAî,6¦T«0%"""¯{n¡ %"""8)+ÛƒìµObØåå µñQrËþ0Ê—ï)L‰ˆˆÈë¤ %"""‘X©Ê>d¯ý;C¦‚1×?H~ÅO '^}ãÀÀSS]SW¯Q‘ÓQ‰H¬ƒT·T\ûyœA—€âÚÿ!ÿÌ}„Å–3ÿQàßgŒ©U˜‘WR‰Hœƒ”Ýc0Ù‡3`"Rxþ—VÝOXʽö+L‰ˆˆÈ+(H‰ˆˆˆD$ÖAª×² >‡Óo<„…çî§°æ—„nþußGzwZX «kêu4ˆˆˆ¼½)H‰ˆˆˆD$ÖAªï*®ùvŸÑøäŸùŵÿCèߨ4…Æ[¨0%""òö¦ %"""‘8)gÀEd¯þì^# ýù?¡´þ÷o ¾ûÖî8 Á¯¶ºfɽ:BDDDÞ>¤DDDD"ç •6ÌÜOcW"tóäßCiË“à{gç¦DDDÞV¤DDDD"ë 5ò*²s>…Õ­?a±…\Ý]”¶-À;»¤0%""ò¶  %"""‘8©äØùdf} «²/a¡‰Ö§¿‹»sþ¹yÀ€ÅÆ”j«k–Õéȹð(H‰ˆˆˆD$ÖAê¢ÈÌü0VE/‚Ü1rO}·~„Á¹}`…)‘ ’‚”ˆˆˆHDâ¤R“ßIúŠ?ÇÊT´¡õ‰ÿƒ·çˆj.©0%""rAQ‰HœƒTzêûH_þ§˜T%Aó!Zÿg¼}ë£ßÀ¿ÏS[]SW¯#JDD$¾¤DDDD"ß eHOÿ3ÒS߇Idñ›ö‘{ìx·œ¿MR˜‰5)‘ˆÄ6H‹ÌŒÛH]ònL"|7­~ÿÈÎó¿må0US]Sר#LDD$FÓ )‘hÄ6HY™«>Bêâ[0vÿh=­|ÿøË]cûšBã-´°*L‰ˆˆÄƒ‚”ˆˆˆHDb¤ìÙÙŸ$9ñFŒeãÞAëÃwœ8е¶SaJDD$6¤DDDD"× eœ™¹·“špƒw`s9HµíšÜ¦zÖ,©ÕQ'""ÒEç R""""шmJdÈ^]CrÌ|¼ýhy¨–°p¢kox@ƒ1^muÍ’{uô‰ˆˆt±ù…‚”ˆˆˆH4b¤’Y² >OrÔU†¸{_¤õ¡ZÂR.; 0%""Òõæ R""""шmJUQqÝß“1ƒ0ððv¯¡õ¡; ýR¼v$ Á·¦ºféou4Šˆˆœçù…‚”ˆˆˆH4b¤2Ý©¸þ‹$†]Nè»x»ž¥å¡Z ¦óÈ€ÅNŸ!µU¼¯NG¥ˆˆÈyš_(H‰ˆˆˆD#®AÊÊö âÆÿ3x ¡_ÂݱŒÖG¾ë±pú£â_[lUöªêttŠˆˆDKAJDDD$"± R½¨¸éqN"ôЏÛÓúø?Çy Œ3h•7݉ÉV´]\|þ—5™ÙŸ\«£TDD$¢³±‚”ˆˆˆH4b¤*ûPqó8ýǺJ[ž"÷Ôwb<6$†L¥âÆ/cÒU-‡)¬þÅ~}Ÿ1¦¶º¦®^G«ˆˆÈ9>+H‰ˆˆˆDãø]ׄq\wÉêÖŸÊw|»ÏèrÚø(¹º»c<6$†Ï âú/bRÍ)¬ºŸâ†‡Ê¿|…)‘s}:V‰ÆÉ+¤ q SV÷TÞò5ì^à Ý<Åõ¿'¿ô‡1ž[$F]EÅu_À$2MûÉ?s/¥ÍOt¾]àßÕ¦uôŠˆˆœåÓ±‚”ˆˆˆH4šÿû¯CÿÈNB¯Ø>#aÊî1˜Š[¾Ýc0a)GqÝä—ÿ8¾aÙ$ÇÌ#»às'…|…•ÿNi[Ý«oÐo¡…µPaJDDäìQ‰ˆtWèn_BiûRü£» cֵÔÝs•·~«ûÂb+ŵ¿&ÿ̽ñË!9~WlÿXùå?ÆÝ¹âÌ£0%""rV)H‰ˆˆˆD'$ðð줴å)J;W4îådŒêšaÊî=²¤ªú[(<ÿK «îï(Ø Ro$;÷Ó`ÙøGv’_öo¸ «^ûošŒñj«k–,Ôá,""òæ)H‰ˆˆˆD§câú%üCÛ(mz·þY‚æÃtÕ0e÷Må;ÿ «¢a¡™Âê_PXó_ñÛI’“o!;ûS` þ¡mä–þoÏÚ×' maê^Ö"""oâ|¬ %""" oßúÐî3“Ètü,tóxû7RÚò^Ãs­G»Üv;ýÆQùÎÂdª óMžûO /ü¿øN€©KÞMfÖÇÊãr`3ù%÷àíßøÆïLaJDDäͤDDDD¢qâç C.%1f>NŸQ`':~[ðö­§´é Ü=/曺Ìv;/¦òÖ¯aRU„¹Fò«þƒâ‹¿ï8‘&5õýdfÜ€·o¹Å÷àÚúæï4 Á˜ÒmÕ5Ëêt¤‹ˆˆ¼Žó±‚”ˆˆˆH4Žßuuàô¿gØ4’£®Âî5,»ã6a¡÷å5”^zïÀFÂbËyßngÈT*o¾“ª È£°òg7<ß p2Kú²žþA¼=kÉÕÝ´þ­ßyÀbcJµ S"""¯q>V‰Æñ»® O]Êt ‰‘W’>»zp§0äŽáÕ¯¢¸ñ1üC[ ÝüyÛîÄð+¨¸ñc’Y‚Ö£ä—ÿ„Ò¦Çâ;NUžþ礧¾w÷r‹î"hÜsöDaJDD䟤DDDD¢Ñ~…Ô+-OŒ˜Abø C/êê¶Óñ» ùîŽe7?Ap´žÐ+F¾Ý‰Q³¨¸áK'EÐ|ˆüòSÚòT|'Àé*23n#uÉ»pw=C®în‚ÎþƒþƘšêšºz½DDDN9+H‰ˆˆˆDãdꘊѦ,›ä¨Ù$†OÇ<«²ÏÉ+¦Â i¥mu”6?‰ß´|7²íN޽šìµŸo RÉ/ý¥m‹ã;ÎT“¹òä&½wDzrj9rî4ðï3ÆÔ*L‰ˆˆ´¤DDDD¢ñê Õ1%£=L™d–ĨY$FÌÄ0«¢S¾Yàã7î¡´ù J[4‚À?çÛœp=×ü-ØAÓ>rK„»cYlÇÁÊö$3ë£$'\@iÛbòuw䎟ûW˜)Ïy¤DDDD¢qæ ušIZ¦;É1sI ¿»ß8¬lÊá B¿„d¥Mãn_B;çpN—št3Ùùw€å4î!·ä¸»ž‰í8X•}ÈÌþ8ɱWPÚü$¹Å÷ND¶ aèÝia-¬®©kÔ+CDDÞŽ¤DDDD"òF‚T;«¢Éq×à ›†Ó{&Ó Œ@XlÅ?¼â¦Çqw,oûF¾³?·K]ò.²socák ¿øû¸»WÇv¬nÈÎù$‰Q³(m|”Ü’ï[£Ý€¦Ðx ¦DDäíHAJDDD$"o&Hµ³º$5áZœ!S±{Ǥ*Êa* ‹Íx·PÜð{Ü]+Á÷Îêv§§¾Ì¬O€1øGv’[|Þžµ±«zÙ¹·“~Å ‘_òƒó÷M† S""ò6¤ %"""‘7¤:#ŸÝg4ÉqW“|)Võ`L2[^c*ð 'ðö­§¸î¼—_8kÛžö§d®ü+ÀàÞF®î¼}ëc;vÏadæÞNbèe_ü-ù¥?"ôKçwÚŒñjªk–Ü«W‹ˆˆ\è¤DDDD"òV®ze˜r]LrÜ‚òÂçÝú—ÀïäŽáî~žâúßáÜòg‹†ôô¿ 3ã/ðn!_w7ÞM±»÷H²ó>ƒ3h2…çE~ù!ðºÆ4ãÕ*L‰ˆÈ…LAJDDD$"o-HuLß85L%†^Nrµ8ý'`*zaœ4½Á‰x»×P\ÿ þ±†7÷p–MfÆm¤§ýÞþäêîÆ?´5¶ã`÷CvÞ8. ðÜÏɯüé9]þMQ˜‘ ˜‚”ˆˆˆHDÎNê˜ÆÑ¦Œ!9z‰1ópúÃd{`ìDy})·@д·~Å 4|õßÿ!¶CæÊžú~¼½ëÊAêÈÎØŽƒÓÙyŸÁî7)<û䟹¯ënpÀ‹Æ”jªk–ÕéU$"" )‘ˆœÝ Õ1£=,;Ibì|’cçc÷…•é– _þF¾Æ=¸;WPÜð{ÂBó«þþ´÷î¤È\õRSþïåçÉÕ}ïÍ_qÕ8'‘vïQ†äWþ”Âs?ïú°Ø˜R­Â”ˆˆ\¤DDDD"rn‚TÇ´ŽŽ0•ª 9á’cæ`÷†IU–¿‘/ðòMøGëq·-¦¸éÑS¾‘ïôaÊ$2df}œÔä[pëW‘_rþñ=±gð”rê9 €ü²£ðü/ã³ S""rP‰È¹ RÓ;ÚÃ’UÙ›äÄ›IŽº«ÛÀ¶oä| wïàfÜíK)m}ú”õ“:‡)“ª 3ë¤&Ý €»s9¹%? 
hÚÛqH ½ŒÌ¼Ï`÷OnÉ÷)¾øÛøíHàß—žòžÚÌü;êõꑸQ‰H4AêÕìžÃHN¼‰Äðéåoäs’Bè—[ãíßHiûRÜËO&!&UEvî§HN¸€Ò¶Åä—þ ùPlÇ!1bÙ¹·cu¾G®î.ŠŠå¾¤/}/™«>zv¢¨×«LDDâBAJDDD$"ç+Hµ³ûŽ!uñ-$†\†UÙlh[ø¼ùÞ¾ ”¶>÷ò ''‹™j²sÿšä¸k(m~’ü²%h=ÛqHŒšEvΧ°ºõ'ôKäžü¥ÍOÄr_Ò—ÿ ™ÛÁ?¶û¾üÿ§¶òï©×«MDDº:)‘ˆœï Õ~Õ“3`"©KÞ3d V¦º¼¾–rM{ñö¬£´uÞ'Eöº/3€âK_ñcÂ\clÇ!9f.™9ŸÂªìCèÉ=þMJÛêb¹/ééNæŠ?ËÁ?ZOîño5¹×/´°V×Ô5êU'""]•‚”ˆˆˆHDÎꘂe“v9©‹oÁ0 “®,ÿÊßÈw¼wÇrüC[INzÇÉ µþAò+~JX8ÛqHŽ_@fÖDZ*zºyZýÆ+>®™™&}ùÊAêðvZŸü6þ¡­ÐOaJDDº,)‘ˆt Õ>´0NŠÄ¨«HM¼ »ÏLª¢í—!a® ÿøË˜lìƒ(<ÿ+ «þƒ°ØÛqH^t™«>Š•íAXÊÑúP-îîձܗ̬‘¾ô}`Ùø‡¶–ƒÔáí'o 0%""]”‚”ˆˆˆHDº\jgÙ˜TÉѳIŽ¿»ÏhL"]þ]à—¿Ïv(®ýr‹ï‰õ8¤&½ƒÌ•…Ét',4Óòû/ãí}1–û’™ó)ÒSÞÆÂ;°‘Ü“ßÁ?ºëÕ7 h0Æ«­®Yr¯^‰""Ò(H‰ˆˆˆD¤Ë©v–ƒUÑ‹ä˜9å0Õk8XN§›øÇwS\÷;J›Ÿ ,4ÇrR—¼‹ÌŒaÒU„ùÆrÚ·!–û’÷R“o-©}ëÉ=õ/øÇÎü S""ÒE(H‰ˆˆˆD¤Ë©vv»[cæ’šx#V·~´ÅÛ³w×JÜ]Ï–r±‡ô¥ï%}Å_`R•­Giýý—ñlŠáLÞ"{u ©‰7•ƒÔžµäžþüã{^ûoŒqkªk–þV¯L9/§1)‘hÄ&H"5ñ&ÒWüVUßο‚æC¸»×à6¬Âݹ/û”¾ì¤§“Ì4¢å÷_./7¶CÅÕŸ%9áz0w÷jrO/$hÚ÷úï#`±1¥Úêšeuz…ŠˆH”¤DDDD"Ç å œDvÞ§±ûŒß%ô]L"¦ü|>Á‰ý¸õÏâÖ¯Âmx®ËïSzúI_ör:±Ÿ–¿ŒdGü&òNŠì5Ÿ%9nA9HíZI®î{'¼ñ;S˜‘¨Ïc R""""шcJ ½ŒÌìO`÷Eè)½ô&U3d*VE¯“7 üã/ãîX†[¿ oßú.»O™™&ué{1‰4þñ=´>ô§_¼«Oä“Y²×ü-É1óÊAjÇ2r‹ï!h>ôæï4`±1AMuMÝZ½bED䜞ǤDDDD¢Ë 5b™«>V^àÜ÷È-ú.~ã^¬ÊÞ$FÏ!1äRLªêä„þ‘”¶-ÁÛ½ïà–.·O™Y'uÉ»0N ÿè.ZºÿøîøMäSUT,ø‰Q³ÀJ[‘_ò‚Ö£oýÎÿ>cLmuM]½^¹""rNÎc R""""шe5‹ÌUÅî1|—Ö§¾ƒxVe/0Ve#fâ ¼“ª8ù‡¾‹wx¥ÍOáí]×¥>—{;ÉI7—ƒÔám´<üU‚Æ=±;žL¦šŠŸ'1bF9Hm~‚üÒ%È;{¢0%""çê<¦ %"""8©äØ«É̼ «z0¡W$÷Ä·Zb’§Ä'+UÙ‡äèYØýÆc’ÙŽ_…nÿÐVJ›Ÿ,‡©ã/Ÿ÷}Ê^óY’®ÃØI¼›i}ô«MûcwxÞö©âº¿'1v~9Hí]Gëcßxkë.'VUßr:0×=@~åÏ 'ÎÍ4…Æ[ha-T˜‘·JAJDDD$"q R©I7“žþA¬ª~„ÅVZû:ëÔ)%aªÐ –ƒ3pɱó°{;Ñq˰”ÃmxŽÒæ'ðn9;ë½±é/7|‰äèÙ`'pw¯!÷Ä7 ZŽÄîx²º bÁçq_ ÏÿŠÂªû ‹Íçö¦DDälœ‘¤DDDD¢Ë uÉ»I_þ¬Ê>„…fZýZ[Œ:Ý®œ¦Š­'…3d*‰‘WbWÛé¸eX8»keùŠ©#;su Û¡âú/‘yØî®gÈ=ùí³»îRDìCÈ^ó9œA“ „šÿ¢°úç„ÅÖh6 -Lõ¬YR«W·ˆˆ¼Q R""""‰cJO}?©©ïêèE˜k¤õѯ·…¥?´+§„)·€IfH ›Ž3ô2ìîÁ:¦‚–ø»ž¡´åIü#õçüê“ÈPqÃ?6­¤¶/¥õéïž›u—Î1»×ðr0ªÿKáù_–rÑnH@ƒ1^muÍ’{õ*‘×}NV‰F,ƒÔ´?#5å=XÙ­Gi}ìŸ0§\éôSM:”ïa’Yç“zVU¿Nû šöSÚ± wÛ"ü£ „nþÜL~ÓUTÜð%C¦‚±)m]D®îîs·îÒ9d÷CöšÏâô O~åÏ(¾øB·p~6HaJDDÞÈ9YAJDDD$q R™·‘š|+&SMÐ|ˆÖ'¾‰9eM¨×9å¤#LV¶‰¡—á ¹«¢7S¾Yâ7ù ÜË ÷zųº?V¶'7| gÐÅ`,J›'·ä„Å–ØONÿ d¯þì>£À÷È/ÿ1Åõžõçì h0¦t[uͲ:½êED䌳)‘hÄ2H]õQR“nƤ«š{êÛ)ƒSONý¨Ÿ©èMrÄ ìae{´ý<ü£õå0µk%þ‰ƒà»ge¬ª¾T\ÿ¥òÇ܀ↇÈ/ÿqôs; œA“Éο»×pB¯D~é(½ô¡_ê°Ø˜R­Â”ˆˆœvV  %"""X©9Ÿ"uÑõ˜d%~ãrOã$ÏÊ}‡^‰ÐÍãôKbäUØ}Ç`eºw|”/ôŠø‡·SÚø(nÃêò7òÞ[zL«z0×§ïØòºKë °òßÏßÇÜÞ‚ÄÐËÈÌû4võB¯@~ñ÷)n~â¬Å»³FaJDDNCAJDDD$"q RÙùww &Y¬¾¤’Ù³úa¡,§ÿ#f`÷Iwëø(_XÊáØD饇ñö®#È7B࿩Dz{ §âº/`÷Ex_øVÝþ?æö&$FÌ$;çSXÝ–räêmø^×Üà€ÅÆ·U×ÔÕ뿈ˆ(H‰ˆˆˆD$–AjÁçHŽ™‡I¤ñì ·è.Lªòœc¨¸îï;>æVXýs kþ»ë]Uô:$ÇÌ%3ëcXUý ‹-äžþ.¥ËÞòUdç\àßgŒ©U˜y{S‰HƒTÅõÿ‹Ä¨Y;‰wh+ùº»1™îçô1ÃB3&S3øƒ§`U>yUV´ÃÛóÅ ãÞVþF¾×9§uL$»àsØ=†º ÏÞGáÅßtÝ«Šþ€äød®üVeÂ|­Oÿ oúê±È)L‰ˆ¼­)H‰ˆˆˆD$–Aê¦/“>c;xû7’[úÃò:O ÍXÝúã ž‚3ðb¬ª¾˜d0à{Ípw?OqÃï Ž¿Lè•8uÑôÓqO¡âš¿Åê>€°ØJ~ÅO(¾ôp|"Î)Ro"=óCXÙž¹cäžünÃê7|ÕØyøw%N¬­üã4ê¿„ˆÈÛ‡‚”ˆˆˆHDâ¤*où*ÎÐË1ÆÂÝ»ŽüÊŸb¥»Eº a¡»×œA“±û_„UÙã¤À@èNÀ­–⋼æÂç‰áW½º¦|UQá¹¥?¢´ù‰×}…UW’ºä]¤§+SMÐz”ÖÇ¿‰·ç…XîKæÊ6%'^¿Ð؉…&U¥0%"ò6  %"""‘X©w}gð%x»Ÿ§°êÿbÒUçe[B¯„Ó{Π)ØýÆb²=0v°”ÃoÜ‹»} Å—!Ì7qº«¥£g“{;VE/‚|#ùºïQÚ¶„׺²ª+JO}?éË?€IU´¢õ±ÆÛ·!–ûRyË×H ›˜&w÷ê…­~iaõgžR˜¹€)H‰ˆˆˆD$vAÊXT½çÛ8&†-Vd IDAT~ù*¤5¿Ä¤+Ïû¦9}ÇbšŒÝkVº;Øa±ÿh=¥-OSÚò$¡[8u‡Êë.ÍþV¦;AËr‹î*¯»Céé$}éû0É,Á‰ý´>ö ¼›c¹/Uïù6ÎÀ‹pw¯¡å·_j2Æ­©®Yr¯þsˆˆ\˜¤DDDD"· eì$•ôœ~ã}wç Šë~{rñóÍNb÷Cbð¥ØÕƒËßÈgÙx¹ãø·PÚ±”Òæ'ÛvÈšx™+?‚IWœ8HîéÁݽ&†³xCfÆm¤.yOù÷ÐúÈWñìŒß¾X6UïùNÿ‹Êás×3´>|gùw Æxµ S"")‘ˆÄ.H%³T½ç;Ø}FzÅòÇá6>‚IdºÌ6†^ ‚€ÄÈ™$†\ŠÕ}@ÛöB¿DØzoÿFJÛ—àî\Ajò;É\ùaL²¿q¹§¾…·w}ü&Ë&såGHM¾c'ðí¦õá¯àß¿7$‰4•ïþv[øtqw,¥õ±ê|#…)‘ Ž‚”ˆˆˆHDb¤²ÕT½ûÛØ½†ºJ[ž¢´uÆIv¹m ͘T%‰Q³pMƪêW^ø< Ýw’„nŽÐ+•ל2ÿÈNZûþ±†øM♹MrÂucáÜBëÃ_!h9»}±*z•³^à KyJ[ž$·è®?üG‹)Õ*L‰ˆÄ—‚”ˆˆˆHDb¤ªSõΪHXh¡øÒCx»×”×iêâÂR»çp£ga÷•ê¶ aÆ@AËarO·þÙö©1qù†:“H“÷’ãàØHËõ„¹ø}1Õ­?•ïúgìê!åð¹ñqrK¾ÿúþXaJD$¶¤DDDD"¯ e°{ 
§òÖ¯cuëG˜?Aqýƒxû6”»ML„¾‹Óo‰aÓ±{jû†À¶ð]¼›)m[„»sAó¡Ž}ïêaÊ$³d¯þ,É1s oß:Zª%,¶Äíív!åã¬z a¡™â†‡È/ÿñ»›ÀÀSS]SW¯ÿ4""19(H‰ˆˆˆD#vAªï*où*Veo‚\#Ńhk Ÿy‹0ðpN"=åݘTUÇoÊ ŸÃ?¼w×3”¶/%,6w<]5L™T%ÙŸ#9j>îË/Ðúp-¡[ˆÛÛìÞ#©¼õkXU} óM×ýŽü3÷¾¹» üûŒ1µ S""18(H‰ˆˆˆD#nAÊ8‘Š›ïÄÊö h=JqíÿàÙÛç?,¶’ºô½$ÇÌmûAÛÇ÷h S-GðöoÄÝõ îŽe„~©ã¹èjaʤ«¨¸î $†Ï \¼†Õ´üâ²V§ãlÀ$*nþG¬lÏrø|þWžÿåÙóлÓÂZX]Sרÿ@""]ì  %"""Æï߆^±}Öµã±H ›FÅ _¤* šQXóßM{ã;v’ÔE7’9|w÷ «Ajâ8æae{ž¼bÊÍ4îÅݳwç ¼=kO}rÎûØYÙžTÜø8ƒ§zEÜËi}ôk1|7bH ™JÅ ÿÐ>SXý Š/þöì=F@Sh¼… S""]ì  %"""³ÿ–v,Ç?V¾Û>£K†)c‘y%×}“ÌœØOþ¹_¶ŠïÄ7ÝäøkI ›¾KiÇ2Zý:&™Åî;–Ôä[I šŒÉTwüMXlÅo܃·{5îÎåx6Ÿzçm쬊^TÜô8'•ƒÔ¶:Zÿf Å"1|:×±-|¤°ê?)nøýÙ,…)‘®u P‰ˆï…þñJÛãî\|7øíkþt±0eÙ$GÏ!»àó˜Dš q/…Õ?'h=߉o"Cjò;ËWù%Üm‹i}ìŸ:ž«¢'vÿ ¤&ÝŒ3h2&‘éøÛ°ØŠd'n󸻞}ÅZZÑUÕ—Š›jqú'ôŠ”6?Iî©ïÄoP,»>¯ýûrølÚOþÙû(mzüÜ=f@ƒ1^muÍ’{õOIDä<ž—¤DDDD"Sžxùþ±]”¶<»sAó!N~”¯«„‡ä¸«É^óYŒÄ?¾›Âs?'ÌÇøÂc“žö§8&ú¥rÄyòÛ¯¸Áªì‹3h2©I7áôŸ¶sr‹-ø‡¶RÚ¹¯aþñ=çgxºõ§òwb÷Cè(m|”\ÝÝñË!9¦-|:)‚Æ=äWþŒÒÖEçþ±¦DDÎïiYAJDDD$a)šdöä|ÿè®òS;–4î:aÊvHM¸žì¼;ÀvðÖSxî~ÂbK|ŸÿÀ#{åG±û+_U´ñQr‹î:Ã,ÙÂê>€Äà)$'½§Ï¨Ž…ÏÂ\#ÞÁM¸;–ãî^CÐ|0Ò}±ªQùޝ`÷QR"·äûñÛ!9ö²×üMGøÌ/ÿ îŽeÑmƒÂ”ˆÈy¡ %"""‘Üâ{Âäè9Ø}Çtú8XG˜ÚZGiÇ2ÂæC„~é<‡‚©I7“{; ÿÈ «î'tó±}þCß#;çSؽGzEŠë$¿ä¯1[¶°{ '1lÉñ×b÷–Ý~­ÇðömÀݾoÏÚò7÷E1<=†PqË×°{ !,å(®{€üòÇoPN>óËþ·þÙè·%`±1¥Úêšeuúo%"rî)H‰ˆˆˆDäø]W‡vŸ18ƒ'“5»ß8Œ“:yƒ¶0UÜüîŽååõš:?x’h'IM¾•ÌœOÿÐVò«î‡óÊÞŠÀ'3÷vìžÃÝÅóú#ŽÀé=’ÄÈ+IŽU=¸ó]·ÅÛ½šÒ¶%xû6›Ï鮨=‡QyË×°ª•ƒÔÚ_“_ù³ø½±“$'ÝÔ)|æ—þw÷šóxœ(L‰ˆDrP‰Æñ»®î˜xÙ}F“6ĈØ}Æ`é“7ô]¼ÃÛ)mzw×JÂ\cäWL'EjÊ{È\õQ¼›(¬úO½ø@ÇɈó¯È?sß{^iì>cHŒšErÌ\¬ª¾¢ùî®gpw,Å;°‰°”kÿKÎæÂçvï‘TÞúu¬ª~„ÅV Ïÿ7…U÷Çï͈“*‡ÏÙŸ =|æ–|oïú.p¼(L‰ˆœÓs€‚”ˆˆˆH4ÊAªs˜púÃ6Äð+°{ çÔ5¦B¯ˆx¥-OâîXFoŠìŠ)“Èšú>23nÀÛ·ÂsÿI—ú&À7( B*ü-VU_ÂRŽÂê_´íÓ›x~’YœþHŒžCbÄL¬ÊÞ§ó‹ïÁ;°©ëldàßgŒ©­®©«×1‘³xP‰Æ©WH½*Lõ¿gØ4ça÷ŠIUvü.tó'ÃÔö¥ÅfðÏí•J&™%}ÙHOÿ`9ìYKaõÏÁX±}þÃ0¤âڿêèUŽ8«î§ðü/ßÚó”®Â8‰ä˜y8C/ÇÊö8ùËÀÇo܃»c¥mKðí:eÜÞZ˜²û+©lOÂbsÛ¾ü*~oFN>sußÃ?¼­ëm¬Â”ˆÈÙ=(H‰ˆˆˆD£s꘎Ñ)L ¸ˆÄð+p†^ŽÝsXç+¦Ü<þáí”6>JiÇòòÇÁ‚s¦Lª’ô´?#}Ùàî^Caõ/0¶ß¡âº/`²Õ„…fòÏÞGqíÿœ•»¶²=q]LbÌ<C/ë |üc ”¶>»}þñ—Oó7¦œþQyë×1™îg}_"}3ršð™«»ÿh}×Ýh…)‘³sP‰ÆéƒTÇ´ŒW^1•= gÐìžC;‡©RÿÐVŠÅÝõ a±ÂàìNÓUdfÜFê’wàÖ?KaÍ/1N"Æ3_CÅu_Ĥ«óMäWþ;ÅõžÕ‡°*ûà ™JrÌ\œÁ—tþ6ÅÀÃ;´Ò¦ÇpëŸ%8qàŒãÿZœSyË×Úö¥‘ü3÷R\÷»ø Iª‚ô´v Ÿùº»ðïéÚÐo¡…µ°º¦®QÿÝDDÞÄ9@AJDDD$8HuLÏè¦^LrôœAcu„IU´ý&$,´àÚzrñób+gk'“©&3óC¤.¾¥ v.§¸ö7ã+¤Œ• {Ý0© Â\#ù?¡øÒÃgëÞ;=÷V÷$†_Arôì~ã;-Zú%ü›(n| ¯á¹ò·)žá~ÎÄr)•7å侬ü)Å ÅoLÒUd®øKRSÞS>ÎêŸ%·èn‚ûã± S""oþ  %"""פ:¦it Sƒ§5«¦º ,_1e „!a¾©¦6?Ž»cù) h¿yV¶™«>Jò¢(m[LiýƒñRNšìµ‡If Z’_þcJ›?ÛÒiÜì^ÃI ›NbôìÞ#;‡©Roÿ†rPÜý€ìœ¿&1òJŠ/=Üoâ;w4½¨˜ÿ7`;MûÉ-¹§|%Y4Ϋ‚âÈ«p†^†Ý}@§0´Åݹ‚ÒÆÇðì8íG0“ã¯í×wDzØÉ«Âçæ'ÊAªØ|a¼èŒ)ÝV]³¬NÿED^qfT‰ÆÙ RÓ8:Ž3IŽ»§ßxLEOŒS^«(ôŠ-Gðl¤´ù ÜÝk^×½Û=“™s;‰áÓ(®·þ™7uµUWauëOvîí`9øÇ÷_|nꨧß'ÇÍ#¯"1b‰Á—bUö9¹FWœ8@iûJ›ŸÀ?¾ü“ÁLM¼‰ìüš“qmñ=¸»VÆpLû)#¯*g/=B~É÷ÿàÕaq†ÞZ[JDä4gD)‘hœÝ Õ1ãd˜2$F]Õ¦Æa²=1v ÝAËáòS›ÇÛûâî£Ìî9Œì¼Oã ™Z/þ¦í*« žO~`õFvö'ÁXøÇÈÕ}ïåçÏ×4¼ã97v’ÄèÙ$†_3h2VE/°ì¶íöñ›öRÚò4¥-O4îBRßBvÞ§Ár÷«ûnÃs±«z0Ù¹Mbøåãlýƒä—þÐ-\0¯{)‘3œ ¤DDDD¢qn‚ÔéÞå;$G]uò£|™`Ûåoä+å šâí‰Ò–'ñö®?ujH{$±{$;ï38ƒ&Pxþ—¸õÏal;žQÀ÷pú#såGÀü#;ÊAjïºó=ïxÎMª’䘹$†_ÝV¶gù›|ÿønŠ¥´å)’c¯&;ûmW{í&·èîó×Þ<»ç02so'1ô2 >óKÿ•Ð/]0¯{)‘3œ¤DDDD¢YjŸè9)£g“¿»Ï¬tUÛZE!a1Gpb?Þ¾õ”¶.ÂÛ·¡s(è;†ì¼;p\@ṟãÖ¯Â$ÓñŒ^ gÐd23þ0ø‡¶–#Î]n[­lOãæ—ÃTïQX™êŽ0–rx6_Â<“ÈàÝUÞ—NW½ÅC9|~gÐ%åãìù_‘_ñãNOŒ;)‘3ÌS¤DDDD¢uê˜ð%³$FÍ"5á:ì>£1©J0VÛS­{q÷®ÃݶïÀfœ~ãÈίÁî7)<ûå •®Šgp $†^Nzúðl"Ww7þÁ-]v›­ª¾$'\GbèåØ=‡bÒÝÊãø„‡±åÝEîé…xûÖÇn\ì>cÈÎÿ ΀‰Vÿ‚üÊõZe¯:ö¤DDN??Q‰Æù R¿T%ÉѳIN¸»÷HL²²|åM[ñ÷àí}‘âºßaeº“™÷œþ€üŠŸâ֯ªèÏ(àHŒ¼’ôÔ÷àí]_RGvtùm·{#9á:œÁ—`÷‚IdO®1EyáúüÒQ\÷@ìÆÅé?žÌ¼;pú ðìæ>âümޝ:ö¤DDN?/Q‰ÆùRÀLu9L_€Ý{&™mçLXlÁ;´•0w »ßxìC ðÉ/ÿ7܆簪úÆóÉw $FÏ!5å=x/¿PRǺòTSÃŒÓ<ɱ×à ¹´<.v¢ãwAîÞî5×=€·cl†Å0‘ì¼Ï`÷„äWþŒÂªû/¨×½‚”ˆÈÎr R""""Ñè*Aª< 4X½HŒšu2L9©¶wÐa)¶Óñ³Ü“ßÂÛ·«û@N÷­|]> ¸’ã®!uñ-¸õ«È/¹ÿøž8LÙ;=߉‘3ÉÌü+ìÞ#;ï£W$h>Œ·{5ÅõâÝÕå÷Ìt ÙyŸ.ïK’_þc kþë‚zÝ+H‰ˆœáì¦ 
%""".¤ÚYvG˜J¿¶N¹ò¦»})ùÕ¿ÀsÊ:Rñ S¡[ 9á:Ro*ïÏÎåä–ü€ iœ¦îÏwfΧH_úÞò¾yÅòoídÛ¾æ šöâ6¬¦¸þ÷'öwÙñJ ½ŒÌÜÛ±{ƒ0 ·ä‡×þú‚zÝ+H‰ˆœá¬¦ %""".¤ÚÙ ¬l£f“ºè:ìž#ÀvN¾©vóø·à6¾OPS‰F,‚T›ä„ëÉÎú8&[ _ŽÆ:ù&»Ø‚hëÉ0]:L…nô”?"1z6Å ‘_ùSÂ\cü&ðÉ,Ù¹·“¼èÜ+(¼ðkŒ“¯„=h2É3±ª—¬7| w ÿðJÛê(m~âÔ{UIvÞgHŽ_”¯*®ÿ¦mí¯Ð+ºy’cæ’1«ª&‘ ¡_"l9Šw`#îŽe”¶->õž#¯äØùdf}«ª/¡W$÷ø?¿b›âOAJDä ç3)‘hÄ)H¥&ÝLz懱²= '(¾ôV·þ'‰Õc(v÷å+oÚßt[;"‡xе?}ôeLbØ4 Ïÿ’ªû ‹-ñ›Àgº—ƒTûÇ·B¯„Ó{8‰Ñs°ûŒÆ$2í£EXhÆ;´¯~¥íK šž›ãlêûHOû³ò•x¹Fš÷¿ðn¹ ^÷ R""g8)H‰ˆˆˆD#VAꊿ$=õ}˜d–àÄ~üÃ;K­¯cvicÒÝ0É4¦¢7vU_°N®m[ñö­Ãݾ” ØY˜2v‚ôŒa÷ a@nñ=7<¾»ãÈî1”ìÕ5ÖÃòö¾ØéÊ´×Ͳ! ±z#9fvï»ý£•!A®ÿÀ&Üúg)íXJ˜k<»ÇÙåBúòt\‰×òÀÛÅ¿p(H‰ˆœáܬ %"""8©Ì•Eê’wc’YüÆ}Gw¾Ž uÊZC–ƒIU•ÃT¦Gy©Naªoßúr˜*4ÿá«¯ÎÆ¤×I“žù!ìžÃ ðÈ-º‹â¦ÇÀ÷bwÙ½G’wGÇzXùgîÃ?¸™·´ÎSðÿÙ»óøª®ûî÷Ÿ=žI³„„˜gÌŒcãyÊÔ´i¦¦ÍmÚä6M“fhÚ¤©ý´½iâäIœ¦MÓ´é“Þ¦7Oû¤Nlj ƒ ðŒ™ÑŒ„@óp¦=­ûÇBƒ ^¿÷öË>G眽×Öþ¾Öú­ ;„U· {þ¥ÓÇ«’=xí¯â6ïÃmÚN¿¼¢k?@tå{FGâ ÿôÏ𻯫ó^)!„˜àÞ,”B!DnäU µñ£D–>ˆfÅðûOâ÷4Ã¥Œ »˜Œ ¦0Lð\Œ©ó/ ¦œÞÉ—qŽïDe‡AùW§Ók½éCaÐâ»$Ÿþ:Îѧ!ðóî82*ç¿õ˜Õ‹Ez÷÷Ãç ì;åf@)¬k±ç݆^\=Òž¡`è ^ÛK¸MÏã¶ì ßÿV޳›~›ÈŠw‡#ñ:þéð{[®«ó^)!„˜àÞ,”B!DnäS ßôqìÅ÷¡YQ‚¾6¼žfpS—ÛÕd4˜Òôp*Ÿ ‹ŸT\L¹-ûqOì@¹Ù+LiÑ"b7ý6zq ÊË’ÚòUœã;@yw™Õ‹ˆoúð–R¤ŸûnX|ü î3å» öì X³7 Vž{1ð ;ÃöjÙÛ¼÷MïÇØú¹áíáH¼¾6’?ûâU/¤žkH !Ä÷f ¤„B!r#¯©ÍŸÂ^xš!èm )/ýf»œœ1e ÇJÁŽ£ÅK0`XçÞ3C¸m/áÛ¿B‘/#vãÑ‹ªQNŠä“_ÆmÚ yØ6k—¿åcSæBà“zö;¨¡Óaˆt…)ßG³"X³ÖcÕ¯A/¨8÷bàã´ã6ìÂm}¯íåËþüØÆYú@8¯§‰áŸý¼6ï IDAT9Á@ÇuuÞK %„Ü›%B!„È| ¤w~kÞmhf¿·%œ²çeÞâ§ŽŸÊ§ÇKÁŠ¡ÅKÐåcŠiCêÃk ÷Ø6”÷ Ÿ>zÉ4¢«ß‡^X‰Ê“üÅ_á¶îÏË@ʪ[ElãÇÂí¾Gjç·QÃ=¨«X‡K)Уa05}%Z¬xÌ‹~OÎñx­/àu¾äÏßú ìE÷ YQü®ã ?þƒ×Õy/”BLÐ+@J!„"7ò*ºç Xs6¢6~O3~o3xWjE¼‹Sv-V‚ž(L©t?nëK8GŸFù.šnp¹Å»•ç`TÌ&ºê=è¨ôÃO<ŒwòUÞR!ðkÄš¹ŽØ†ßÃ(«GyYÒ;ÿŽ Ù A ´kz¼kÖzÌiKÑ"ç^ |¼3ÇqoÇm{å’VË‹ßþiìw ™¼Î#$Ÿxˆ`èÌuuÞK %„ÜR$B!„Èü ¤4÷} kÖº0ên MûÎÿžqÁT¢ÍŽ¡ÅŠÐâc‚)¥Òýx-ûpŽ?ƒò=4ÝäRÃ$åf0«Yñkèñ2‚T/ÉŸýùeä™L¬9‰ÝüŒÒZ”›!µãoQéþÜh7lôxÖœ ˜U‹Ðìø¹ýíeñOÅ9²ïÔÁptÝwý ÖÜ[Ã@ªãÉ'þA²çº:ï%Bˆ zH !„BäFÞR†EÁ}Y¿Ͱð»ñûZÀ¿ZSÂÎ ¦ ¦„ÁT´-VŠfFFžì‚ánÜÖp?ƒ 4Ãâ‚)•MbÖ­"ºìh±‚á®p5·®yyÙó7[÷á°@»“"½ý[Ù¡«¶Bá„ûÕsPn³jöÜ[1*ç¢YÑs¯;)¼ÎÃ8‡ŸÄëVŒ¶•rRh‘¬é«0g¬Á(ªLÉ܆]8'v[ûAÌêÅ é8 Ï‘zúk¨lòº:ï%Bˆ îH !„BäFÞR…SIÜ÷瘕óP* èn GH]“80>˜Š W…#¦Î†S† J¡œÁà)Ü–ý¸ ÏA3m‚ÁÓDV¼{ÞmhV¿§‰¡ÿó)Tv8/£èª÷YùëèñR‚áž0 <&ÇŠc‚)ßEa֭ª]Ž^TúÙ`Jôw€E• çÈÖpúá[Íqr@J!&¸cH %„B‘yH•ÔRpï1*f¡¿§™ ¯T0y~¤Å(ª‚HAX=R€¦[ ”“ÂïkÃkïä+ƒ§ˆ®ùÖœájn§1ü_ŸF¹é¼<Ž¢k?@tÙ»ÂzXÃgH=ó(ŠÉH>fŒþ¥@aկƬ]Ž^Pš>ò>¾W)œ†gImû*=p]÷H !Äw ¤„B!r#_)£|&‰{¾€QV‡ò‚¾6üÞV®í”° X1Œ¢*´H!DâhV M7!PÙaü¾6ÜÆ]˜µË1k—£&Þ©Ã ÿäsy;'¶îÃDnxZ¤`ð©íßmÌ(²Éõ¸qîwi:zÁÌé+1k– ÇËF~wHe‡Èìÿwœ†çPC](ß¹.Î{ ¤„b‚;„RB!„¹‘7Tå<wÿ)FI-ÊÍà÷·ô¶29V£¸:œÆgÇÑ" ¨ÀE¥Ð ;ü(ܶ—þïχCwòPlÃïYršÇh'ýÌ·A7&wûŒ ¦t ½h*öœ[Â:ec Ÿ»üîFœC¿ÄmÙÖùò½¼>ï%Bˆ î H !„BäF¾Rfõbwý zq *›$èÀïmar#[»•Â(Ÿ‰^\®º§átCM~_+Ãÿõ‚dO^GñMÇ^xwX«·…ÔŽo£™‘¼hŸ³üþŒŠ™Äoýz¢ltæÞÈ?PÙa¼ÓGqý¯íe‚Ì ~^¶—RB1Á=[)!„BˆÜÈ›@ªv9‰;?‡^X‰Ê ✠kHåSà¹hv £b6zÁ´HœÑ°ÃÍൾ€Û²çøTf0¯¶-¾ùSØóoG3müîFR;ÿÍŠæÕ6ƒ§A$î8œêŒd³ãá ‰#SUz¯ýÙC¿Àë8ˆr’“«–Ù¥‹H !ÄEI %„B‘#ùHiXõkˆßñiôD9AºŸ`àAÿÉ|ìê‚C•bTÎE Pn•êÃ;s¯e?Ή¨l2/¶)q×ç°æl ëa9Fzç?Œnù#îF³ã$îúÂŒ YQìwaV-¼pÄT_nË~ÜÆÝøgŽ_Õ`J/¨ à`”Õ£²Iœ#Oá6î]%0šÅĨ˜MìÆ‚fà9Fjë#øÝoþ3 {ÖÍØ‹ï §hF‹Ãý¢Tf¿··õEœ#[;¯ZO)!„˜ Ï!”B!Dn üó{”9í¬k1§ÝSš>úzê ‹gÛŽ×ñ:AºTÛÎa´ØÍ¿Kdé ÀïmÁïm/“§{]G/šŠQ9ÍŠôŸ$½ûû¨Ë ؔ瀗A/™Ž½ð"Á”ïô¶à6ïÃmÜ×uü+?•O/¬¤ð]_C/©Ee‡É|¯õEдÍN„ûÇ÷ 2øÝ a;5‚sl;nÃ.”—¾â#¦ôÂJâ·}kæMç©®9¯euå6È@/ªÁ¬Zš†×y„ÌÞÿuÅ¿Fe†F uÏ ¦æ^¸*_Oóh0åtŒuv顇Q9‚·=^ŠJõ“yé?ð{ó¯]T€½àNìw‚ p›÷’ÜòUTzàê>üX1ô²:"KÄš¹=^2²ÿG‚©Žpåˆ]x'_û—¼Ù`J)!„˜àš,”B!Dn¼a 5æáמ³sú ÌiËЋ¦Ž 7*3ŒßuçèVœÏ¢Üô«1¥N%~Çg°êVRm##pò´Ï¨›èEU˜Õ‹ðN$³÷_¯Zð0˜²0¦Ìž{+ÆÔùNåëi ëƒ5íÆ輬ÑgfÍ ü+´hAªÌ ÿNÐ×–íøØKÂÑJ¨·aÉ­_Ee“¹yŠbL™M䆷cÕ.G‹ŸkC'®œØúnÓ¼S‡Æ›—{.H %„\‹%B!„ÈK¤Î±f­Ç™Ê§WL ŠUv¿«çÈœÆÝ¨Ìà˜©FoŽ^\MâŽÏbÖ.HÍß®›èEÕ˜Õ‹ðN¾Bfÿ¿ƒ®_Õ¯=L™Uó±fm‹ªŸLyÝ a}°¦½¨dÏ%MÃ4§¯ àþ‡Ñ"É2ûxå ç’ïYþ¬YëA8ÇwÚúÊÍíjŽz¼cê|"K߆Y³-’8׆Ù$~oóèʉ~wÃØÇ(.5˜’@J!.N)!„Bˆy3ÔYVýZ¬Ùë1«¡U¬¦ 
ÂçîœcÛqžE¥úßôo4Jk‰ßñ9Ìš%áhžvüÓGòw§ëÖÈ©0r[_$óÂÿ‡6¦F×Õ¤2C`F0«bͺ£bÖ¸©|ø.^× œ#[âکþ_LYõkIÜ÷%4;N0ÜMfò®Y”ï]ùX3Ö†ÔÑm$·>rÍŠçë…•˜ÕKˆ,¹£f šaŸû­ÙpD¢Û¸·y~_ëØÇ)Þ(˜’@J!.N)!„BˆysÔø^«n5Öœa0U8ÍŽ‡+ó©•ÄïnÂixçØ3c–²¿tFY=ñ»>9u~X{ #¿)骅¸Íûȼø#4ÓÎEW{´íTfÍŽcL]€5sFùŒ°íF(/Žv;útXü<ÝÑpÆšµžÄ=_@³¢CgHïùTº/ïšEù.±5ïÇœ¾çÈ’[¿vmk•izQ5ÖôØ‹î­;6ú›GpO<‡Û²Ÿ`èô¥m«RBqñË®RB!„¹ñVFH]L­Âš½³fqLY±pZà‡5¦zšpžÃ9¾}̈©7Ía”Ï$q÷ŸbL™Rýíù=eψ WaN]€Û¸‹ìË?†œR¶]L%0ª`ͼ £|æøâçgƒ©ÃOâ6î&È Œ+\oÏÝDü®Ï£™‚ÁS¤w•Ê»fQ¾GìÆßœv>ÙƒOÚöIò„¤c”Lì[EdÉýå3ÃÐ÷ìoOõãuÀix¯í¥7\P)!„˜àr+”B!Dn¼µ@j´ûÆØPÉœ¶ {Þm#ÁTUXcJÓ!ð²ÃáÊn»qŽ>=fÄÔÄÁ”1eNH•Ï ©¾6ü®ãù»ÓÍFq5Få|œ;ɾòš¹]oΦ̚%˜õ«1ÊgÔ )/‹úÙC¿Àm~• W¦›;‰;?†E0ÐAz×÷¢öùÆ÷ˆ®ûp8•2ðÈxœÔ3ßšdOJ:Æ”ÙX3nÄž;FiݸSA²ïä+8ÇwൿÖp» ¤„b‚ˬRB!„¹qe©‹3k–bÏߌY³td*_, ¦|— 3HÐÛ‚Ó´çðSa]£°+ÈùÁ”1u>‰»ÿ £t:ÊËôµWÌ9ϘQôâÌʹ8G·‘=ðÓqáÏ5è‚sA05}æô•¥ÓÇOåsRøgŽ‘=SöìÄoû#0,ü¾“¤wý#\B1ôI'ð‰Ýü»Sçƒï‘yõ1ÒÏ~gr>06Få\¬Y7cϽ½¸zü¦ w……ÏïÀë<„rRã^—@J!&¸¾J %„B‘W3:ˬZ„½ð®pU¾ÂÊ‘SøAz¿·¯e?Α­©³µ‡Î$fÕ"÷üzqMHõ¶à÷4åïN·âá©)sp?Iöà/®q Åû]e†Ðbص˱j—£OC‹ÄGÞ¾î:ˆrÓXõkÐ"ø½-¤Ÿý.uµ·ÉD>ñŨ˜R/ÿ'é]ß›ÜNV £jöìX³×£L÷z0؉۸çÄNüÓGQ^6ÜVå?\öÉÉP!λ®J %„B‘¹¤Î2¦Ì%²ôÌÚe艊ñÁTf¿»¯õ%œãÏ ý;³f)‰{¾€^XNëi"èmÉßn'0Šk0*f}ýç8G¶L’@j´Kθ`*^Š9mYL•L;·¢bà£|7,È®é#Ôwò²Y”RÄ7~ £|ø.™þôó?ȨH³f)ÖìX3Ö¢'ÊÇn~_; Ïâu7¢¼´ŒBˆ‹]O%B!„È\RgSæY2LU.g¯•ÄïmÁk{ çØv‚ÁN̪$øKôD9ÊMãw7ô·åog×N —Ô†Á}õ'8Ƕ„<“î×r~0eÕ­Æœ¶½¸f$D³ê››!óâðZö£E ó®mb·|£´å;döþ¿döÿ0¿Ž­hVír¬9·`Õ­D‹•œ{1ðñûZqŽïÀ9¾ýáâþà!¹ !Äy×Q ¤„B!rãZRg³Â`ªnU8•Ï)êí{™üžfÜãÏ]ý>ôâê°~QOAÿÉüíìF ÑK¦a”ÕŠìËÿ…sbç$ ¤F5cƒ)½ ³~ fõŒ’i`X£ïT™A¼ö×pN~wãÃFå܇ä („çÝ $B!„ÈkH 9ŒÒéØKÀžu3Zá”ÑS¨€`¸ •«H!*3Rƒüª•ù&ug7R„^Z‹Q:Pd^ønóóhV>7ç˜*ª"¶ê=•óƼG…¡bª¯ó0nÃshÑ‚I¾iñÛ>^8åeI?÷]²¯þ$ߎ®qç„^8«~ Öœ˜5KÆcÉP!λŠJ %„B‘×6ó­ieuØ‹îÁžu3zQèfø² FÞ¦ƒ p›÷¡2>„OúÎn´£t:zÉ4P™ý?Äm{9¬Ã”?[(‚¬y·»éCaSyYPA*j:ÊwPÉ‘`êÄ‚áîÉ;•O7‰ßöIô‚ ”›!½óïɾþx¾>R;'ô’ZìYë°fmÀ¨œ‹fE%Bˆ‹]=%B!„ÈÉHyˆ6 ŒÒ:ìù·cͺ9œv6˜áw7à¶¾„fÇÑ s‡ðIÛÙ•`”Ö¡WCà“Þû¼S‡ÎÛ–ü ÒXsn!ºú½aÛô4 œB/®F/®>WÌË ÷àwÄ9ñ,*Õ7&˜ší¦é±Û?…/C9)RÏü-Îá'óýÑjܾ5*faÍ\‡5ëæ‡Íª…ÉP!λjJ %„B‘“+Ó!4lô’ì¹›°ÞŽ˜¥P™aüžf¼îtÓݘð!|Òm[¼£¬½p*é]ÿ„ßÝpÞ6ä失æl$ºâݸMÏ“Þó}ôX1‘ގƉ•ŒÔS('M0tïÔAÜÆÝ¨tÿä ¦Ì‰ÍŸB‹• ²IRÏ<Šsdëõòˆ5nßSç=\ô›ÿð\…⼫¥RB!„¹1Y©±¢«ßKlýGFþK1nU·‘"ç~׉p4Ž®Oø>i:»ñrŒ²:ôÂJð=RÏ~‡ ¿}ìfå ÀšµŽÈ²wà¶ì#½ãÛø}'Á0±goÄ^pFå<ôh&(…r3ƒ§ðÚà6?Ö£ºÖÁ”%qû£E‹PÙaRÛ¾slûõö¨(”ò.ûä·ä („ç]%%B!„È|¤ìE÷¿íÐÌ*Ýr3hñÒs«òÖ.ò»ñNE·¢ i<„Oz¢½¬>¬Uä;¤Ÿù[‚á®ü<€ «~-‘¥à6î!µóï:Îí}3‚5g#ö‚;1¦ÌA†£Á‚å¦ :ð:à6íE9ÉkLiv‚øæKe2 IDATO¡E P™A’O÷ij×åy¯”ÿpÙ'w<$W@!„8ï^ ”B!DnäC ¹ámÄ6~ ÍŒ tÞ÷oD—¿£|æ¸SÊwz[ðNYQlò’Ò ¦„T¢åeIm•êÍÏŽ»Ǭ_Cdñ½8'v’Þù‚¡Ó¾×Žc͹…È‚;1¦ÌF‹ŒªWÙ$þ@^û«¸M{ÁËä<˜Òìâ·ÿ1šG¥úI>ý5ÜÆÝ×åy/#¤„b‚{RB!„¹‘SöV¼;œ²gXý'I>ù7x‡0Jk1ëoÄž;Æ”Y£´a$˜ê;‰×yͰC`Rl^85 ¤â¥(7Cê鯣²Cùy™QìY7c/¼ çØ6ÒÏ~÷WŽøÒb%Øs6†íV1-gt*Yf¿¿ ¯ý5Üæý¹ ¦h±bâ›?‰fÅR½¤¶<‚Û¼÷º<ï%Bˆ îQH !„BäFÞÔºéwÀ0ñûZImý^Çë዆‰^8«nöü;Â"Úc¦ò¡ü¾¶pÄT´Ü,(ÿšn^X…^>=VŒrR$·>n:?;5o3öüÍ8‡Ÿ"ýÜ?¼áˆ/ =Q62•ïŒò™##Ú*;ŒßÓ‚×önëK¸W7˜ |ô¢*b›>ŽÄî"µå«¸­/^—ç½RB1ÁÝI)!„BˆÜÈ‹@êÆß"¶ö ›ø=Ma Õyø¼¤Ž^4«vyX«¨jáø` ðû;©q^– ÝwM¶G/®Á(«?W<{ËWQ¾“§=w {á=Øs7=øé]ÿ„J÷_âÎ0Ðc%a0µðnŒòçÚM ¦šqÛ^Äk{ïªSÊ÷0Êêˆmøý0:Mò©¯à|åº<ï%Bˆ nkH !„BäF>R±u&ºú½ ø]'Hnýþ™cc»Œ &ôâj¬ÚXónŬZ„fÇÇ}^0ÜE0Ônš =›ÓíÑ‹§…ÁËHñìÔ–GP9þ WL¹ámX³7}í§¤÷ü3*s™Su=Q†=çìù·£_L á÷4á¶¼€×þÚ¦”ç`VÎ%ºîãµÊ’[¾‚×~àº<ï%Bˆ‹“@J!„"Gò"ÚðûDWþ:h:Þ飤¶~ ¿»ábÝH.LÍÞ€Y½xL€ ’=CgÀM¤!G£”ô’éá©H‚ ÕGêé¯Aàçåñ£|èÊ_Çšq#ÙW#½ç_PÙá7÷#S0íYëÃSå3ÁS´>3ˆß݈۲¯ýUPêŠSÊMcV/!zӇР¿¯Ô–¯à:t]ž÷H !ÄÅI %„B‘#ùHÅ7ý‘eï ©SI=ýuüžæ_Õd\0UT9m)öì ˜5KÐb%ãÞ¤úPÃ]¨ìðH0•½ªÛ£—Ö…”'î&µí‚ òó |"«ÞƒU·€ÌKÿAfï¿¢œÔ[û\ÃÂ(šŠ5{¤øyY=èÆèË*;Œwú(^Ë~¼ŽpÓ[ ¦”“œ¾ŠØš÷‡µÊz[H=õ7x§^—ç½RB1AB)!„BˆÜ˜ô”¦¿õD–¾ 4 ïä+¤¶}¿¯õRþ˜qÁTa%æ´eX³nÆœ¶=^6þ!=3D0Ôþû*SzÉtŒŠYhV”`è4©mß䪭wµ)Etõ{1k—Ùÿïdöÿu…Š´kf½°kö†°øy٠дs_?L¹M{ðGꊽ™`Je†±fÞDtÕo†Twɧ¾Šßuüº<ï%Bˆ î;H !„B䯤¤t“ømDdñ} i¸­/’ÚþM‚þöËé^rA0U³kæM˜Ó–¡TŒXw’ƒ¨T?Av¼+Lé%µSæŒÖ*J=ó­|îº]ý>ÌiKÈìýW2/þåf®ì·˜‘p5Ź›°çÝŠQ:ô1Sù²Ca0Õ° ÿÌQ@»¬`Je±æl"ºâ×Ââù§’Üú~wãuyÞK %„Üo$B!„ÈÉHif„øæOa/¸3 
¤š÷’Úþ-‚ÁSoæÓL%Ê1k–`Í\‡Y» ½` hú¹‡v'I0Ô…JõdÁs¸#™ÆR~_+©‡6fÔO^Ñu¢«?€Y½€ôžï“}é?Q^öªzÉ4ì9±ælÂ(«ßfÙ!¼Î#¸»ðNE³bh¦}Ñö+HõYx'‘Þº‰×yˆÔ–Gð{[®Ëó^)!„˜à>#”B!DnLú@ÊŽßüÇØóo4܆çHíøvXŒüÍ*cƒ -V‚U» kƘÓn@/¨_H;›$Hõ ’½áª|^–·Lé%Ó1*ç„ų»I?÷ãB•¼bØD×¼sê|Ò»¾Gö壮rxÍŠ¢—LÇž}3ÖÜ[1JëÆOåsRx¯ã6í!Hõž7ÊíÂ`*HöYò‘¥„µÊÚ_ k•õµ]—ç½RB1ÁýE)!„BˆÜ˜ôT¤øíŒ=wÎ±í¤ŸýÁp÷•ÿ®hæ´°f܈U» ½p*Ö¹‡x7CìB ÷¤ûÁsË/Fn”ÖaL™†…æ8é]ÿ8®`w^1"ÄÖ~£r. Híü{²¯ý|77LJA/«Ãž½kî-%Ó/ ¦Ú_ÅmÚKî›púe0ÜCdÙ;ˆ,¾7 ¤Ú^"¹í—954H %„ÜW$B!„ÈIHÅKHÜþ¬Y7à~ŠôsÿŽz¹²ßÄÙQ3šǬ]5cm8•¯¨ ͰÏ=Ì{‚áT²‡ Õ¾sY«äé¥u˜#”×yˆÌžï«‡”Ww+Ftí1*f Híø6Ù×ßËíï0#3±foÄš½£xÚ…«òu¼ŽÛ¸› 3pA0${‰®x7ö‚;@Óq[ö‘Úö蛜:ùI %„ÜO$B!„ÈÉHéÄoÿ4ÖŒȾþ8éÝßG¥û¯VW”Ñé\ºŽ–š±³f zQ5š=÷PïfÂúRÉ^‚dÏH©7¦ôÒ:Ìʹa­¢ö×HïûW´| ¤"D×|£|©í’=ô$Þµù=v£lÖœX3oÂ(©=/˜Âë8SéÑ•U²—ÈêßÄž{kH5î&õÌ·ÞâÔÐÉK)!„˜à>"”B!DnLú@ªp*ñ;>ƒU· €ì«‘~þ¨ÌàUî‘j ÆSkÃUùª†Á”# ¯Ââç*=Hì §ú¿j*ŸNÙ«œº‰Ûú"Ù—þóìGå_Ç=ZŽ*¾KrÛ7pŽlÀ¿¶¿ËŽc”ÏÄš½kÖzŒ’iç¬Oá|·á9üTv˜èš÷RÎñ¤w|; ¯CH !Ä÷ ¤„B!rcÒR%ÓHÜþiÌÚåd^üßdöÿ•ÎU×”s#¦ ¬ú5añóêÅáT>;J¡ÜTL¥ú†NSçí^M™²7; ¤šž'ûúÏ!póòøÑãeD×¼½¤åeIm}çØ3—5…ñj¶™fF0¦.Àšu3ÖÌuÅÕãGL9)ܦçq›vc/ºkúÊ0:²…ô³ß½ SC' ¤„b‚;ˆRB!„¹1Ù)£¬žøíŸÆ¬Y(2ûþÌ‹ÿå¤rÝEel¸dÕŒ˜ªY<2bêl0 œ4*;Sƒ§ÆøÖÍp„TÅ,Ð ÜÏâÛ†rÓùÙqOT[ó~ôâj”›!õÔßà4<{ntÙµýuŒ ¦ªcÏÞ€Y· £¨züJŠé~@C‹‚¦“}ý礟ûnƒÏÜ’@J!&¸sH %„B‘“>š2—øæObV-¥Hïù>ÙWþë8çSk°f® GLW¡Ù‰0˜ |”“Be†R½áˆ©ÀÃÂ(ŽQRÎÑm8'vL¸úÛd§V]ó^ô©('Eò—Ûüü$ ¤.l3ÍŽcV-š½súŠ‘`ʺà/ÜÆ=¤wÿ~OÓuyÞK %„Ü1$B!„ÈÉH™U‹ˆßö ŒÊyø¤w}ìkÿºæÎø`Êœ¾2}S³½pd*Ÿ>Le† †Nã÷µ¢Y1ô©Sf‡SÃýçø3y{üè%Óˆ®|za%*;Dò‰¿Äm}‘ ¦*NŽÇŒÑߥE˜SŽS«0ŠªÆ˜"ðp÷=ð3üî†p5ÅëˆRB1ÁB)!„BˆÜ˜ôÔ´ˆßú‡á7ß'õìßãüÅ$¤&ø½ÓWbÏÙŽ˜*œŠfÅFƒ© =€ßÛ‚Q\ƒ^4ÐÈø)αíãVïË'zi=Ñ•¿Ž^PJ0üó‡ðÚ_crR££¿O³b˜ÓnëG͸ñ‚vP™AÜæ}d>ßÓŒJLòm»4H !Äw ¤„B!rc²RVÝ*b·üßå3PžCzÇßá~ å;“z¿šÓW`ÏZ9톰ø¹MCù.šf€aÙW#{àgè…•yyü峈¬ø5ôD9Aª—äã_Â;u(_;8.™Õ‹‰ßùYŒÒº‘é†jܪ|Aª¯e?Ùƒ?ÇïiAe†Èç`J)!„˜àÎ ”B!DnLú@jæ:b?ŠQZÍÞþ-œcÛÆ ŸÄÌš¥Øón ƒ©Â©hvl|Ð1ØIöõ'𻎓‡Q9è²w¡ÅK’= ÿôÏðÏÏ¿í(ŸIüŽÏ`N]€ò‚¾V4+ŽVPfFFÞ¥†ÎൽLöÀãø}m#EÏó¯Ý$Bˆ‹“@J!„"Gú½]Mæj{Î-Ä6ü^¸Š›“&õôÿÄiØ ¾—WûÙ¬Zˆ½ðn¬ú5áh(Ý_|‚T/~O~ÇA¼î†°øy>P ³f ‘¥oC‹ aø¿ÿ4/ ›Õ‹‰oþ$FÅ,”“&ó↺°êVbV-LA²·õE²¯ý” ÿ$ÊI“OÁ”RBqqH !„BäHzçß+·ýÕI;ªÅ^p'±u¿ƒ^4•&¹õܦçó'´9O䆷»éCh±â‘d€pæ˜ï¤ûñ»›ð;á>AÖŸš¬súJ"KîG‹ œbè'BÐ2ïÚŬ]NüÖO`”Õ£|‡ô3ß"{ðh‘B¬Yë°f¬Åœº­ ͰÃ?ò]‚d7nÓ^²AÐ׊òò!˜’@J!.N)!„Bˆ\=˜ºiåwÀ9¶¯ý~wäú}‘%÷½ñ·Â¢Ù©’[þ&\Å-ðór›ÓW„ÁGi]8Ò&;ŒfÇÑÌ0äP¾‹Jõã÷4ã|·ãu4 E 'ß±ã»X3×Yt7š]€ß߯ðcB0t:ïÚŪ_KlÓ`”Ô¢Ü4©߯9úôèÔP-Z„=ç¬ú5•sÐâe£#¦”—%êÂkÞKöÐ/ñûZGFðMÞg ¤„ââ$B!„Èá³)€r3øgŽáÙ‚×ñzøP= úd‘eï ºæýèñ²°hö“_Æ;ù ¨ /w¶5c-ñMG/ž†r†É¾òZ¼ô\ñó³ÓÂ| ݇ßÓ„×ö2^û«€6©‚)å9Øs7aϿ͎ã÷63ü_Ÿ%Hõæ_»ÌÞ@|ãGÑ‹ªQN’Ôöoâß92ï\ñs=^Š5wVÝŒŠ™hñÒ0LT ¼ Á@në‹8¯?ßrÒ§H !ÄÅI %„B‘³P!«ÎmG{ø§áÛ†×þþ@Ç5]ùDV½=VL0ÜMò—wêõI–½Öì Äoùza*3ˆsl{2©-V‚^:#Q†þAà¤ð»ðN¾Œ×þZØažÁ”rÓØ îžwšÅïn`èÇŸAe‡ò®]ìy›‰møzA%*3HjÛ7pwMxìë…•X³7`Õ¯Å(Ÿ+§ò©å¦ úÛÃS~J0tfò÷H !ÄEI %„B‘#™½?PÖÌuèeõŒ ¦Ü4Þ©C8GŸÆë8@0pꚌöˆ®ý ÑåïB‹ v’üÅ_â>F>®l`ÏßLlýï¡T¤pw«I¤áô½H!záôx9æHƒ#ÁÔq¼–ðÎß¹¦Á”rRD–>ˆ5{šaã9Îð?…ò²ù×.‹î k•%*Òý¤¶>‚Û¼ï y½°{î­˜õk0Êg G‹@7!ðQn ¿ÿ$nÓ^œC¿$îš4Û+”B\œRB!„9Ò÷­Û•9mVÝ*¬úµèeuラì0^ç!œ#[ñ:^G{ä0˜Š­û0‘ÞŽIà÷·“|âaüî&ò6Zt±›ÿ/ôx)A²¯íå‘m9»=è&šE‹¡TŒ1¥Tz¯»¯y/~w#ÊM_“`Je†‰¬|7ÖŒ›Ð ïÔa†ûtÞ­€Yú`X«,^Fì!µå+¸m/]òH<½¨{ÁíXu«ÑK§£G Ã0Ñ÷²ƒ}m¸ÍûpŽlÁ”RBqqH !„BäHߣ›ÃŽ—¦aÖ®Àª_ƒU· ½¤ÍŠŽ †ðNÄ9üÞ©ƒÉžœS±%²øþpJXo É'Âï;™·û;²ìÄÖþZ¬ˆ`è Þé£àe¹hÀfXhf-Zˆ/Ã(œ:fÄ”Beð»›p›÷âwÈy0¥2ƒD×¼³n5šfàu¼ÆÐcŸËËú^Ñï&ºú}a» w…µÊÚp¹Á§QZ‹½à.̺UÅ5h‘èFLeñ{Âörn#Hõ]³í•@J!.N)!„Bˆ ¤F{b:VÝjÌúÕXÓW¢—L?b*3„×ñÙCOâŸ:D¸z5¦4ø¦?Ä^t7šaãw72üø—òr·‘ "ºê7ˆ®~/Z¤€`à~_*3ð«ÿ̰ÐÌš@+šŠQ0%œv¶M²Cø] ç‚)'•“`*H»éw0§/Àk{…áÿþ|^¶LtÍûˆ®ü 4;N }ò¯ñ:¼éÏ3Êgb/¸#1U\…fÅAÓÂUÓø]'p[_À9²•ÎùöJ %„Ü©%B!„È ©Ñ'j«n VýÌÚeèÅ5c‚)…Jàu¼NöÐ/ñN ª¯t0¥Ä7 {þíᜮã$öÅk:²ä­õrõ°&ÖŠw‡#¾:úO¢2ƒ—¸?¬p*ŸE+ªÂ(¬L9)ü®ã¸ÍûðÏG9É«Léâë?‚YsJùx­/0ü³?Ïdž!¶î·‰,{'šÃï?Iò—ßuâ-²1e‘E÷`Ö.WQu¨<•êÃïƒDçØv”“ÊÙK %„Ü$B!„È ©³tkæMa0U³½¸ú\0¥‚T^û«8‡žÂë<„rÓW.˜2Lw|kî-hèx‡þٯɈ’+B7‰­û°&–iãœ"èmE9×ý9šG³"hS0Š«/ ¦ºÃS§¢ÜZ¤àŠoNî'¾ñ0«¢·iÉ'þG>}èÄÖÿ.‘¥o 
Û¥·•ä/þ¿·å µ»Y9{ÑÝa0UX9z)7ƒJvãuÀmÞ‡{bgN‚) ¤„b‚[‚RB!„¹ñ†ÔhMÞ³ ³~5fÍô‚J4+høÉž0˜©1¥<ç-×ÒÌñ»>=k= ð:0üÓ/äå*nñ ¿½ä~4ÝÀïoÇïm÷Mº‰f'À°Ðè%ÕçVì#\)ÑïiÆmÚƒßyå9hvôŠmŽJ»õ˜•óP¾ƒÛðÉ'¿œí¢›Ä7~{ñ½h†N }âápeÉ+ö„£¡6ÆÔyDÝScŠÕ+/K0Ô…úpL5îÃÝ«D)!„˜àr-”B!Dn\r u–abϽ«nFÕbôÂ)h¦ haáæd7ÞÉWÈŽSÞ%¯TvA§ÐŠ‘¸ç‹X3Ö TÖ(úÙ®^ͪ«ÝÉ5#Ä6ýö»Ð4= ¤zšÁ˼µ>LézAÅyÓ+ðÃïiÆk~¯ã ÊwƬ³TfømŸÂ˜2åfpŽï µõ‘ükÃ&vëb/¸M7ðÎ#ù󇆻¯Â—éhV4œÊ·äþ0˜Š—ŽŒpS(7K0|ÿÔ¡0˜jÚsUX ¤„b‚Ë´RB!„¹qÙÔٛڽkæ˜S %ÊGBÖÇIöà¶½„sð—x‡Ï>_ÞwD$îû ¬é+Q¾‹×²ŸáÇ¿”¿\+Fü¶?ž·à$~wøÎú-RšS¥ÓǘÂwñ{[pwãu@Þ¸àêr)'Eü¶Ob”ÏD9)œ£O“ÚþÍük3Ö*i¯óÃ?•_ª£Ù ŒÊ9D–>ˆY½=QšžCn†`ð4Þ©ƒ¸MÏãµ¾pEƒ) ¤„b‚˳RB!„¹ñf©ÑŽ›Çž{+挵˜S梔„ cªO¾LöÐ/ñÏ¿¼ÏŽRpÿØӖ…SÂòµFј}¿ýÓØsnà÷Äïi G‘]QzXÌ\=V†^^?>x <ü¾6܆çpÛ^ ÃøìoQ^–øæOa”LG9Ã8‡Ÿ"µãïò¸]6‚R¸í¯‘üù_ä¦È¸¦£ÅŠFjL݃9íôx  …ûØÍ žÂëx·y/nË>ðÏ/—ò޶RBqñ˲RB!„¹ñV©ÑœÅ^x7VýŒ)³Ñb%££s”›&èÀm}çèÓ—¼z™+¦àÁ¿‹f»Y܆gó³FÑÙ퉒¸ësX3oF.Ao[H½ÅZ[¿ò;£Eèñ²pÄ”?÷bàã÷ŸÄmz·e?蚦]òg+7CâÎÏ¡W£2Cdþ‚ôsßÍÃvI¸óO°f®à¶½Lòñ/¡®ÔȵK¡è±Œ© ˆ,º³vY8Úíì¾ö²xm/ã¶ìÃm{é-SH !Ä÷ ¤„B!rãJR£9;޽ø^¬ºÕ³ÑbEhºÊG9iü¼ÖpŽnÃïiúÕÏèñR Þöÿ`LrÓ8G·‘zúëùÛÉ‘¸çϰêÖŒR­øÝ¼ÙQ.—ûÝ(…/E/© GPøø§ðš÷â6?BCÓõ7üL•M‘¸÷‹è…•¨ôÙ?%½ç_ò°] IÜýgXõkQÁ˜©¡×â™DÓÑåSç?_6.DT^– ¿·õE¼–}¸í¯¾©`J)!„˜à2,”B!Dn\é@j´C/!²ðn¬úµèå3Ð#…`àû(g¿¿¯õ²‡Ÿ³šÙøj=QNÁ;¾‚Q1+¬Qtd ©íæo'7^BÁ½_¬]Žò‚žæ7 宸oˆòÑâ%eõãFá Á`'nÓnÜæ½a0õ+FL™! î½ ‚ ÕGöÕÇÈìû·ük—XqX<¿n%Ê›$SC5 ½`JL-¹³j1Z$q®©Ü AnëK¸MÏãw3¢ëƒ) ¤„b‚˯RB!„¹qµ©³ôD9ö¢»±f®Ã(™ º®È—$èmÅmÙsd+Áp׸j½°2 ¤ÊêùÿÙ»ï(9®ûÀ÷ß[U{¦{ƒH3)f‚$DQYTX{u,Ù–dÛ’`¯³½ÖP–Ãóy»ÆÚ²ü¬³ö£d[ÏkËÊ –@`ŽˆŒÁ OžžÐ±ªî}Üžˆ ™îÁïs%Q“ºBwU}Ï­[¦œ§²÷! »þ¶n×µ“j!õÖN¼•WÙ‘.CÝö){‹qÂKƒ1¨X·uÃä­}€ Sã}öV¾î'1¡rfÎ1eÂü©ûÿ'Ù„. Q~î‹”žý×úÛ.É&RoùÞªk1Aÿð#ä¿ógµóú2íx+® ºåx+®œ¦‚2zø8þñgñ>Npfß´9ÉæS¤„bžã£)!„Bˆ…q±ƒÔäEuª…Ø5÷Yw+N¶I€r1ÚÇ”ÆÚI›?‚ë«^ˆ¯¤á‰“]…)SÞó Š~¶n×µÓ°Œô[¨Þ‚X":†ê^ÜM¡ªOçsšVá$›˜˜PcÐc½øÇž¶aÊ/£\0˜Ò¸QÒoý*‘EPzîÿP~þ?êo»¤ZH½í¼WÚ uð‡ä¿÷—µv‰„“]…·b Ñ-¯Çk¿fÆDõ&¬Ø¸{ô üî'z€'vv˜’ %„ó|ÚJB!„X ¤&/þW»öíDÖÜ„“iGE′aª#èÂ?ö•ƒ;!(Ñøóÿ/NÃr;iö¾Jññ¬Ûuí4® }ÿ§ì-ˆ~=tŒpèXm¼¸h '‘EE“¨dN¼ÑŽd0=ÞOpây*]»1…a§¤ßÚ‰Š5 Çû)=ý/”ôµúÛ. 
ËH¿íOp—mÂø%{kèþªv_ov5ÞÊ+‰^ñz¼U×L><&ÂÔ1*Gvãw?I8pdÎ9¦$H !ÄÜ$H !„B,…RÆNvÕ´0µrr´‡ ʘbŽ ÷:wŠèå¯Åi\)RzáK”žü|Ý®k'ÓNúíjoA¬‡£k%HMˆ$pR­¨H•hÄIdÀñì׌AçŽ?Kåà!'ußï b)ôX/¥'ÿ‰òKߪ¿í2+V^ú6…‡?]ó¯ÛmZƒ×~5Ñ-¯Ç]~¢c¾ IDATÅÙ#¦»m˜:ú8zèøŒ9¦Œñ%H !Ä\g(¤„B!Æâ©‰+j7³ŠØÕo#²áTºer´‡ ʘJÞ†‘HSÈQ|êó”_ü (µ8O@{Õ¡ƒô;þ'ÓnƒÔÐqôð±Ú|±^ '½lž0¥Ñãèü nÓj;Bjä Å'¤²ÿ{u·]œL;éwünÓL¥@ùÇ_§øèß×Ï~Õ²¾:bê>Üe—ÛÛa«LP¶aªk7~×c„Ã' ôe„”BÌC‚”B!ÄYÔ eOýPn§©ÃÎ1µþVœTËTüÀ BŸÊ¡vnŸ—™§¦ÃAóZÒïü { bÙ>ipÑçú‰/:ŠÓ° I¢âiœD¸SaÊn=ÖgƒÔÞ‡êî=0;–_ü2ÅÇþ¡þ–£uÞÊ«ˆn¾wÅ–·òú„ƒG©z¿ë1‚ác4ÿÚ:åP!f•HB!„X‹¤&OQ‘nË:¢WßOdÍkpÒ­ œÉï0…þ±'ñ»ŸÂ?þŒX»ú³õ¦ÜÖ ¤ßùá¤Z0å1ôÈÂÁ£õ±£8Nãr¦b ¨D#Ê‹25º&8ó¥§þÿø3v{ÔÉ9½Û²Î©j(,=ÿź¾5ÔmÝ€·êZ¢›¶á®¸bV˜ »(ï}èä¶_ë”O@!„˜u6"AJ!„baÔNš<DE¸-ëˆßòóDÖÝ:ã«&(cÆû zöáw=ŽüiL9?ù³µ¦Üå›ixûŸ£’YLq=ÚCXë#¤ÎÚ^Ÿ;Žãá¤ZñV_gÿ}âü] 탞}Töÿèc5¦Üe›H¿ã/p’Mvòü§¿@é¹[2ïqwùf"«¯'²q+n놉9¦:åP!f’ %„B±@j9HE.»‹ÔÿåÅÐùAôhNº §aÙÔ7é=Úƒâ9ü£Oœ|ã—&N+©¥0åuÜ@ú­Ÿ´O¥+£sgйõzÊFÝòP¦<Ž©P±4*j'Õ6AS&è=€ð‡TŽ>aPsKâ­ØbCa"ƒ)æ(>õÏ”_øÒ’{¯{Ë7ã­¾Èú[!À[¶¹S>…bÖÑM‚”B!ĨéRWÜGê¾ß×CçNQ|ú_Ž¿é½D:n@ŧ¾Y‡èÑ3øÇŸÅïzœàÌL¥0qzI-„©Èº[I½åQ‘:?„99U§gìÊ‹¹ì.»ús§1~PvRúh¢:Ç¿\ Sûñï¢Òµ{Z˜Zümã­º–ôýŸBÅÒöiŽO>HùG_[J—W3Ö±»l* á§ÿW§| !ĬOL RB!„ £–ƒTìª7“¼÷7Àñ‡OPÜõ·øÝO¡’Y¼e›‰^ù&"«¯?k£p´‡àijöib§\3#¦"·’zÓNø99]§gì.*Þ09ÇW˜;‰:(T"ƒ“jEÅ’àÆPÕ§ò™ ‚ÉôÂ?ü(•#»j"LEÖ¼†Ô[@E“èü ¥'¤¼ç›Kñ2kr<м}W§| !ĬOJ RB!„ £¦ƒÔµo'¹í×@9„CÇ(>òwøÝOM~ÝI6ã.¿œè–7YóT,=õÃF£GNã{šÊ‘Ý„g^ÂåE]žésbéñ~tî4z¬§>wÇÅI¶àuÜ@8|; އrL¢¢IT² åÅìSùœ`ìÄôù!¦ºÃ?´kQçÿЬ¿Ô›ÿÈŽ\ï§øØ?PÙ÷Ý%ý¾— %„s“ %„B±@j6H)EìºÿBòžŠp Ë©ãÏžõ­NªwÅbW¼oÍkPÑäôKotî•®Çñ>NسoÑÂTôÊ7‘º÷7í-ˆc}v„ÔXo}î8އӰ¯ý*ôðqÂÜi”™ñm& Q‘„ R±4N<®Œ_ªŽ˜:‚ßõ•C;!ô'·ÿBM„½ìn’oü};rm¬âîÏR9ðƒ%ý¾— %„óœ~HB!„Xµ¤â7þW[P„ý‡(<ü‚S/Îû#NªoÕµD7ß‹·êzT,5ýœpø~×£øGŸ ì;8íV¾…»æ~;âËqÑ£=è‘SèñúÜq§©oÙå„CÇÐc½(ÇóÛQ^•È¢â 8±†j˜ÂŽ˜ è;ˆßýþ¡Ó¢áÅ15cäÚèŠ~–Ê¡‡—ôû^‚”BÌsú!AJ!„baÔlr<â7ý ‰Û?@Ð{€âß&8óÒ\§LNÃ2¦6mÃk¿o˜v%® s'ñ»ÇïÚMØwhÁFLÅ®ÿ)’w ”šœ?JçësÇq"¸­ëq[Öv£ÇPŽzÙ3:D¹Q{+_2‹M3-Lå‡z÷á{¿k7¦46ç6¾fŒ\9Ma×ßáwí^Òï{ RB17 RB!„ ¤fƒ”둸ùçˆßú>‚3/QxøÓ„½^î4’™ajy5L݃·êTlz˜2„¹Sø‡wáw=J8pô"‡)Eü5ïžñ¥s§GNa Ãõ¹ã¸ܶM¸M„ƒG«AÊ9§uat0¦œx#(û³&(c C½ Ž?CåÈnL17ïv~µ¦\³“ç¿ûÉ%ý¾— %„ó¡$H !„B,ŒZ RÊ¿íýÄozÁ©)ìü4áÀ‘sùiÎ S«¯·aªýšY·òÙ ¹ýC;ñ»#ì¾8aÊq‰ßôžÉ_áðIôÈ©Y¡¥Ž¸Qܶ¸Mkìò taòƒpNAjj; œÈT˜Jf«Û}taˆ ÷Á±§ñ»G†æÝίTìºw‘¼çãS“ç?üé9ç*[J$H !Ä9k2øW¦â7¾›ÄÖ_e'Ï/<üi‚“/,é÷½)!„˜çˆ$AJ!„baÔlŠ&IÜñab×½¿û)Š»>M8|ò•ü6f„©L;‘µ7Ùp;Þò-gÏ15tœJuÄ”:Ž +¯~yfø ‡ºÑ¹Ó˜òX}î8^·e-nó:»<GÐù¡W¤¦¶“Ñ!J9¨D•nÅId§}Ý óC„½ûíSÇžBœ™w;Ÿ«øÍ?KâŽÙåè;Dáá¿!8½gI¿ï%H !ÄOØ£iœl;nËz»<9HM0ZÛ9¦RÍÕ05kÄ”_"ì;€d7þ‰çf…¥³Ã”òb$îü0±ëí­”¯lòüú#AJ!æ9¾IB!„X5¤–“¼çãD6n ¼çwÿoLiôbž†2#Lµ¬#²áN"k^ƒÛºÁŽ˜ª”툩}ßÅïÚ΂ç_žT ‰{>>Øôð ‚þ#–ër¿Q±¤&&5ï?‚)^ü 5ÁhB¡RM¨d NrÚSù&¶Oÿ*‡~HpòEÂþ#Ì5JJE$¶þ±kß¼ÚÉóë‡)!„˜çø&AJ!„baÔlÊ´“¼çWˆ¬¿ €ò‹_¡øø?bÊ 1 ø¬0ÕºÈÆ­D:^ƒÛ²öì05ÐEeïCö©|…a0úìåiXf—§ØÂÜI¾ƒçuÛ_M°Çq²«p›ÖØåé?‚.æªGM2:N²ÉÎ3•È€ãN}=¬Å?¼ËŽ˜ê;4cû¨XŠÄÖ_ž¼•ÒïÚMa×gf=½oé‘ %„óß$H !„B,ŒZ RnÓÛ~…Èš›(=÷ü¦RXÈÓRf„©¶MD/ÛŠ×q#nóT,=ù=Æ/Ù0µï»ö©|…ÜŒŸØÂÁ£„GA×iJdp2íÓ‚ÔaLqdú ¥e”ÂIdQÉ&œD¸SaŠ0 >FåÐÃÇž&è‚ÐGEâ$_ûëD·¼¸Ð“ç×. 
RB1ÏñM‚”B!Ĩ٠պä=¿‚·úzJÏü”žúgÌ¢L>3LyË7ÙtÞªkq³6L)Æ`*ãöV±ýß§räÑÉ[ ݦÛ~u2°½й“ç4ÿTMž°'šp³í8ÙÕ„}«£×ww2a(œD•l²ÿ¸‘éßaoµ<´ ÿÈ#˜ržÄœœÛ«²ÿûý{{ æ&AJ!æ9¾IB!„X5¤–m²AªýJOý¥§¿€ sÎ¥9ÂÔå¯%²ê:œL;*š´·‹)ô¡²ï»Tö·i É×þ:^Ç §~„ï¯ß •l¶A*³ 0„½1•<‹¤¦¶ }À±a*‘A%›Q^tÆw‡]øÇŸÅk»lr»”÷|“âãÿ€)ä–ôû^‚”BÌs$‘ %„B±0j5Hy+¶¸çãx+® øø?R~öß0a¥NW™¦V^Eôò×âµ_ƒ“Y‰Š$Àq ÑåQ‚“/ö!²þv¼ö«ð=moq3a]î7Nª'ÓŽ“icú¢©³·ÏD˜RÑÊâdWUoµ¬ Œ Q^ €òKߦøÈg0åü’~ßKBˆyŽ ¤„B!Æð_ÝÛÃÚZ{]ÞªkHÞõ1Üå›(>úYJ/|ÂZQ4+L­ºŽèåÛl˜j\aÔã—1åqT$f'DÇÎUDX™sôzà¤ÛpWâdV‚ѽÀ/ÔØ«œ¦*–FÅÒ8©–™aª*ì;Dñ‰ N½¸Àó•-, RB1Ï‘C‚”B!ÄÂÚqw§2Þv2µòš¼ŽIÞõ˸m—†Âß¡ü£¯Öè-n³ÂTÇD7ß‹×~5NªMT+“Oçó?‹)ƒ®ÓRée¨L;nãrÐAïAŠ5újç1•h@ESöé|ÕH8Aõá}‚ÊÁÿ$8º@Ov\X¤„bž#†)!„Bˆ…•Û±-«ÑÛk%LEÖÝBbë/ᶬ£)ìükÊ{¾YW'²æ&¢[^·âJTCÊšÃÈ”FGz0Ŧ¦N…©ÛÞ^žÓ°'³§a„Aï~Ë5þª§­[Sý×h7³?{—×ùAü£SÙû¡cK*LIBˆyޤ„B!G­„©È†;HlýEܦ5  ?ø+Ê{‚:@8|üâ¬ù¤j;LÙÛõVगa aï{ b}\nL®[S)â´¬ÃÍ®bŠ ‡Û Ð…!‚î§(ïùáÐq;y{_¯HBˆyޤ„B!WnǶ¬1fŽûþÅøûÑM÷¿ýC¸M«!ôÉÿÿ¦²ÿûÔÃè¡9—çŠûHÜùœt›FÛ‰ÏÁT è±>Âáãú³æ•ªÍ0ådVá4®ÀI·b‚² RÚ¯»íb·yœ= WŸÈCESàz߉$8õ"å=ß$ì?\×aJ‚”BÌM‚”B!DÈíØ¶ÎÓ¹Ða*ºùu$nÿœL;&(Søî_؉ÀëTtó½$nÿ ]žr=Ö‹Jdpp<À`ÊôèiÂÁã`fÏ•U[aÊÉ®Æi\Ž“jÅø%¾5:¿×Ë3aˆÛº'Ý Ê{²“ž'²8é6T,^åFª?`ÐÅa‚S?¢ü£¯öÄø¥º›œ^‚”BÌM‚”B!DYè0½òM$n}Nã Œ_$ÿПâw=V·ë/ºå$n{¿]žJÂþPÑ+ß„·l“}â›r˜ S¹“ƒ](ÔäDèÕSej!L9Mv©T3¦R ì;8ë–Ãú`´¶ë?‘cð»Ÿ؉赆jœR±DSaJ‡èÒ(Á©©ìùAï>ûT¾:¹Ž‘ %„s“ %„BQ£r;¶n3&Ú‰Ã=óïÄ®¹Ÿø-?‡“nÔóä¿ýIücO×íz‹]ý6»< Ë0•ùo}@Eâx+¶Ùp'nÛFT4m”1˜Jžpø$aÿa”ëÕT˜rš×â4,ÃI6Û×Ùw¨.ƒ€Ûz*‘œz”QÞ´ èµ¶£¦’YT4Y SÕ¯ë]!8½‡Ê¾ïÚŸ¯j~™%H !ÄÜ$H !„BÔ¸‹¦b×½‹øMï±s•Æÿæ'N¾P·ë+vÝ;‰ßô^»<å1Æ¿ñ };òÆq1A¯m#‘u·á¶m²ÿ¿M˜Ò8aîaßa;BÇq}yÜæu8 ËQɬ}}ý©Ïù½œÖõ8‰,„>þé=Vl´—&“ËeÃT 'Ùl£T$†rc6ê"èÝoÃÔñg1Aí>uP‚”BÌM‚”B!D¸Xa*~㻉½æÝvN1Çø×ÿˆàÌÞº]Og/Ï·£m¦zr<Ð!nË:"·â¶nDEâS¡AáÿsÆh§s:Iöâ nËz¢—Ý…Û´¦Í#eÊc„CÇ1ÅÀTGN]nóZœl*–BçЃݳžX'Ñ4ªqN"3m.¬W÷Cú¨h'݆Š%ÁÙÉ虦zöâŽÊþï-Ê­|¤„bžã‚)!„Bˆ¥ã•„©ä¶_#zÅëQ±á`7c_úML!W·ë ¹í׈ny*š$P¿A*žÁiXno¥,æûT'4¿P×#ª:b*…Š7ØIÐãÕùà Æ/£Çûâw?øa»]/2 RB1ϧ¶)!„Bˆ¥+·cÛ:cL'Žûþù¾'õ†ß%rÙÝ(/NÐ{€ñ¯üÎE½íbK½ñ÷‰l¼ åÅzöQÜý¿Q‘èùÝ&ô§ÂÔúÛq³«gÞÊW%ìFç‡P®wAFL¹-ëpšì$ë:?€>Á…‹8 Ä€J6Ù[ëâvröp ë"…µj˜ŠÄQ‰,*Þ€kœÜN&(cÆû zœx–Ê‘G1¥‹7˜)!„˜çÓZ‚”B!ÄÒ÷ra*õæ?"²þ”%8óã_ý}L¥P·ËšzË'ˆ¬»åFÎì¡øÄƒ¯z„Ôl& ˆ¬½©¦Vͼ•¯4F8pÄÆ þ+_ŸNËܦ”Eçë4HTª'ÝŠŠ¥0ù¡˜œ]Ù[ù"qT"ƒŠ7ÚSÕ‘m&¬`ÆzöœxŽJ×îêœ`zÑ%H !ÄœŸÒ¤„B!.Õ0µÇ}ÇÄÿ—~Û'ñÖÞŒr\‚S?füë¸ ·2],éûÿoÍM(ÇÃ?ù<å§¿^ô¢ü-(ÇÅë¸È†­¸™•3o嫌£Ç0Åtiüâyÿ ·u#NSÊñÐ…!ôðI@×Õ61Zã4,ÇI7£")tapà(Êqâ’£C”CÅQ‰ N"Ê©nà f| wÁ‰çñ>Î^¸e— %„s:KB!„¸ôävlÝfL´‡{Òïøs¼ŽP8ø'žcüÿB¿^OoI¿óÏñV_o—çØÓ”Ÿÿ÷‹¤&Ø[ÄDVß@dý­8+ÏšcJõbJ£èÒèy…)·í2¤pÐÅatîä¢<-îU­­q2+q’Ív.¬|5H¹î‚îFk”±“ž'28‰ìd˜"ôÑù{+ßñgð=ë{õË.AJ!æþT– %„BqéÊý¯»·¥ÞõW‘U×Þc0ÝO1þ­O‚êóäÖ‹‘~ÇŸã­¼ƒÆïzœò¿Šr£ ò÷MàÄRx7âuÜX15óV>=Þ)æÐ¥1~ÒH4§¤V¡PvRóÑêo„Tˆ›]J6¡\{ëa8Ø…r#‹±—`ŒA9žü<™ÅI4UŸøè$èÙW SÏ Çz_ù²KBˆ¹?%H !„BB›1z‡ßõØuù‡>uzލbi¤–mƘÿð.Ê{Zðða´Æ‰§ñV߀·úÜÌŠaJ—F0ã˜B]Ÿ?L9nÛFÜÌjŒ í(«ñêm)¸MkªAʳO»ìFy‘ÅÜ[0”rQ±Tº'ž™ SÆ  C§÷{ ÿÔíè´ó]v RB1÷§°)!„B1¡²÷¡ä¿ó—8¬­Ç×ï$›I½ýOñÚ6bBŸÊP9¸so ›bŒÁ‰7à­¾HÇ 8 Ëf…©QÌh/º˜Ã”ó–gþ7‚Ûºa*HåNV'Þ®Ó •jF)¯… 5ã²Èþg¼•jž9bÊtq˜ðÌ^*Gvö ì>}@‚”BÌùÉ+AJ!„BÌ–Ûq÷Œñê.L9+Hßÿ)Üæµ˜ Leßwñ»ƒ™<{~F‡¸é6Üökˆt\“^6mŽ)ƒ.Ž GíS¦<aÅ~É‹á6uT—§B8ÔýŠ&F_l&ðq›×¢Ò-öÖÃüáà± R“—Gö?c ¨dNªe*LaÐ…aÂÞøGvôì#<ú“—]‚”BÌý‰+AJ!„âÜävlËjôvgGvûÎÜÒ_æ»·WÃT¦Nmq›:H½íܦL%OyÏ·N>_3¯Ðh›Y‰×~ ÞÊ«pÚ¦FLUoÓ#gl˜ Ê C;Bªy-Æ/öÖÝ~d·eNªwÏ's IDATc4&?D8|l‘æ:·} *Z½•/Ù<-L)ôìÇ?òÁ™½/¦$H !Ä<Ÿ´¤„B!^ÞDˆRÆÛ>f4#F;.…05çò×hDpÛ6’~Ë'p2+1åqÊ/~• ç¥š|­Nãr¼U×á­¸'Õn5Lé=ÚK0pSªNŽÞŽñ gö¢\¯îö!Tp[6à¤[0:Ää ‡OÔͲ¨d3Nªkœ¦Êy‚ž½ø]» Nï!è:{Ù%H !ÄÜŸ­¤„B!ævN!FÂT Ù*¼å[H½åqÒmèbŽò‹_&ì;XÃgãn¦·ý¼[ìHœ‰Hèâ0*GES˜ò8ÁÉP±TÝí;&ðq[7ØRaS&>¹hs{÷ë7¥Tª'™EÅ@MÝjÊy¾TŽnëF¤üÒä­‰j‘çö:ïå0Õ§ò%›P‰ N¼qÚ\`Õ0ÕˆÊá]6L Ç% RB1סO‚”B!Ä”W=™·æ˜RAç%¦ÖUÃÔûkâ9.‘ŽI½á÷QÉ z¬ŸÒsÿ†Î¬£³s·uÞÊ+qÛ.³#¦¦O~>>ˆ>Ž }Pn5èÔþù¼ éR•"º0ˆí©» 5¹<Ú •È Y¦¦Ý~hü"aßaü®G©t?ñ@æç?×)Ÿ®B1ë'AJ!„â"Ìn 0aˆr#¨D#*ÞˆJd§?Að S>m…b& RB!„¸$]ô5›æE¥*Û³Ûݹô×íÖmÆD;…bÖÑA‚”B!–¢šQg]¹†ŸSJuf·ïì^úÛæâ„)M»á§IÜú~PŠ g/¥'>_¿gœ¦’÷ü*¸z¬ß)¢\wòÛLèƒòp—£b 
¨håFíׂ2„ûd¾ü  S“?»PaJr«A*)æGÎ@ÞJy^oëÂ0ãßþ¤)!„˜ëè AJ!„KInǶëqvÔMˆ:ë öÒ SC;îîTÆÛŽCæ‚œØÆRÄoz/ñ›~P§~Lé馦&S:¿}§i É»?Ž‹ë#:†RjÎeš˜PÛiXŽŠ7‚³#¦”0~*taSÎ/`˜Ràx¸Ë/GyqLq˜0w‚â’ÿ<Ò…ãß~@‚”BÌut %„Bˆ¥ ·cÛ:cL'Žûþ¥q%{i„©ÜŽmYÞ~!”Š7¸å}Änø)‚ÏQ|ú (׫Ûõã6¯#±õ—@)ôh¯ RÓFGÍÅ„!ÊõPéVûD¾X#¸`0-E%oÃT¥0mý\¬0¥Àà.Ûlo5, Žœ‚ ¼ä?—tq”ño}B‚”BÌut %„Bˆz¶äBÔYW´¦ÎùÄ6‘!qÛ/»öíøÇž¦üܿ٧ÐÕ#'‚Û²ŽÄ²»ÂháðqÔ9-Âèåx¨T *‘Á‰¥ÁñÀLPÆøEL%ÉaüâE S Ü(îòÍ(7‚. £s'!¬,ùÏ']cü›,AJ!æ::HB!D=Zò!jÆU-#F;œÙí;sK|»f1;^Évu’MÄïø0±«Þ €ôqÊ/|&'ó®3n¯í2â·ÚU¡GÎæNN›œüœN÷1ZÛ[ùRÍhĉ72Œ_ÆT 6L†0~é"„)^ oÙf;Va;¡¿ôߺå<ãßø# RB1×ÑA‚”B!êÉ%¢Îºº½¤ÂÔyog'ÕBâ®_&ºùu€Á?ü(å=ߨß9¤oÅUÄo~¯Ýü¹Sè‘Ó¯0°)Œ16L%›QÉ&T,5ùU”0å¼ý§0Œ Š(72ù³¯.L)ˆ$ð–oå  Cè¡`‚%ÿ–5åcßøC RB1×ÑA‚”B!ê$P¼â‘3KŽ„©99é6wŒè¦{CåàN*û¿ u{¾ëàu\OüÆwæN¡GΠœWتqÉÚ0•jAEâ“_5ASÇ”Æl˜ Ë L)T4‰»|3 0ù!Âáã`Â%ÿV5•"c_ÿ RB1×ÑA‚”B!jæf—w¦ßý·.ýýbë6c¢s…)'ÓNòÞíDÖÜÆPÞûmüûêvY1D7n%vÍýèá“„£*HM^Øÿòb8©T";kÄT S·qª0„ ýóSÚ@,=¤ ÃèÁ£\œ'úÕØ6ôËŒ}í÷$H !Ä\G RB!„¨­à !êœO䜆åxí×Ys#nÓšc*‘íT±ÔƒK?9;L¹M$ïý ¼Õ׃є÷|“ÊÁ¢¼h}.¤ÖD7ßKôÊ7Ù>‰=s‘&iWLL>Q©f”;µÞLP¶£¥J£vÄ”>0¥u5Hm±¿«0D8pt²…-e&¨0öÕß• %„sy$H !„¢Œ}á#Y¿ï€„¨Ÿxö¦PÑNªÅ†¨wâ6u â”Å ~ñ˜ïïtÛ6=¸ÔWGnÇÝ0ÆëÄa­Û¼–ä}¿…·ò*¤^ü2•?DÅÓõ¹pa@ôÊ7½â>û¯Ã'У=x„ÔY;(<;Ç”“j×üª +˜â¦8‚.äÀ?1L­QѤÝ.€°·ìIBˆKû”F‚”B!ý¢Í/n×¹Sþ±§3å=ß´O³ÎÚÊ‹£’Mx«®%ºinv•}ZšÆ/¢óƒèáø'ŸÇïzò˜=љݾëÁ¥¾zr;îþ€Órygò ¿½Ö[¾tHéù/R9øœdS}¾/BŸØ5o¯NÒáÐqôXïER“;œ Sn'ÝŠ“l™¦|L1‡) £‹#`ÂyÃÔYAj´‡0w¥–~‘2aÀØW~[‚”BÌu¤‘ %„BˆE¼\û¨N`-Æ`*ÂÁ.ü®Ç(ïû¦“U„×ÃI6áuÜHtãVÜ–uv®Ÿ‰”í¼<¹“ø'žÅ?ò˜½µk‚æa¥*Ùíî\êk+ì?ü·í²Nt°¶ôÌ¿R9øCœ†¶ú|w>ñþ ‘ wÚe[Ð U} ZÛØkÄI·Ú:žª#¦ 9L1gGLaP®7µïb0aˆŠ%ñV^mwÇÜIôX—ÄR¤„bþ3 RB!„Xhù¯ÿÁ"ï錬¿m­Jdf_cÊã½ûñ»§òÒ·1aåR=UÃI5ã­½™è†;lˆJ6WçDR“£TÂÜI‚Þ…ëŸÿBÿÒ SYB{ñÉÚîÞ™q2íu¹&ô‰ßøn"ën'HM-€ S m8‰&˜ OajS®†)=®ŒQÑÔä©p°SÌ —ü;Øè±/ÿ–)!„˜ë,G‚”B!Êä|?ž·Öm]›YEdÓ=DÖ܈Š5Ìüf¢K#„=û¨ÜIåÀ¸FTLp’Mxën%²îVÜ–u8ÉfT$Jat€)¢GNãÙmCTaÂðÜÖÑ%¦Šþ}¶´çk¿å5møuOÕßDR¡Oì¦÷Yóû¯CÇ1c=ซö’l˜R¨xNª'Õ2óõ„!º0„) ¡‹9Ð6:©Df*HõÆT  ý%ÿ>6Z3öåÿ&AJ!æ AJ!„Ýô‰§g|Áñð–]†Ó´–è÷á­Ø‚Š&gþ°Ðù!‚Ó{¨ìý6þñg—ôºrR-DÖÝŠ·úzÜ–µ8é6T4eCTècJc6Du=Fåèãö‰g~^É9Ý%¦r;¶eɬø=7Ùöñº Sa@ü–Ÿ³O ¡cè±¾E!U½t`"xN„)'‘E%›æSÅ!ôh/¦R@Å&Ÿ²ôì¿è%ÿÙ'AJ!^æ¨"AJ!„/lÝfLt×½ì7:Þ²M¸Ë6»úm8M(/6ý²Â=Þâ9*/}Û^Ô.!“#¢V]‹Ó¼§q*šB)£§BTpêE*‡Aõc*ã¯,DͦÃÏ)¥:³Ûwv/íýqÛ:•jý3§qù{T¼¡æ_¯ C·½¯ýÀÞê¦Çû1HM^B03L98É,*Ñ„JfPntj‚2¦0 Já4,ÁÉ0¡_˱ÛÐƾô›¤„b®£‰)!„B\ø ÿ­ÛŒ‰vâpÏyý`uÄ”·òjb×½'½lÆ<5`0A=ÖKpüYJ/|;Yß'c±4‘ wâ­¼·e=NÃ2T¼åxÓnÍ;Cpz~÷è±>t~ÌE]"aª¦’¸ýƒÕ‘E†pà(:?XC!gV˜R.N"c'ÜOfg„)ŒêSõ‚Ó{0•ñ“£/U¤„âeŽ"¤„Bqá.ô_aˆšÍõp[7Y{3±ëÞ…“ÈØGÐO»¸5~ÑÞºÖý$¥çÿÃN’\?§`6D­¿ oÙå¸Ë.Ãi\‰Š¥Qn¤¢ª#¢Î¼DpòEÂÜ)ôxßäœ<•„©Ú`4‰Û?„»|36Hu¡óC58²h¾0Õ„“lš•Aç‡Ð#§ œ˜Cjé^HBˆ—9zHB!Ä«¿°¿@!jÆ.6L5uÝ´ØuïBÅ’S_³W{˜Jžpè8þá]”üu;ŸRÍžy)T"CdÕu¸Ë.Ç[¾'»zÚˆ¨SCôô¼Dpf¯½Mk¬wÚÅû²aj{vûÎÜRÞǾðÁm&t>¥âÉ;ÏšÃl‘%îø0nÛe` áÀaLqdr¤Q îàÌ SN2ƒJ6Û9¦¦ t~]¿\ý¹¥w]b Œ}é7$H !Ä\G RB!„x¥.|ˆšç"×uq[Ö»ê-D·¼‰ÏºêÓ˜ò8Aß!*û¿OeßwjìŒË†(oÙåx+®Ä]±·y *Ö`GDU_¿9Mг°÷aÿôh&¬,îk׌ìppv,õ05òÙû·a¢Ÿr²+î¬SŠÄÖ_ÄmYFÛ§Ó•Æf4Ù½Ä`z˜RÑ4N¶'Ù|Öwš°‚) £Ç!(Mü¿KfŸ’ %„/s´ %„Bˆó•Û±m1¦Ç}ÿ‚ž¸D¸+® v囉l¸cÖù h.ž~‰òþïáyô¬ ä…¾0w’YÜÖ vDÔÊ«p›×¢’M3CÔh/aï>‚¾C„½Ð#g?DÍv …©ÜŽ­ÛH,û7»òêE SÊ!±õ—p›×‚ úA%_O—€AWЏMkp›×اf–ÆP®‡ŠM­[ã—0Åœ-(.â{öÂ’ %„/s” %„Bˆs¿P_œuÖ L2k'>¿êÍxí×κ•Ï€Ñù!‚S/RÞóM‚S?Zð q•Ìà6­Å[¾¯ýjûä¼d¼èä­†f¬Ÿ ÷aÿ!‚Ó{GN/έyçã’ Sw€xË'ݦöŽE SŽKbëGq›Vƒú_¨»õhÂ`rÂ~BŸàÌK¿ˆ“iÇiX>cÄ£ J“#¦LPF©úS¤„âeΖ$H !„â'_˜×Fˆ:ëz=³¯ýbW½·í2T4ÁŒ0†èñ~üÏR~ñ+„]ÿä*Þˆ›]…»ü ¼Õ×á¶l˜¢ ˜ñ‚þC„ý‡ Ž?gC”êk§¨†©KáB{2Lµ­ïP^táþ°!y×Gq2í†}ª£‡ê‹Ñ!Þ²ËQ‰,Á©aJ£¨HÂŽŒ§Qñ Ê‹MýŒ_†ÐùALP©ë05ú¤„bÎs& RB!„˜ÿB¼6CÔY×í-ëðV_Oìê·ádW͸°ƒ }ôÈüî')¿øUôè™ RoÀI/³#¢ÖÞ„Ûº'Ù„òâ£Á/¢ÇöÆï~Špè8u[’æ˜RAgvû®—úûaä³ïøˆ“Yó?U<•^?èÅlj\ }ÂÞ–ën½­ñVlAÅÒ` ~÷ã`@EbvŽ)/ŽJ6£b)û7mÄT%oGLå‡íˆ)Ç©»÷Œ)!„˜çÜI‚”B!fËíØ–5Æì¨õ5›»|3‘57»ê-¨T3v4KuÄ”1˜ Œ>N¥ëqÊ/|S~õóñ¨X 'ÙŒ»ò*"ën±·&¥šmˆÂ@¥ˆÎŸ ì=€ßý$áàÑ¥·Ó\"a*·c[–ÌŠßs“m¿ØaJE$îú(NÃ2LX©©Jý­4î²M¨x#èÿÔ‹à—Q‘©Ñf6L%ìû6šBE“¨HŒ‰¹¤LiÜÎ1U²#¦—z S¤„bžãœ)!„BL¿ØÖèíÊxÛqÈÔërx+®$²þ6¢W¾ 
'‘×e*LiŒ_"ì?BåÐ)ïùæ+˜·I¡"1T¼oÕuD6ÜÛ²'Õ £³ü2º0„>Aгφ¨¸epÑI˜º@ëQ£6H¥Z0AÙ)í×áÚR¸­ì-{: 8ýc;ÚÉL~}ÆSù"‰êˆ©´ýß^ ”²sÕǠ8Š. båº5¿ô¤„bž£ƒ)!„B,•5ƒãâ­ØBdãV¢›_‡“låL}½ú„» g•—¾Måð®sº°ÆõpâxíWÛßÛ¼'Ù\½MPM>Æ>Ì$8ýcü®Ç»/½JsL){vû#_Yêï¦Z~÷BN|nÂ'ÝJâÎØ å—{÷ƒ ëðŠÃ|ºä䤿Aåzg¿¿f„©$Nªb©j˜ªŽx ty SÎcÆû1aeŽßU;$H !Ä<‡ RB!Ä¥kI†¨g: Iඬ'zÅ}D/»•ÈØÑ“Wþ]ÌŽòKß"8ùÂ<¿ËAÅÒx«¯#ºùuvDT²ÅÞV¤LècŠ#è‘ÓøÇŸ¡rh:wRv2ÍÃJU:³ÛݹÄßKëTªõÏœÆåï¹aÊÜL;‰;>ŒJf1~ÑŽªÇ åx6H%²v¤WÏ>œg¾7.Sa*@ES8©û4M/>9Gœ +”1¥1ôhý5¦$H !Ä<Ÿö¤„BˆKÏ’Qgñ(œtN¦ØÕo#²ö&l4˜¦tˆÎâw=FyïC„}§~<Ö`CÔ¦{ìdåé6û¨ú‰UGöàw?‰hWõ©y!u?aù…$a꼿ˆÛ¼ŽÄí¿`CN¥`GHÕã>åFq›:ìrLŒôÒÁÌ0<÷—a*–ÆIµ¢¢Ip£L<ñÐhß>½²<ŽíÅhÚ퀋O‚”BÌó)/AJ!„¸´ í¸»ó’ Qgù8¸ÙÕ8™•Ä®};^û5öÉ_SÂ=ÖKåÐÃTíÄÍ®"²îÖjˆZÑÊq1:œ QÇžÁ?òˆ¥á—ÀhÙÑæs)…©DË?;Ùw¾’0eÊyÜe—“¸õ}¨DSÎÛ£êpe¸1Üæ5¨x#¦R$ìÝ{¾o\¦ÂThÃT2k£T¼a2>™ bÃT%íÖD˜’ %„ó|ºKB!. ¹wÀ¯‡µr¤p›×â4u¿î]¸Ë¯°#ž¦r„!Æ/`ü"8ž}ò—ëM…¨‘3vþ©ƒ?°!ª<^%Ήæa¥ôöìö/,í÷ÝÖm*±üSç¦Li¯ý*â·ü*Ö€)ö¬Ï å%pšVãÄ1•AÏ^”z% 2-Lék°ŸÇR8±p=Д0~SΣG{½¨aJ‚”BÌó©.AJ!„XÚ$D½ü®ÛÔ»òJâ7ü4nS̸p5Sÿ¥Cti=ÚCpf/~×nôh:?do?¯Œ?§”êÌnßÙ½´ß‡ç¦Lq¯ãFâ7½KcŠ£ý‡Põ¤"IÜìjT¼SÎÛ 5ïüQçÇhƒo€XoÀ‰&ÁñìC *L¥hGLõ±XaJ‚”BÌs&AJ!„XªÀ¢Îù„(–¶“žo¼ ·ýªêEëô+Z£ Cø]Szþ‹è±%D]0—P˜"Þöy·©½ãå”. Yw+ñ×ü *šÄs„ý‡©Ç"¥¢iœì*ÖÊc=û/Xš|‡pâö¾XI€ã‚íh©Ò¨ Sù!À,h˜’ %„ó$H !„Kí‚WBÔ9Ÿy1¼u·àµmÂ]~¹Å‘ÈÚ§xM^0Ï|"Ÿ)ôî§²ÿ?©ìÿž¬Ä ¹=Y¼å›?çµ_Ý¿ùg»—úû”xË'ç Sz|覻‰]ÿS6H† û€S‡A*Ö€“Y…Š¥Ð¥QÂÞý(ǽXÍF©xKÙ0¥” S¥qôxŸ T•"8îcs‘ %„ó|bKB!–ÊîÖmÆD”u¬ÑîòÍx+¯Â[q%n“ QxQ0Æ>­+?ʱ#;"qÎz"_1Gpz•=ßÀ?þ¬¬ÖW,Òx+¶Øm²| ns*Ñô9ÜÈvåEsKû}[ Smë;&ž Çú‰^q±kߊ&Ñźïpý)*‘ÁɬDES˜âAßÁA ¼8*Çi\aŸÌ ti 3ÚcoÈ Ê`.îÜo¤„bžOj RB!D½_ÐnÝfL´‡{dm¼üªŠ%q[6Øø±òJ¼ÖÕQQÌÄœ3c}} zöÙ[¤‚ñ›ÞKdã]¨X’™a*@çñ=CùG_#ì?$«ù|¶HĆA·m#ÞÊ«q[×ã$›À‹M„Áýÿ³÷ÞAv^çæsÎî½oçt#gÌ”DRàHV²dÓ´ÆvÙ«‘Öµ®™Ù0ÆlÕ®]µ;³PÕxwõÏj¼®õÌìhe{g'Ø–-K'ˆ”(RL`‘¹sº}ÓÎÙ?ÎE£@“ ˆÐá}J*ªÔ$îíïÜû}ç<|ßß;{ùP:üî¡Ìý_\Õbjæ_}áÐùÁ®²õ fv„p×çÈìù*ÈÞE‘s›±UׂnꩵN‘Žž\Px_Ú0)*Ⱥ÷ÐØuux5˜Ê ¶Z„¸Š­!%‚°Ä>@„” ‚ ¬LDD}€ O¦¯u=^çVüž]x[Ñu-(?ƒÅBTÂÌ‘Œž =A:y¢¢#QÂ:²þ2~ÿ^T˜ãª˜rùLa„xè*/ý{LiòÊ+3Œ.\Åóñ;¶âµoÀïÛ‹×¾ÝÐ>½çÅàØI’Ëo“ŸI&OÒ–Cù‡§WïwzžæîßÖAã©6fö>C¸ó35!5E2vzÅEHYcÐ íèÆnT˜Ã–&Ýïq+½lšÔ¡ê[Ñ(/¼ò1Õ9lyâ 6*Þv1%BJa‰ý™)AAXY¤“g÷W_ýÓƒÑéç>nË3rAÞk£“iDç{ñ;6ã÷ÝWQ­dP(l\ÆÌ“ŽŸ"=N:zªvpÍÞø`kAY2{ŸÆëÜz˜²iŒ™¹L|â0•Wÿ•dâx­xù>‚ÁGñ:6¡: ¬s·¶¦0Š©PJ ›FF¤S—Êéä雞{埭v1e0ê?ñ?wüL³ò3îšÌ\ºãíe·k º±ÝÐáZçÆI'†îªºú^R°Ö;IVßž凘j[š¤â*§0·åuEH ‚ ,±O!%‚ +†ýÀAàã6HÇNQyñI.¿­ÎÉÕY¸žî ² IDATÁÉ4 ;ñ:·¬»¯}“«À ²5ñQÁ”&I'†HÇN’ u­=aîæ^@ûèÆN2;>n]¿8cÊZla&ÏR=ú·T_ÿ3÷šk/@×·ãµo x¿{;º±®N'ULq S‡š4°•¥[¨La¢¿û×ÿ¢ñüáï¬æË½ó×ù`ëþƒÊÏþf:u[ž†4ZQ¿ƒ5)º©ÝÐŽ r˜¹±šÒ÷ð=°ÆI²\kvùÀåÃU ØÒ4$ÕšTþpbJ„” Âû5R‚ ‚°¼)ü‡<˜ýȯò{wÿüµáÚ6©’\|ƒÊ ÿéØil³–[ÄTX‡®oÅëÚN°þ!¼ŽÍW[Á”‚¸Š)Mb&Ï‘Œ ¹ô6¤*Sk/ègÐͽdvý,º±J½@L%ÒÑ“DGÿšêÛµöÄóÑÙftË:‚uà¯ÝÔ… ëPÊæ‘ Š&˜ò”Q&y©&ħžIÎ<÷ÛùÏ~c•_ÉÁäò;ÿ›­ÌþŠòƒõÆmš ›ûjB*‹™ÃL Ý• ©÷}oƉ&ÝÐá2¦² ôü÷×”§œS·vo!%‚°Ä¾M„” ‚ ,O¦í´ÖDû_VÙ2;>M¸ë³nê›°HLU爇^¤òâ’Î\rU&kiCãgP¹f¼®m„?vUDYך—D˜Òfú<éè âóG ­¢²þP›DT%Üõ9t®<ïêúXƒŠ$—ß¡úö&>ùÜê_í£Â:¼–~üu¬ÝÜ‹Ê6¢´‡5©ËìIc¬I0…Qleö¦DÔu×?®ŸùéšS3ÿúƒ­O}=èß÷Ô-KÔ»ŒM¼|?ª¡ùÖÃɳËBHÍ¿GcQJ¡ÚÑõ­¨°…a]¦<-Maã Äe>¨˜!%‚°ÄþM„” ‚ ,/®Š(ïË×ó›zÈÜ÷ÂMO »®æŸ€û7ú¥IâS?¦òÒ¿ÃÌ­þ‹å¨°¿k;áÖ§æ3‰T­Ì&¶´_¸ëólxd÷rS6‰Ýg`^HNž[–Ó­µ(ª.ïZùê[Y˜gÊ3ØÒ´«èKªÜ¬˜!%‚°Ä¶A„” ‚ ,›Cæ’"êZ¼Î-döüÁ†Çn >LaŒèØ÷¨¾ñ-Lqbî`4*Èâwï Üñ©ùŒ¨y•&ØÊ,fv„äò[$ç^Á&T¦áÎj+PŠpû§·<‰Ê6]m—QsEù&fúÂjXк¡pÓãø9 ‘Ë»j>k]€|a SwíOw¨½ôª˜úá¯äüèðê¾g<¾?Üõôï.g1e“¯e=ª±¥La”tjy ©ù÷l†RÙ&7•¯®uAŔŔ¦°¥™Z«iô¾ŸcR‚ KìDH ‚ ½>TÞ¼ˆº¿Ù}Ïà÷ヲͤ6ñmê<Õ·ÿšèØ÷\Êjؼø¼ždv|ºÖš×áBÅ•®Uà13—H.¾A|îךÞíúÂ&U@‘Ùý9‚Gœ [ЄI0³ÃDǾOåõ?c%OLT¹f­O¬¯u=ª®ågjB¢êðqªELa„»‘sfã ɹ#oÄ'ÿî7ELÝ; )S#:O¦ìÝÚ‡[£Â'¦UL)N`ËWÄÔÒ‚U„” ·XR‚ ‚p¯‘n´»²þ4ÍæÏ 6~„ì¾/âõìDyáb1WHGŽQ}û¯ˆ‡^pó¯Â-Ot.1eÓ¯e=º¡”ÂÌ»ª@¥VЕU51Uïr¦êZýÔ”¦°Å lTš˜k3R‚ KÜ]EH ‚ ÂÝ>4Þ>u-™]Ÿ#Üóüöµ|©bªZ$¹p„ÊkJ:zÜU¬\9l-c1¥2õø=» uQM=µpl—IDµ„™#¹üñ¹W°i‚ÒÞmÉ%º­Zí£³M„;?…×±æX,¦’ñÓDoþÅòŸÈç„Û>AÐw^ûFtC'„¹«“óÊ3.¨<*º5J¢ZÐþ½ûœ­-1õäW‚mŸþÚrS6M® 
)lMH]­Và•u­©*l@5´¹vé˜ÒvnÜUL-¸ÿˆAXâ®*BJAîÖ!ñΉ¨keAvï/îú,^¾¯–/µ`â[e–èôO¨¾öǤ“çÀ𫇭e$¦TX‡ß»Ýxíñò½¨l3Ê °Ö@Tv"jä]¢Ó?FéZÀû-Lj»«è/ßK¸ý“è–õó­mµÂÆUÒÑT_ÿ3¢‡—ׯ1Èâ÷ï«ÉÁÍèÆ®Z•šçªÔʳ˜Ò¤›˜g“ži²¬>WWÅÔ~>àðÐê¾çÔÄÔŽOvª…îæõNS¼Öõè†v¬5NTί° ©ë¾ n’d¶UׂÎ5s]+_e[ší1ûÇÿ­)A„ÝMEH ‚ Â>Þ%u º®•̃¿D¸e?º¾íº‰o¦<íÚÄŽ|sYŸ+?ƒß¿¿o/^û&¼|*׌ò3Xk°QÉ#$>ù¶:¡“"+k¦ñ;·lû$^s7èÅm4Gré-ªo~‡øÌOîí[ ë𺶠>âÖ¤¥ßÉÁ+“ó*H*˜jÑÄ«W2u̲½ü6®\~çoã£óW»˜*üÅoÿ^v×~C…Ùà®_gcðZÐõ­Xc0…Lateû¨«wYð|T¦•kªµò-¨J-Ïb*³¿ûµ¯6ýýÿó < A®Ù_ˆA„;Çä¡'Þmu-^çV|>ðk1™ŸøæÚÄLqœêëߢúö_a«…{·)ñ3ø}÷áuïÀïÜŠnY‡®…c_QvnŒdô8ɹW0ÅI@¡ÂìŠþŒX“nüÁ¦'œ8\8‘ÏLy†äÂkTßüÉ…×¹›ÕF*S×¾¿~ç¼¶ Nz¡QQ37æÚñ¢2¦:禎Y³r®ÿSÓ‡öç3~é÷ýÎíÏÜM1eÅk@×µ¸A ³#Øâ¨Utq•ç*¦2 ¨útváíÞbf‡¿ª›zÊQášÛ§)AA¸‡¿'¿b­ÍÀryOÁà#dî{¿g*S¿HLÙ$ÂÌSyùß?|Wƒµ•âõîÆïچߵݺÞM³ò3nþú•Ö¼±“$_ÇF{M«ÛÊÇV‹„;?C¸ù T¦ñš‰|)¦4E|æ'TßúéèIRA¯c~Ï.¼Î­.'êÚ5)ŒL¡³MØ4¤²¢DÔu×_ÄÔÁXtÛ RI3;Œ-M­.!uõf^ˆÊÔ»V¾lÓ•Ÿ|8(OFA„kn›"¤Aávö–Ÿˆº–pçgÈìþYWí䮊k±q™trˆòþ5ÉÅ7îì&Äϸö¯îíN|´ot-/AÖI¸Œ™';Irùwµ)Ê VõgÈZKfûÏlx̉Å'÷4ÁÌø!Õ7¿™á¶Š)/Àkßèä`÷—UßAÎý<*cæF1…ï…Ø¨ÄrnÍûÀ×?®Ÿ{ù?$'ŸûÇù‡§Wï½jÿ`¸ï™¯ýûžº“ÁçÖ‚×¶kÆÆeÌìe—1¶ªOXNLél#dйüWA”'¤ Â5·KR‚ ‚p;wË_D]Kæþ_$³ësèæ^”r5øÜb+³$ÃïP~þß’Nœq§ÊÛµùrx­ëñ:¶à÷íY =ˆ¨âéøi’áw1Óç±fõ‹¨E‡ø$‚¤Löá/á÷íEeê¸v"_:}èø¨¾úÇØ¤úá^PûnMÚ7á÷îv-“]ÖDT\Á'0³Ã® Ê °å–ótÆ‹)LTâ ¯üßé¹Wþ™ˆ©[þ¶àµo@e›]‹çÌå{Ú|×¾ÃÆ¸I{:€´úÕ`ÃGÊ“Ráš§„)AAø0‡¹•'¢mÂ:²ý*áÖ§ÜXvoáD>‹)OŸyò›T>”˜RAïÃëØLÐw^çVtC»Q0/=Òñ3¤£ÇI'Î`M²¦DÔu‡Ú¸‚ ëÉ>øËx›QaŽE¡ÉI„™õ£üã•kÂkêÁ_w?ÁºÑù>T¶éªˆŠJ®0±¥ILiL,—¾†)LTâwÿú_4þƒ?üÕ~¯ûðbJÒ®B*Óà„Ôô€¿†!%‚°ÄSB„” ‚ ÜÜál5‹¨k 6?Afçgñ{w£ÂúÅßÒ„tâ4å—þñÉç®ß\øT®Ùµæ <‚ßµm>h^DUfHg.“ŽŸ"9ÿ6*¢Â:ù ÝôÎ#x`ãã.Þó¹*¦ ¶Z$¾ðÕ×þ„äÒ[ND…uè¦üþûÃkY‡Ê6¢´5)6.A9)UœÀ”¦\›žp6MˆO=?’œyî·óžýÆj¿÷ݺ˜R <¼ÎM¨ [™&º°æ>W"¤A–xJˆA„÷>Œ­%µx— ÉÜ÷s„[žÂëܲ¸M ‹+¤ÃG)ÿôÿ%¹ð:x>*¬w<?Š×µ ÝØ‰ 뜈ªµ™éK$c'I.¾Ž-M¡²òA»Uü ÁÀÃ>‚Î5»vÊ+˜Sž&9÷2ѱ ;6=Ž×²U—w-‘Öb£’“O liSšu“ظB|æ§kDL=ù•`Û§¿ny¢ó‰)/tRA®Vy~ÍU܉AXb«)BJAntøZÃ"êÚÍ‚Ÿ!ûð¯lx ¯uàšjœÚD¾ËoŸþ1^÷NüÎ-è†NW¥=W}SÃÌ\&=N|îelyFDÔmÂ&˜”Ì®Ïløˆ»®×´ZÚ4†$rÒ°–Íeã*¦2㪢ªÌܤҚwKk bêÆ×ÅZ”Ÿqü~SžÆLžuÓçÖ"¤A–ØcŠA„…‡­ýƒÖêoˆˆºÁ¦!—'÷è—A7u_|ŽI±6E)´v:Õ9la”døâ3/¸Ö¼Lƒ\ÌÛ·*€¶2 aŽì}Oã÷﻾Õ·ç»Ò’gM Õ"éÌEQ· Wˆ‡^9•œþþ•?ð£Ã«û^YS;>Ù©<ÿÆ“1à‡x]ÛQ~ˆ-ÍNœÌšú\ˆAXb#BJA®ˆ({í}Y®Æ{£›{É=öe‚‡Q¹f®¶ñX÷Ÿ¤Š™½LrñM¢Ó?†4FùÙ5w½‹[:Àb£ º¡ÌîÏ9 äj˳`¬Å'HGa«%·fÈ~ðvbã ɹ#oÄ'ÿî7W³˜š>´?ïm|ìw²»¾ð*Ì^7ÓšÔåõíÏÖgHÇO­¹Ïƒ)A„%v/"¤A„µŒˆ¨¸qð3x›¶ìÇï݃×6X›ô‹¤‡I1³ÃTßøÉÔ9W5µÆÚtî&¶ZD7uâ÷ìÁëÜŠnêBePÚ¯UI©kÄ”uáôã§Ig/CR]°†²7¼m벆ÄTæÑ/ý¾ß¹ý™…bʦ)˜”`ðÐ[ž%;¹Øa¯DH ‚ ,±¯!%‚ ¬EDD}À CÅëÜJ0ø(^ç¼æ>T®å…W—J/Î.lT";EõÍ?ÇÆPaR%uû°•*ۈ߿ËîÊ÷¡²ÍnržM±•Z6T\F5t¢;jqA8}‘ŽÀÌŽ€I!uÖi­‰©Ž-¿¬2õ.»,M6~”r-{ã'¯i%]ýˆAXb)BJAXKˆˆú€… ‹×µ ^ÇtK?º®åg°ˆ+˜â¤kÿŠJøë@7u£üÌÂã8¶Z$~‡ê«âr¤$ÐüCa£"ÊÏâ÷îÆë؊׺•Ë»œk\ˆüìÉå·H.4 xˆ`ãÇÐ]µpú+ ›¶— ¿ƒ-Mͯ›p›×-®\|ãùøØ÷~-àðÐj¾Ï†ûžùz0ðÈSfv˜ÌÞŸw¿iŠdìJë5µî"¤A–ØgŠAÖÓ‡öç æ€Rþÿ"Wã&6~¯{~Ï.WÕ:€®k… &šâ fnœtüñéŸNÇÌ\kÈì}šÌI±2Ö`˳Ä^£úÚ?· ÝO¸lTåá÷ìÀëÜŠ×6ˆ®oEùY¬5•I #¤Ãï_8‚)Œ¢³M‹+R”G°ñcƒ»5]No0åi’Ko¹ê+/@ÄÔXǸB|ò¹?9˜yà—†Vëï9óû?7˜}ôË3÷ÿâ—Áb‹“¤CÒ²'‚ ¸-‰)Aa53/¢¬M³\‘÷Áóñ{vãwn]$<rîçqÅbŸ&>ÿ*frˆtbWý1º¡Ì¿D¸ùI'=Vã˜Sœ >÷ щg×Z÷Î-a£ `ñ:·àwnqÕj í.´°q37Frù’‹¯»ö;/xoá§4á–§ðBçò‹¥•I1Åqâó¯BšÖþÙ3~x4ìì^Çf¼ö Ôý؃ÊÏ ­â_|ì¡túÂÏÛ™ÐÒ²'‚ ˆAV)3¿÷Ù|—(뉈º©s²‡×¾ ¿{~ÏN¼öèú¶ëEÔÄ’Ko‘ŽŸ"?JïùÇzíÉ>ü«øý÷»Jùj‹McÌÌe’¡‰†~Ê’£ã×06*ƒMñZð:·áwms•g™:ÊM3,NŽž ¹ø:éÔPê¦+ÏlAšyà‹ý÷£² ,*_IÒÙaâ¡ÐÙfd"ß­¿T¶ РÀoß ™:×*Yš$¹|”äÂkŽ;Øø«ÿzhµ^†øÌóû£c‡ÿpóã÷«LýšY~R‚ 7F„” ‚°ºðÕBÞÌ ˆ‡^:\<ÒŸ{Ùm[ Ò^ê ìµ¬ÇïÞáZôº¶¡ÚˆR â*¦4I:q†ôò;$£'œˆª>ÐËë"óà/ãwmuÁæjq¨¶™¢zôoHÇO£‚¬|ŽÓ⺩¯k~÷ts*¬Gi›DØòéÄÉÅ7H§/b“J­Åî^¯R@eêÉ<ðËøÝ;PaŽÅb*&¹„™¹ŒM*Dˆ˜º™ï—Ê4å…è¶A®ˆ[)` £Ø¸Œ2kÌܦ0ü«/þÑ?Ê8<½Z/Ëô¡Ç÷‡»žþÝ`Ã#»×‚˜!%‚pcDH ‚ «SÏëºÖX{À¦Q3&Áƈϼ@rá5âs¯ˆ˜Zˆà5÷ºª¨Þ]ø½{ÐõíæPJ»Ê›Ò$fâ,Éè1’áwIÇN`+…õ²á¶¿GfßñÚjâi˜ŠÊ¤#Ǩ¼ò°Æ ÿõpÓG7¬f1%BJa‰])Aa¥SüËþ¯sÇA¿{û€nD×åÁ2_ÉaJSDÇ@rñÉÅ7×´˜R~ÝØ…n]Oп¿w7º¡ó5™:G:v’äÒ›$£Ç?´ˆZð@Aöþ¿O¸ãSè|_­½L-"³$—ß¡úö_AZ]3kc¥^ÇüÞ=x-ý¨l³ËßJlµ€™¾H2r 3už´0rËQïû^â 
»É€^«!ÉnóEôÏx}Ì{Ùk­pGc­¯#Ǩg>ç±ccªÌê_à ÇùÝ­ÍÙzXñ$ß>0ÀžáÙ³>†^Ò²­¦ˆë«‹¨Îµ‘c2`Xž¿xJÁ‰Ó3ëc×Ð,'.nWÅ¿¼©‹õ¥Ù8ëõøùÃï^Q÷½R†ß}èª*ÉáÄð(ÿòƒÇ.ÊyÊ‹ ÙÒÝICuy¹ÎÕb×yNŒŒ215‹Á ûPÇN$ev÷Ýq v«•`8Ì£Ïíàȉ“ªœ“Æš*>ó±âr8ˆÄb<¿çUö¾}yïßΦ®êh¥²´˜[¶Æ×©"qjÎKOÿzŽŸ&¢VøÊCP¿g}C#¼zà jæ#“žA!¤àL„Ô'¤þçMݬ/ÍC›[â_ßî§Ðf>íod%Mmž®b5.ûib#©(øbI‚¼22{Ù»ºÝÕ\Χ»k±õxÃq?6¦Ê ©ÁÅ ÞHœ/oiÆe6à%ù··O²otîC˨“¸¹®„-nªÏ‹ñ”Â\(FÏœ—gö….ÊõüÕÍÝt—dã¬gn‰?ÞyðŠºï…º0üögî§±¦€“#ãüÓ÷½ Ç/vç³u]'ÍuÕ¸œL#Z­EI‰Å˜šóprdœ±éôË >,rJÁb2Vü±ç_âÐñªœ“ºÊr~íÞ»p9Dã1^Üû»Þ|û²Œ¥³©mT”’•-6ŠD™õÎs°÷{ûH$~Ïßùìý4VW‘!Cÿð{ß~G5ó!„”@ |0BH êR§Š‚#³>¾ýÎÖ®‘’H)TåÚØXî¦ÒiÅi2d·‚‰T¶«ÛÐb=×OLÝÓZÉý5Ø:<á8O¥ÙíT] ûBÌGâ|ió{Úÿe?¯yÎù˜½Ž[êKØXîÎf¼õè´” ÄäžpœÃ3‹ìœf¹ ×ó7·¬£³ØÀÑYúÒ¡+ê¾BêÂð[Ÿþ͵ÕhÐprtŒo>|a„Ta~[º;h©«!?׉ÉhD’´¤R ÑxŒ¹ùEúG˜˜C£9¿s)é Žîº›P$Â/¾Ì;=ÇU9'5å¥üÚÇï&?×I,‘`çko±óÕ7/Ý †FÃú¶fºš©*+!ÇfE¯ÓYµ²µòðñ~Þ9vYþåÝ;¿üéOÒT[µg{Þ: šùPÒz‡F„‚3× !¤@}BêonYGW±‹ pxf‘ÿ|w|ë/.Ú›ª]v6”åQ•kñ,¦ +¦–â 惼1îåÕ1Ï%­ô‰Žjîk¯Â¢×á Çx²wL•BjÌf>ç776f ´Ç’üÓ›}Dô9Ln¬-bkEåN+ö宊©t†¨œb.ãÐô"/ Í0¸0bêoo]OgQîjœý—_Q÷½R†/=x-u5h4Ç'ø¿ß}伎—ïÊesW;-u5æ»0MhµZE!ã]ôqbx”±©àÂýNi4¼ëŽåBàžÜ±›·SåœT–óß~õc¸]Nâ‰$/¿±Ÿö¾~ÑÏ+IZ6´·ÒÙÔ@EIѲˆÊnŸ”S ¡H„ÉY=ý¼Ós9•:û8ûÔrœ¡aplü²e| Š’¦wxT)@ 8síBJ Ô'¤þþÖõ´/‹‚ƒÓ‹<|hˆ<‹ñ¬>—j\vºJ\Ëų èµY1•T²bjh!Ä[^öΑJ_ü¯æ®>ÞZ‰Y¯c6ã©ãêR ‘8¿±±§ÉÀb4Á7ßä”BßȘR@pæÒ+„”@ ¨KHé´þþ¶ ´8P2p`rž÷Œâ4Îéxá„L¹Óʦr7µyv\f#F齌)<Ɉ/ÌÞ‘Y^ó^1õ¹ õÜÙTŽI'1Œð̉IUvÙ› EYŠ%Oëøoôqhzñ¢³ÈnæÖúRºK\”9¬Xô:´šlÍ’p2+¦öOγs`š`Bþq¦åïo[Ÿ³t†·'øëW„:_®D!õë÷}ŒÎæ´Z-cS3|ã{?")ÿòX³šÍlìl£»¥‘"w>V³I’H§ÓÄ“ |~FÇŸž!–H\‚+ÑðÀ·‘»†:Ó+…ù.¾pÿÇ)vç“HʼþîažÜ±ë‚_¯×±±£Îæ†S2¢td2 §d‚ᓳs¼ÓÓÇ‘ý¤/@¦í©q6:9ÍŽWßäBnÙ¼˜$å'FÇ…‚3W^!¤@]BÊ(iù»ÛÖÓäΊ‚·&æy²w Ç9 ©"Ée˪˜Ê51ê´h8]L½9áeïð, 剩Ïolàö†2L:‰‰@„çNL¨rËÞ\8†?žä3Ëç#q¾þÚqŽÎú.ú¹+œVnª+a]IÅv3æåŒ©”’&”™ DxcÜËîáYBg!¦Œ:‰¿»u=Mî”t†7'æù»½=WÔ}/„Ô…ás¿›®–&$IËøô,ß|øQâ¿@ ™Œ6u¶ÓÕÒHI¡›Ùršˆòùƒœãp_?Z­ƒ^w©‹¹ÿ£·®v¦{á•×Ù­¢ÂÙ§’ïÊå‹|œ’7IYæÍC=<ö³ç¿þôlêl§­±Žò¢BlV :IdV3¢Æ§g9Ü×Ï¡ãýtKÝç~õWèniD«ÍÆÙ {_SÍ|$’2ýcBH Á™+¯R@ .!eÖëø»[×ÑŸoŒ{yºo‚£þ‚?œLQá´°®$Ÿ†ü\fIʶéVÒâ2þ¦æÙ=4K<¥œ÷9¿´¹‰[êK0Jãþ0/œœ¢Q…RóÑ8x’:kW;~ýµ^ŽÍ-]²1TçÚ¸¥¾”®VÓib*˜[ 󿏗]óÄ~Ag+‹^Çß.ÇYj9Îþaß±+ê¾BêÂðÙ{îd}[ :Ibbv–zøÇDbï¯_¦×éØÜÝAGc=å%…«"*“É“«"ªçÄ YÆj1]â§b ŸüÈ­ä9Äâq^|õ ^~}¿*ç$בÃo}ê>J S)öéáÑçvœóñŒ›ºÚéjn ¤ÀÕb>MDBaF§¦9Ò×Ïᾓ嚺÷.Öµ6£“$Ægfy~Ï«©–Ø…G)@ ø9K¯R@ .!e3êù››×QŸŸC*æµQÏŸœÄfÐ_Ðó„2e ëJòhr;ɳœ.¦VäÆ[óìž!&Ÿ»˜úí-ÍÜTWŒ^’[ óÊÈ,UN›êâh1– —ùdg56ƒŽ¹pŒÿõj/Ç=þK>–úü¶×ÓUì¢ÀfƤ“Ðj@NgÅ“ -†Ø?9Ï®¡’íf7êù›[ÖQ——³WG=|ýµÞ+ê¾BêÂð™}” í-è$“ssüó÷#y¯Ó£N’–·æ5QVTpZVM")³206ÎÑÄâ‰K/¢VŠÑpßGn!?×I,‘`çko±óÕ7U9'96_þô'(+*$¥¤8Ðsœþôgú8³‰ í­t·4R\ÍlA§“Èd )ËB!Ʀf8ÐÓKïÀð%³çv¿ŠZ¶ìÅIÆ'…‚3×^!¤@]BÊi2ðW·tSëÊAVÒìcÇÀ4VÃÅÙÖJÈØÌl©pÓ˜ŸCžÅˆQ'e·ò-Ëqv;ØÞÑ9¢NÑ _¹º…jŠÑKZF|!L.Ph3©.Žüñ$Á„Ì}íUXô:<áÿðê1Nx—mLÍnª-¡½(·Õ„Iwz¶Ûˆ/Äëãö ÏVìƒâìÿ¼~üŠºï…º0ШDHÅ LL !%g „”@  .!•g1òµ›º©vÙ‘•4{†gÙ=<ƒù"×Y %dò­&¶V¸it;²bJ’Ðj5«SãKaöOγwd–@üìÅÔï]ÛÊõÕEè´Z†ƒôyýØ/pÆ×¥ ”” &dîm­Ä¢×1Šñ÷ûzX^ö±µærs} - ¬¦÷¤âr}°áÅ{– ×g2™Œ³o¼ÙwEÝ÷BH]î¿ó66wu ×é˜ñzù×Gž ¢¸ˆmT–‘c·&¢¡£“Óêë'Ь µüXÌ}w܌ەK<‘dÏ[xnÏ«ªœ³ÉÈï|æ~*J‹Q”4‡ûúùî“ÏüÒÏY-f¶twÐRWCiQA¶Ð¼6»­2)ËøC!F&¦8Ø{‚ãƒÃ—5Ξ~y/•dHEcq'§…‚3W^!¤@]BªÀfâ/·wS•kCV^šeßè&tIÎJȸ­&6–åÓ\à\͘Òj •ÎJÈŒ.…98½À˃3gÕÙí®k㚪BtZ  AÆ—Âè´ZÕÅQDNJÈ|¬¥³^Çl(Êßîíah1´fÆØYìâ–úšW¤â)bÊK2¸dÏÈ,C‹Á÷ÅÙ?¿u⊺0Ü÷‘[¸z]'½ž¥@É9%nvzžS ^NÍÐ{ryŸ ‰¨å‡b4Ü{ûM湈'“ìÝÿ.ÏìR§?0èõüîCPUVB:æè‰¾óøÓ?÷ï­3W¯ë¤­¡ŽBwV“yµ¾WR–Y ŸäàñþK–õ‹âlÆ;ÏS/½¢!‰ÆšBJ Þ·ö !%êRÅv3±½‹J§„¢°s`†7'¼¤K+pV2¦6”æÑZà$9ëf¥NQ8!3ˆp`jÓ¿PLýɶ¶T¸‘´úçƒx± Ò&üRK)„“2w5W`ÖIL³BjÄZscÝTîæ†š"ÝŽÓ ×'Ri–b Æ–ÂÔæÙÉ·˜VãìßÞî¿¢î{!¤. ¿í&®Û¸ƒ^¢((i½NhSË"jr𾡿æלˆ:•{o»‰¢ü<I™}òôK{T9'’¤å+=HME™t†ž“ƒ|ûÇ?yßß9sìlìl££±ž‚<³ ÝJ¡ù¤Œ/`xbŠG{›¸ìqv͆nŒ=³ó <µs·jæ#1<5#„”@ œR@€º„T™ÃÂÿ¸±‹ ‡•xJáÅ)L. 
—.OFQ(!“c4°¥ÂMK¡“‚å:E§Š©é`”ý“ó¼2<ËBôýíàÿì†N6U¸‘4Ðç 0ác”$ÕÅQBQ'SÜÙTŽI'1ˆðׯô0î¯Ù1_[UÈÕ•48p™$mvk•’F§Õ “´$R Ï÷OòíWÔ}/„ÔùS_UÁýwÞFIFCö±2³,¢"LÌÌÑÓ?ÈÜüšQ™LVz¹³Bêµwó“»T;7_ýܧ¨«('“ÉÐ;8Ì·~ôÄêËs:¸ª£5›•ïÂb2£ÓI¤ÓiâÉär}¯)ö9Æðøäš¸žÝr#×o\Ñ gnaQUsŠD™žBJ Î@)@ @]BªÒiãÏnè lYH=×?Éá™Å˾Å-”±õl­( ¥Ð‰Ûj¬“–kL…’2Ó(ïL-ðÊÈžð{máÿb{ÊòÑǽ~ú¼~Üõ5—Ói"Éw4–aÒIL"|mÏQ¦‘5?ök« ÙVSD}^¹fC6ãN£Ad2Æüþù­›[ºbî{!¤ÎšŠ2¶twPWQNAž Ýò–áL&C0a|z–£ýÌzç1›Œª¸¦Lî¹u;%ù$e™7åñ^RíýîCÐPUI† '†Fù—>FžÓÁæîšjª(rçc1™$-ét†D2‰/dh|‚·ö221µ¦®çî›¶±móLžE?Ù± µ¼Ç„ÂQFf„‚3BJ P—ªqÙù“m”æXˆ¥ží› Ç³„¤Ñ¬͠gs…›¶•Œ)½i¹ÆT8™b2áðÌ"»†fñ„c|í¦nÖ•æ¡zæ–84³H•Ó¦>Á‘É‘SÜÖPŠQ’÷‡ùÚî£Ì„¢ª¿V£áºêBn¬)¦!?+¦4Ëq%+i¦ƒQŽ{ü¼<4CŸ×¯úû^©Oei1[º;¨¯ª$ÏéÀhÐ/Lj&»å3)óòûéŨ²Æé ÜsË”¸‘S)Þ:|”?¿SµñýÛŸ¹ŸÆšJF&¦› ¥¶wž ‹É„V«%I“LÊ,úý ŒMðÎÑãŒNM¯Éëùè ײ}ë&LFó¾%žÜ±‹ô)A×2Áp„Ñ™9!¤à „Ô%¤êòrø“míÛ-ÄäOŸ Þ¿*Ö ¡„ŒE¯cS¹›ö¢\Šl&Lz ­Fƒ²,¦f‚QÍ,ÒQ”Ks€£³>^óÐ^˜«ÊXŠÈ)n­/A¿,¤þrבӲÁÔÀ5Ù IDAT€AÒòÉÎj>ÞV…I'eËg2¤3O)xÂ1zæ–Ø54³&:ž³€Bê¬)+*`sWMµÕË"*++•´‚¢(èuz´Z ó¾%þãñŸb³šU>vË” §R¼}ô?zöEÕÆ÷o}ú´ÔÖ ÕjI$“¤”fã{Qñd‚…%?ƒ£¼}ô3skúzn¿þjn¾f3f£‘…%?O¼¸‹tZQÅ\BaÆf=BH Á!%¨KH5ºüÑõíÙÌDåOôŽ1¼Z³ã %dŒ:‰Íån:Šr)²›1Ÿ’1‘SèµZÌz %áÐÌ"/ ΰ¹Ü­ÊXŠ+ 7Õ£—$Æ–Büù®#,Dâª»ŽÆ|¼­b»%IEA§Õ¢Ój²bJNá‰ÄW3ÝÖbáö_. „úe»óÙ²®“æÚjòr˜ F´Z-Š¢‰Å˜ñΓLÊÔU•¯Š‚ïþäY,*Ù¦wzÌF#·o»‹ÉŒÏïç{O=§ºízËñÀÝ7Ý@yq)%Å»ÇúøþÓÏ«êJ Ülìl£±¦ŠŠâ¢Ój{Eb1ýN±ÿÈ1¦=^U]Ûö-¹ã†kVãìÉ»‘S²*ƾ1áñ !%g „”@  .!Õ^”Ëï_ÛF¡ÍD(‘âÑža&ýQÕ|ס„ŒN«aKE]Å. lfrŒºÓj -†8:ëãø¼Ÿj§MUb*Ép]uzIËðb?{ù0xRu÷D[a.ÿýºSâìè{†g¹±¶˜­•”9,Ø zô’–T:CTN1Œrpz‘]Ã3LÖ~L !õ~\N[×uÒR[M¡;/+¢´Z%M,dzàcljš¤œ"בCÿÈf£‘Üp-V³_ ÀÃO=‡A¯S]Ì+Jš»nÚFeI1)EáÐñ|ï'Ϫb쥅lêj§±º·+£Ñ€¤•XÙÉO$yõC¼yð³ó ª\§·mÚÀGo¼«ÙÌR À;v!ËêR¾@IϼR@pBH êRÅ.~ÿÚVÜV¡„ÌŒ0Œªî;%d4¸¦²6•a^~ÍJ:MTV˜ F9:·ÄqÏ•*S×V¢Ój\òg/"”U7?Å.~ïšV lïÅÙ3}8Mnk(ec¹›2‡ër¦›’ÉI*L"šYdçà4ÞðÚÝ®(„Ô{X-f®»j­õµæçVô:Oà]XdbÖC<™Ài·¯~n¥Û]7]Õla)äO?$iUó)%Í]Û¯§²´EIs¤¯Ÿÿ|ò™5=æ²¢B6wµÓXS•­íe4db: $-™ L{<|ý;ß'žPŸ_ý]ÝÐÍÝ7o[³'wì"™TÇõ,úLy„‚3BJ P—ZWšÇW¯nÁm5LÈüàð0s¡˜j¿û¤’æ ©qÙЭ+¥Ñ€¤É ލ¬à Ç82ë£×ã§Âa]ÓbJ«Õpue:­†… ²óQ9¥ºyYW’ÇW¯y/Î>4ÄÏúOoŸo1r{cëJò²bÊ ;­hýt0Â;S ü¬Šà”rBHÍbasW;]-æ»°˜Ìï½N$ð,ú˜œ™#)ËØ¬–÷}~jÖC4çc·ÜˆÍbÁ òÃg^`õX8+R©4wn¿Žª²Òé4GO ðÇŸ^“c­,-fSW; U¸œÙÚ^’¤%¥(Äâ æ}èõzŠòóÐjµLÌÎòOÿ˜HL½kÅ–îî¹u;6‹…@(Ä“;vO¨c;ôâR€©y!¤àL„Ô%¤®*Ëç+W·g1ˆË|ïÐ*‹f¯`Ñ븷­’º¼”L†©@„DJ¡4'+8tÚì–°l­¢8Çæ–èõ,QšcY“bJ'iÙZáFÒjèŸòÇ;’H)ª›— ¥y|åêVò­Ù8ûÏwÙ9øÁíàKs,ÜTWº’zÃuT——’N§ééäÿ=öÔšcUY ;Ûh¨ª ÏéÄd4 Ñd‹ÌGãqæ}KÌx½„#1ìV Wu´¢“tLÎÍñ/?xŒ`8‚Z¹ª£•ûî¸9gáOíØM4®ŽµoaÉÏôü¢R@pBH êR[*Üüö–f\#þx’ÿ|w¥˜z·aØ :îi«¤Ö•ƒ¬¤Ù;:Ǿ‘9ºK\´åRl·`3èÐ-gmÄäsËbê¸×O‰Ý¼¦Ä”A§esyVHðø£‘•´êæeS¹›ßÙÚLÞrœý¿ìžý…Ÿ©Ìµqs] ÝÅ®ÕnŠ:­YIJÈŒû#¼1îaÏÈá51õ_QHôz¶twÐÑTOYQ!V‹$‘NgH$“Ù¢×#c ŒŽS_UñYQ§²2íñrß·`·Z †Ã<úÜÕt?;•”¢ð‘m×RSQF&áØÀÿþè“kblµe\ÕÑJ]eEvkžÁíš—ÎfDy}ÌxçÑët8sìŒMÍ`1›ØÔÙŽ^§cÚãå_ø8KÁ j׊u­Í|⣷cµ ‡yú¥=„£êØ®>ï[bfÁ'„”@ œR@€º„ÔµU…|qs.³¥X’ï¼3°&·C-zI˧ºj¨Êµ#+ »†gy¦o‚B›I£¡¹ÐI{aî)SÒˆË)<‘8½sKôyýÚÖ†˜2é%6–»‘4pÜãçw$­Âµvke_ÞÒŒËlÀKòooŸdßèÙµ‡¯ÎµqK})]%. ­&LËSr:C0!3â ñöÄÉ'ï¼5+ "a{þ%’²ú$yJIsûu[©«¬ “Ép|h˜{ä‰Ë:¦úªŠeUNnNΪˆJ§ÓDã1f½ xÑëõ8sì§ EQØÒ݉A¯cÚ3Ï¿ýè —üª]+:›xàÎÛɱÙE"<ýòB*Éøò..1»(„”@ œ‰R@€º„Ô¶š"~sc#N³_,Éw JªWH¥Ò>¿±*§¤¢°sp†NNá¶šVÿFÒhh*pÐZà¤ÜiÅnÐ#"¦æ£ z=Y1嶘.«˜²t¬/ËG ôzüüáŽwU9/×Vò¥ÍMä.‹ÏÙßÏëcžuŒÆ|ÛëŠé(Ê¥ÀfƤ“Юˆ©x’_ˆ·&æÙ54Cò2d‘ýWR’¤eC{+ÚZ(+*Àfµ “² ’rŸ?Èàøƒ£ãäå:ÏZD­”ez†xà®ÛqØì„"žxñebqõm#VÒn½f3õU•dÈÐ74¿þðñË2–úª 6uµS[Q†3ÇŽÉpêÖ¼¬ˆšñ.`2NQ+Dc1–!®ÙÐ…A¯gv~oýèI¼‹>Õ®íuçÛˆ«°‹Û©|ac#¥9b)…gû&xkbž“þ—~NÉdhÌwÐUì¢ÂiÅfÔ£×jÈd ¡(ÌGâôyô{ý8L†K"¦œfÅ. 
+¤þL¥B궆¬øÌ1fÅç?¾qœƒÓ‹äØëKóØ^[L“ÛIžÅ€A’Ðh ©¤ñÅœœòêèo\d1u¥ ©Ž¦zÖ·6SSQ†Ã¾"¢@N-‹¨ÉiF&§0DD­päÄIî»ýfr"±ÏîÞ·FEÁ/çúh®«Fƒ†±q¾ñ½]Ôóµ7Ö±®µ™šò2œ96ô:}¶X¹¢‰Å˜ñÎ3¿¸„¤Ó’çtžõqEÁç°¡½“ÑÀ¼o‰ÿxü§ŒÏ̪vh¨®ä³÷܉k9Î^Øû:ÞÅÅ53>ƒÞ@EIu•åä¹°˜Mè$ÝjVb ¾¡0ßµ@ ¬"„”@ 19•YÉÖ•4á„ÌL(Ê»S‹ìc&¸v¶ŸÜÝRÁ§ºj°õxÃqþýÀIUl5üyh5¾°±b»…˜œâ©ãœYÀfПõ1Ré4Mn'íE¹TåÚ²SË5¦’ŠÂb4Á o€þù6ƒî¢Š)—ÅHGQ.àÐô"¾ë°*çå£Må|v]-öeñù^?Îá™ [fkE×VÒävà21HZà=1Õçõóژ眷 þ2®!Õ\[ͦ®vjÊKqØíô:@CJI …›žexbƒ^AEÔ ÇNrÏ­Ûq9Dã1žßóó>uÖ*ºîªõ´ÔÕ ÑhŸà¿ûÈE9O{cWu´QUZ‚ÃnÅ ÏŠ¨TJ!‹²àó‰Æt:I:—ØÆ ÑÕÒˆÙhbai‰ÿ|âF§¦U»VÔWUðÐ=wâr:‰Æc¼¸÷ æ.ïÄt:ƒÉh¤¦¢”šò2 òr1MH’D:FNÉ$’2IYæ‘çvÜð•ÏÞ¿@ ¬"„”@ /ÎdÝ\#&„¤ÕRÒDä3Á(Gf}¼22ÇøRø²õžÖJîï¬ÁnÔá ÇùÖÛ'I¥Õ+¤$†/lj¤Øn&’Lñı1Žy–°èuúXJ&CËNg±‹ê\v“ò˜J( ¾h’…½ÿESùVm…NÒÀÁéþr×UÎ˯´Tð`W-v£o8Î×_ë¥gn颜ëÚªB®«.¤!ßËl@/iÉœ!wÏrhæÂfC¨]HÕWU°¥»ƒšŠ2rsìôÙlrJ&Ž09ç¡x £á∨Žs÷MÛÈs:‰Åã¼°ïuææÕY«èš Ý´ÕסÑhž˜ä¿ûòY¹­¡ŽÍ]íT”ã°ÛNQ)B‘¾@€h£®WvÎæ}K$eù‚‰¨SYôèlªÇj¶° òƒ§ŸçÄð¨j׊òâ"~ý¾Q—K<‘dÏ[›X>“H4N®ÃNGSUe%8íöÕˆJZ!™LK$I$“„#QEáÌÔ-!¤àž…‚Ó…Ô U¹6¶ÕÑUì¢$ǂ͠CÒ¾×Ém1š•{Ff/X±ç³áÓݵ|¬¥³^ÇL(Ê¿¿}5ÿ’›u:~cc=n«‰`Bæ{‡˜ Ç.à>Œ1ÕU’GˆÃd@¯}¯^ÑR,ÁÀB“  ’–²œsSZ­†B›‰&·%á­‰yþvo*çå®îm}O|þý¾.]‹u£¤e{] [+ ¨Îµ‘c2¬Ö‹§<á=³KìžaðÇ¥!URàfëºNšj«p9˜ F´Z ©åÂ×Ós^ƦfH§3仜—«ÕjénidcgeE…Ø­ÖUY’”³5¢|KÄ ô:’tq3{²Bª›ÅJ âÑgwp´@µkE±;ŸÏß/Eùy$’2¯¾sˆc'1~ˆgK")SVT@}U%•%EËR1{%­R’I™`(L8;«c !%ïG)@ à ©rÍn¨)fS¹›r‡%ûb|Êö/<ÉàBw¦Ø;2Kâ"u¾ûõ«øHc&ÄT ·œD«b!•cÔóß6Ôã²ñÇ“|ûÀ¡„|QÏ‘ST:­l*ËJ§Ù€qe.•4þX’_ˆ£s>L:é¬3¦t’–"›™úüRé4¯yø_¯öªr^~m}=w6—¯ŠÏ¿yå(£—±¨ŽQϵÅ\]Y@¹Ãºzÿ¥ÒbrйPŒƒ3‹ìšaÂ9«c®U!åvå²¹«–º óó0³QŠ’&ãYXddrEQÈuä\öX™˜žåÚëN£cªŒûõm-¬okA'ILÎÎñOßÿ1áhô—Ü÷:Zéjn<-#*“aµÀüüÒ±xƒþ⋨—ü´5Ö‘cµŒ„yìù—8tü„j׊‚<¿ùÀÇ)vç“”e^÷‡úú1 ì)E¡¢¸˜†êJJ ÝØ,ôË[SJš”’"‘H GˆDch>Dý*!¤àý!%œZÁ¢×±½®˜«Jó©>cû—œNˆ'[ óÎÔ»‡g‰$St¬_ØØÈí¥%‰‰@„o½Ýî"mù¸8M>·¡ž\³¥X’o½}’¨œº$çŽÈ)*V6–åS›—͘2ê´h85c*Dÿ|I£¡4Çò §—´ÛÍÔæe…Ô¾Ñ9þ÷kÇU9/gŠÏ¿zåèY‹ž‹/7Õ•°µÒMiŽ›A‡n9c**§˜ D88½Èî¡YfB¿X$¬5!å´Û¹zCתˆ2›ŒÙbÉé4±DÏ‚ñ™Yâñ$.gΚ‰•‰™9®ÙÐuš(81<¢Ê¸ïjibcG+:IÇÔœ‡þÁcÃ,bõz›:Ûik¨£¢¤»Õ‚NÒäTŠ`8‚gÁ‡œ’/IFÔ™øüZëkɱÙE"<ñâ˼ӣÎß#€ü\'_|ðW))p“”eÞ<ÔÃ;=½X̦ó>¶V£¥´¨€¦ÚjŠòó°šÍ«ó•RRÈrŠx2I0&ŸÓ6K!¤àý!%|8!uª|ØVSÄær7u§È 9!”™ DØ?1ϾÑ9£‰ 2Ö/mnâ–úŒ’ĸ?Ì·œ\bª|ɰùìº:œf¾X’Û‚xêÒv \ɘZW’G}~¹&#I‹Fò)bjh!(±°˜2è$Šífj\vd%Í+#³üß7úT9/gŠÏ¯í>Ât0ºfÆç0éùHc9Êò)sX°êuHÚlóH2Åd ÊÁév LÿÜ{o­)»ÕÂÖu´7ÖS˜Ÿ‡ÅdB’´(é4ñDςə9²LŽÍºæberÖÃÖu§‰‚ãƒCªŒûΦ6v¶£×é˜öxù—>†?xúVU£AÏÆŽ6:›)-tc;CDBaææI)) zý%Q+øAZj«qØí„£žÚ¹‡·«³¦@®#‡/=x¥…È©ûôðÖáló9Ó 7PQRD]e9…ù.Ì&:IG&“Ë•®yK ñDò¼ ¨ !%ïG)@ à܄ԩl©psCM1uyvò,FŒ:)Û†]INÊÌ£¼;ýÿ³÷Þqr]÷aï÷ÎÞggfgg{_ôB€€HнI¦ ­jR=rä’(²_ž_âÄqülÇq^œYv$[qlË‘"Y–dI‘eËN‰^»‹Åö:;½Üž?fwY@É,v/|¾ÿñÙ;çœÛö|ñ+ž¼0÷¶7õŸ¿a3wõ¦qÉ2³Eþì…á«ÚöúrÓôò‰Ý=D½n2…/<‡j˜ë2–’ªÓõ³;gS2BÌ÷j1•¯iŒe‹ -Ö‹h§C¯Þyœ2Ía±šaðØè,_zΞ)2¯Ÿ¿õØ1æŠÕ 7ÎTÐÇ=}Íìi‰ÓpËÈ’„nZ”TéB] ?<Þ±k.§“™…E¾òo“ÉÕ»9z=î•”¾Í47ÖÓ¹œNy-5/W(1¿”AUU<ÏUéþöÓÈŠôwu ‡)W+|ÿÑ'xæÈQÛ¾+"¡ ÿì㥵)…nè>~Ч%xs÷†iZx½zÚZéjk¡1Þ€ÏëYkZ¢é[DE IDATš¦SUr…"Šª]–s)„”@ \ŠR@ÀÛR«ìm‰s[wšd˜¸ß‹×)#K`¬DmL*ŸÍ2xa–‹o±ϯܴ…Û»Ó¸d–‹|í¥[¯}:äãã»{{\,–k|åÐÚ: ©UŠŠF:ìck’þDdE2®¤ò™…•´ÌÓó9‰t°.¦¼.™æ°ŸŽhÍ0xtd–?~ÞžBêÕâ³Ä¿?p”ÅrmÃŽ·-à®Þf®ki òã_‰˜Ò “¢¢1™/óÜÄÎ’¯i+ÓõRA¯gpÿÎíìÚÜOsªñÕ"JUXÎ噘Â4 ü>߆¿VfæØ»c+­©—EÁÑ3Cر´Ý–¾nºn'n—«.¤þ×w¨T«ìݾuå|% úü8õ(UÓÈë"ª¦(õ›uQ«ä‹%ú:ÚˆE"”«U~ôøS!µÊÎt7w¦Ø–Š’ xñ¹dR=¨¢Ì«œšÏòÄ…9Î-æßÔ±ÿå;·rkwN‡ƒÑL¯Å´ñ£¼5àÁ]„V„Ô—žCß **A7¶¯Š)÷Zô›j˜k*cÙçóM±5!¥ŸŸá+‡ÎÙò¼¼R|Ž-ùw޲|™ÒN¯$± ÷ô6³»9N*èÅ÷1u1[⹉¥XS¯ºúúÿþ¶}ƒmé¿Ù!×;vª*Ëù<ç.\døâ½ím~[\+³ ‹\·uóŠ(Ð9rò GNžÁaÃÈÍMÝ]Ü|ýnÜ.óKË;;D[º‰Ö¦—Åa=k5"j UÓñn€ˆ¨×R(•éik¡!¥R«òã'žá±çÛö]á÷zùü§ £9n=}–GŸ=D4ü©ß+Wj„C¶õ÷ÒÓÞJ,Æív½JDÕT•ZM%W, ëæ9—BH Á¥!%\~!µJ"ÌmÝMloŠ­Dm¼,¦jšÁbEáÌ|Ž'Ææ8>»ü†Žùk·lãæÎN‡ƒá¥}rlÝ#ŠÞ± ÙÞEÈãd¾TRæ{7¦ë[lJÖ#¦Ü²ŒCz¹^ØØr‘‘L‘;’´†(†Á‡¦øêa{¶Yÿ—ïÜÊ-]M¸äºøüG^’ò¶‘éiqw_3»Ò 4¼x]Nè¦EAÑÍ88±pûnÚzU7ˆËùÂmA¿opµæ¢j,çóœgøâɆMÉ„­®•¹¥%vnX/>Ë¡ã§l™JÜßÕÉ­û®Ããv£j:ŠªâóxVê@Õ#¢²ù"³‹K†Ž×ãÝp"jí¹U®ÐÙ’&‹Q­Õxèéçxäéçmû®ð¸Ý|áSÐÑÚŒiš;3ÄCO=G,zÝÏ—+5¢av ôÑÑÚL4Âãv!I Ã@ÕTªŠŠ¢ª”Ê ÃàJ†õ !%—"„”@ 
på„Ô+¥Ëm]Mìjn 9\/Àì”ëõ*jºA¦¢0´”gpt–#Ó™Ÿz¬}ëvnêhDvH -øîé‹(º}…TOCˆmï$èv2W¬ò•CCNH­mðV"¦ö¶ÄÙÜ!ðâ‘åz)Ó¢¢êø\2ÙAU7øÑ¹)þì{ ©_»y7w½,>ÿí£/QR4ÛÍc ᮞ4;VÅ”S^« VP´Û“ïUÝ Z–u›e1¨j*Ëù£Sœ§!¶ˆZe!³Ì¶þ^:ZÒ˜¦ÉÑ3C<÷Òqœ²Ýš-HÜuÓ~º:p8ÔCÖZjÞr¾Àìš®ðù7¬ˆzYÈTiK7‘lˆRU<{ˆ?ñŒmß.§“/|úAºÚZ0M“ç†ùÑàSÄ£‘W}®RUH%âlí립9M$Äír!I`˜&š¦£jÅr…rµŠi^÷§R@ð:o^!¤àÊ ©UšC~níN±·%AKØOÐãÂå°E7ÈTTF2ž¾8Ï3ã ¼Þ3úßÞ¾ƒw´%‘çó|ëøv~’$"üìÖvü.'³Å*rhkƒÏ¨¨h4ø<ìoO²%!¬‹)‡H`˜Çç–ùÿž<ÅrU±Ýyy¥ø<¿Tà×~‰ª¦Ûö:ÛšŠrwo3[£$ƒ+b n—$éªnkŠz[¾X˜bhlœh(h[µÊâr–Í=]k¢àøÙóFoG;–eqzd”ï=ü¿Ÿ¾Žvú»ÚinLðûq­œ+Ý0Ñ š¢R,•)W*HŽõ‰ÜBJ .E)@ àê ©U|.'wõ¤ÙÛš »!HÄëÆµòDzfšjs%^˜ZâÀÈ eUç·îÜÅžÖàÔ|Ž??2LØë²íÚoOÅxÏæ6|N™©B™?94„Ãfí¹ŠŠ†Ïåä=›ZyG[rM.Z+¢¢ª1™+óâL†#³¶S¯Ÿgòü뇎l˜bó—ƒ}­ nínºýîÞæ«ºAüùßøÝÛönܹyàšYËl¾@w{+½ímX–Å©áQþð†R’$ÑÓÞFoG©D>¯w­¸¼iškR*W,òÄóGèlkÁëqÛî¼Ô•T¼¦dEÕxúÈQ¾ûÐ[_k_øôƒôwv`a1rq‚Sãlè£)'àóátÊX膎¦éÔT•|±D¥ZC^çR!¤àuÞÉBH Áú ©Uœ·u7qC{’žxˆ˜×ƒÇé@Ô•¢ÙSù2‡&¹¾%Á¶¦pb.ËW‘xm»ö»Ò üÌ@+^§Ìd¾.¤ìX y¦P!äqñ›wì$î÷`Q¯+%KÒZ1í’ª1S¨pd:ÃCç§7t׺ç.ö®ˆÏÓ 9þÕßáZû‹Á4Í«Þeïçãwo»~ëæÁ›ú®™u,K´77Ñ·" ÎŒ\àÑgâqo,Qîr:élm¦¯³ƒdC ¿×‹¼RËOQU2¹ŠªÑ’jÄãv“Éå8|üTý¿m(¤TU#‹ÐܘDÕ4ž}é8ßþ»Gl}­}þ“°©»‡C¢ª(†±Ù¶ÚýpUDeójŠºa"Û„‚KBJ X!õJö·%¹£'M_—§CB7MJªÎL¡ÂK3c*_Þ0óù»w³»¹.¤ŽÏeù7BH]~þ7~÷¶wnÜÜÛ}ͬcMQH6ÄèîàüØ87ø4>¯g]Çåõxèlm¦»­•T¢¯Çƒìp`˜&Š¢’Éå9?6ÎÁc'˜œàöw\Ï}·ßLÀçc9ŸçÔÐÑHÈ–BJ7 ÂÁ­©º¡søø)¾þƒÛnÝí­ìݶ…ÞÎ6ZR8åºh²,‹ª¢ÔET­F®XBÓô [ëK)@ ¸!¤€)¤VÙÑãæÎÛšb4¼Ýε¢ßºi2¶\âä|–#SJªf+1e˜·t¥¸½;Kv0¶\äO_8džBj¾T%æóð/ß¹•Æ —¢¢ñõcX®(ÜNŠŠÆ¶TŒéA~—Œ,I–EY5˜.”96³Ìc£³Ln1õéŽÍ.ó¼tÍÝ÷ë%¤nÚ½cpÓŠ¼¹Ð4h8ÌæÞ.$$†/ŽóÃÇžÄï[ŸTb¯ÇCw[+]m-4Æcx=’Ó2©) ™lžs.rèØI¦ç^õÝ[®¿Ž÷Ýu+ŸŸl¡ÀÐèE~n·ËŽ×7Ÿ¶tºapääiþêod›ñoêîdïö-t·· ‡q»]Èy­¢ª©Ì/-S*—1LkÃBJ .E)@ `c ©UzãaîìIóî|¯èäf˜&5Í`±¢pv!ÇáÉ%–«Š-Ä”f˜ÜÙ“æÖî&œ£™_}a¯Ói»kh¡\#îóð«ïÜB2ॠh|í¥jšÁuÍq&óeÆs%¶¦bìJ7 ú¸e’„¹"¦f ŽÏ-óèðÌºŠ©ß¿wÛ›bXÀKÓ~óÀÑkî¾_/!uóu;û»:®¥u$à÷±µ·I’Ÿàû ø|Wq ^‡¾Ž6z:ÚˆÇ"¯Šˆª) ‹ËY†.Œsðè f—^÷87íÙÅýwßNÐï'W(0:1…Ûå²¥²L ¯ÇM{KÃ09zæñløqoíëaßÎmt´¤‰†BxÜ.$ÉaXX8åzáòL.ÇüÒòº+£!%—"„”@ `!õâç_|Ï>zã!$êE³%À¹R˜·¦d* ç—òœ\b¡TÝÐbJ5Lîíkæ)œ‰óKþÇ çñºì'¤Ë5~¿rÓVùšÆŸ¿8ŒD½“à+™Ì—¹-²mEL¥C~îWGLÍ—ªÉp`t–ñléêþqüþ»ö²-Å^œZâ·;vÍÝ÷ë%¤nÛ»{°§£íZZG|^/Ûû{‘&¦øÞ#¸ RåJ ¯ÇÍö>ú:Û‰GWD”ì@7 jŠÂB&˹Ñ1Ÿ8ÅìÂÒO=Þ;vïàƒ÷ÞAÐ _,2>=‹äp»ì(¤L<-iLÓäøÙóüÙ·ÿvCŽÕá¸nëfvoÙD{sá`·Ë…$ÕÿÑEÕ4tÝ@ÓuÂ’C"“Í‘+1m²—BJ ^çoN!¤À>BÊã”ù÷îaS2ŒaÁt¾LYÕiû y\¸ èËU•‘L§3LäÊÜOò(ºÁ}›Ú¸±=‰ì8·XàÏœÇgC!•©($ü¾pÓâ~¹šÊÿxa¿ÛI<üºß™Ì—Ë–ØÜaw:Ns¸.¦œºiQÑtJUŽÏfylt†‘Lñêl%‰ÿô®=lME1,8<¹Èï<~üš»ï×KHݾoÏ`w[Ë5³Žš¦á÷ùر©‡ÃÁØä4ûÈãW´†T¹RÃãr±}S}]í$bQ¼n‡Ã4¨Öj,.ç8;rCÇO1¿”yCǽ~ÇV>üî» äKEfæÑ ÖBÊ4MÜ.]m-X¦Å‰¡a¾ú­ïn¨1º].öíÜÆÖ¾nÚ›Ó„\N0LÝ0PU•|±L¾XÂïóÒÚ”B–dó²ù†iÚâ|!%—"„”@ `!åw9ù½{¯£?F7-ž¹8ÏŸâΞföµ&hêbJv`Y ™&¹ªÊp¦À©¹,£ËE<¨ÎFUÓ¹KûÛ“ÈœYÈóGêÇnd* © —Ïß°™¿‡\UåO‘xéŒêw'óe¦ zãázÄTØGÐí\I7²¨j:sÅ*'æ²<~a–á¥Â•Ý$Êþã½{ØÜÁ0-N.òO\s÷ýz ©;÷ïìlm¾fÖ±¦(|>vmÀáp0>5Ã÷yïè²·µ©þÎvâÑ(·I’0M“ªRc!“åÌÈžé8™\þMÿº­›ùè{î!R(•˜_ZFQU\6”ä¦iâr¹ènmÁ²,N ð'ÿë;ã]æõ²×v¶ô’N&øý+" tÃD×ujªJ¡T¦R© 9(ªJ( µ)…S–É dry C)@ °+BH öRa‹ß½ç:zãatÓ䩱yþàéSø\NîìI³·5NOCˆˆ×ËQ¯­¡™&…šÆx®Ä‰¹,ÃKydÇú×ݨh:ÜÚÁõmIÀé…ùâȆŒæú‡ÈVšB~~ù›hð¹ÉVU¾|ðmÑ-!ÿ:†fš,–kÄ|nzãázĔˉì0-¨i:s¥'WÄÔÐbþŠÌÅç’ù½{ö0 c˜ÏŽ/ðûOž¼æîûõR÷ܰo°­¹éšYÇr¥JÐïc÷ÖÍȲƒ‰éY¾÷ðãx.cºp¹RÃïó²} —¾Îv"ÜnW½X¹iR©Õ˜_ÊpntŒçŽž`9÷Öî]›xà½ï" R(—˜]XB×uœN› )§Lw[gF.ðå¯{]Ç‹„Ù»} Ûû{I%â|>œÎzM(ÝÐÑ´ºˆÊKTªµWÕ‡RUp0@s*‰Sv’/•Èdsè†a‹ó!„”@ \ŠR@€}„TÌçæ·ïÞMOCÍ0yblŽÿúÌéW}ÆéppkWŠÚéM„ˆy=xœ$@5-ŠŠÆt¾Ì±Ù,ç—ò˜–…c¥kßUßȪ:ÝÑÅž–8pr.Ë×Ú2e/WSiùùÅýÄ|n–«*üüYúa’þ7_Kg¹ªàuÊt7„hV"¦êÅÏkšÁB¹Æ‰¹,O\˜ãÌBî²Î%èvò»÷¼‰÷ôÅyþóS§®¹û~½„Ô½7ílmJ]3ëX(–ìÙ¶§,39;Çwz ÏeË«"j׿~z:Úˆ…ÃxÜîzjžaP©Õ˜[\âôð(/œ8ý¦#¢^ËŽM}<ø¾w †(UÊLÌÌcY¦-…”eZȲƒîöV†.\äþê[ë2–T"ÎõÛ·°¹·›Æx ¿×‡,ËX–‰¦¯ˆ(E%[(PSÔ×혧j‘`€¦d—ÓI±\fq9+„”@ Ø!¤û©„ßÃoß½›ÎXÍ0xlt–/=wö'~~[’Û»›èO„ið{ðÈ2‡„n˜U™B½pöK3˦yÕ  ïîaWº€ã³Ëüõ‰± 
¸"À|›AŠˆˆˆ(* RDDD1tÇþïÚÔõÅ‹ S~ÂRôçRH*Jއ ekð³€'|’AŠˆˆˆ(2 RDDD1¶¦nßÒÝ©ÈÒ«~­'BªŒM­i¤teÇÇèBiM)_„øÄ·c""""ŠƒÑ%à“=ùå;·ö¼ßÒ핾Æ4YÆ`{ MEÅó1:WB€5¤‚Ÿ8À EDDD)""¢KÄÐðHîžÝ½_lËÜõraÊñȰ£3KSQó}-!\ƒAJ!~ŸAŠˆˆˆ(2 RDDD—˜W SŽ/HØÕÕKU`û‡g kòaˆû¾Å EDDD)""¢KÔÐðHîîíÝßØÝ•Û“ÔUØž\ÑÝ SUPóŽ0HðJ!"""Z} RDDD—¸¡áwo߸ÿÊî–=óUoÝܽ¤Ï ­Ás„À}ß:È EDDD)""¢5bhøÀÀo^wùýo\¡(¨z>ŽÎ×ä¹Càã RDDDD‘a"""Zcªž?¤+ÊýÛ½õT¾¼&σQ´¤ˆˆˆÖ¨#³Å¡ÿõ“Ó|cÇUI]]sÇÿ{ßd""""Š ƒÑ·wß¡;·÷}éºÞök)L1HE‡AŠˆˆˆwìðÞ¡M]_¼e ³s-„))"""¢è0HÑyÖJ˜b""""Šƒ½¬Å0uû–îNE–.¹ãc""""Šƒ½¢¡á‘ÜͽÙÏß¹µçý–¦h—Ò±1HE‡AŠˆˆˆ^ÓÐðHîžÝ½_lËÜu©„))"""¢è0HÑ[ S[ZÓï‰û|))"""¢è0HÑE>0p÷öûwwåöÄ5L1HE‡AŠˆˆˆ^·8‡))"""¢è0HÑ64|`à®Áþ¯_µ¾íú¸„))"""¢è0HѲٻïÀÐÛû¾t]oûÎfS RDDDDÑa"""¢e‡0Å EDDD)"""Z1{÷º}°wÿ›ú;66[˜b""""Šƒ­¸;ö?xïЦ®/Þ2ÐÙÙ,aŠAŠˆˆˆ(: RDDD´jÃÔí[º;YŠôµ0HE‡AŠˆˆˆVÝùþsŸ¼yCçg,MÑ¢z RDDDDÑa"""¢H äîÙÝû•Á¶Ì]Q„))"""¢è0HQ¤¢ S RDDDDÑa"""¢¦04<’û¥Ý½½#÷ŽÕ|Î EDDD)"""j*CÃîÞ¾qÿî®Üž• S RDDDDÑa"""¢¦´ÒaŠAŠˆˆˆ(: RDDDÔÔ†† üÜ–¾º®·}çr†))"""¢è0HQ,ìÝw`èÎí}_Z®0Å EDDD)"""Š•å S RDDDDÑa"""¢XÚ»ïÀÐ[·ô|ý–ÎÎצ¤ˆˆˆˆ¢Ã EDDD±vÇþïÚÔõÅ‹ S RDDDDÑa"""¢KÂb˜º}Kw§"K¯ùõ RDDDDÑa"""¢KÊ'zòËwníy¿¥)Ú«}ƒQt¤ˆˆˆè’34<’»gwïWÛ2w½R˜b""""Šƒ]²^-L1HE‡AŠˆˆˆ.yCÃ#¹»·wcwWnÏâàs)"""¢è0HÑš14|`àîí÷ßÔß¾‡AŠˆˆˆ(: RDDÿ;vL 0𥠅±2† F¤ ¥.²p'!c€ß9ï瘙y¯õ©Ð3¤HR¤ )R†)C €”!@Ê eH2¤HR¤ )Rj%Øú^fÛhIEND®B`‚sparse-0.16.0a9/docs/logo.svg000066400000000000000000000545651463475501500157670ustar00rootroot00000000000000 sparse-0.16.0a9/docs/operations.rst000066400000000000000000000212741463475501500172120ustar00rootroot00000000000000.. currentmodule:: sparse Operations on :obj:`COO` and :obj:`GCXS` arrays =============================================== .. _operations-operators: Operators --------- :obj:`COO` and :obj:`GCXS` objects support a number of operations. They interact with scalars, :doc:`Numpy arrays `, other :obj:`COO` and :obj:`GCXS` objects, and :obj:`scipy.sparse.spmatrix` objects, all following standard Python and Numpy conventions. For example, the following Numpy expression produces equivalent results for both Numpy arrays, COO arrays, or a mix of the two: .. code-block:: python np.log(X.dot(beta.T) + 1) However some operations are not supported, like operations that implicitly cause dense structures, or numpy functions that are not yet implemented for sparse arrays. .. code-block:: python np.linalg.cholesky(x) # sparse cholesky not implemented This page describes those valid operations, and their limitations. :obj:`elemwise` ~~~~~~~~~~~~~~~ This function allows you to apply any arbitrary broadcasting function to any number of arguments where the arguments can be :obj:`SparseArray` objects or :obj:`scipy.sparse.spmatrix` objects. For example, the following will add two arrays: .. code-block:: python sparse.elemwise(np.add, x, y) .. warning:: Previously, :obj:`elemwise` was a method of the :obj:`COO` class. Now, it has been moved to the :obj:`sparse` module. .. _operations-auto-densification: Auto-Densification ~~~~~~~~~~~~~~~~~~ Operations that would result in dense matrices, such as operations with :doc:`Numpy arrays ` raises a :obj:`ValueError`. For example, the following will raise a :obj:`ValueError` if :code:`x` is a :obj:`numpy.ndarray`: .. code-block:: python x + y However, all of the following are valid operations. .. code-block:: python x + 0 x != y x + y x == 5 5 * x x / 7.3 x != 0 x == 0 ~x x + 5 We also support operations with a nonzero fill value. These are operations that map zero values to nonzero values, such as :code:`x + 1` or :code:`~x`. 
In these cases, they will produce an output with a fill value of :code:`1` or
:code:`True`, assuming the original array has a fill value of :code:`0` or
:code:`False` respectively.

If densification is needed, it must be explicit. In other words, you must call
:obj:`SparseArray.todense` on the :obj:`SparseArray` object. If both operands
are :obj:`SparseArray`, both must be densified.

Operations with NumPy arrays
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

In certain situations, operations with NumPy arrays are also supported. For
example, the following will work if :code:`x` is :obj:`COO` and :code:`y` is a
NumPy array:

.. code-block:: python

    x * y

The following conditions must be met when performing element-wise operations
with NumPy arrays:

* The operation must produce a consistent fill value. In other words, the
  resulting array must also be sparse.
* Operating on the NumPy arrays must not increase the size when broadcasting
  the arrays.

Operations with :obj:`scipy.sparse.spmatrix`
--------------------------------------------

Certain operations with :obj:`scipy.sparse.spmatrix` are also supported. For
example, the following are all allowed if :code:`y` is a
:obj:`scipy.sparse.spmatrix`:

.. code-block:: python

    x + y
    x - y
    x * y
    x > y
    x < y

In general, operating on a :code:`scipy.sparse.spmatrix` is the same as
operating on :obj:`COO` or :obj:`GCXS`, as long as it is to the right of the
operator.

.. note:: Results are not guaranteed if :code:`x` is a
   :obj:`scipy.sparse.spmatrix`. For this reason, we recommend that all Scipy
   sparse matrices should be explicitly converted to :obj:`COO` or :obj:`GCXS`
   before any operations.

Broadcasting
------------

All binary operators support :doc:`broadcasting `. This means that (under
certain conditions) you can perform binary operations on arrays with unequal
shape. Namely, when the shape is missing a dimension, or when a dimension is
:code:`1`.

For example, performing a binary operation on two :obj:`COO` arrays with
shapes :code:`(4,)` and :code:`(5, 1)` yields an object of shape
:code:`(5, 4)`. The same happens with arrays of shape :code:`(1, 4)` and
:code:`(5, 1)`. However, :code:`(4, 1)` and :code:`(5, 1)` will raise a
:obj:`ValueError`.

.. _operations-elemwise:

Element-wise Operations
-----------------------

:obj:`COO` and :obj:`GCXS` arrays support a variety of element-wise operations.
However, as with operators, operations that map zero to a nonzero value change
the fill value of the output rather than raise an error.

To illustrate, the following are all possible, and will produce another
:obj:`SparseArray`:

.. code-block:: python

    np.abs(x)
    np.sin(x)
    np.sqrt(x)
    np.conj(x)
    np.expm1(x)
    np.log1p(x)
    np.exp(x)
    np.cos(x)
    np.log(x)

As above, in the last three cases, an array with a nonzero fill value will be
produced.

Notice that you can apply any unary or binary :doc:`numpy.ufunc ` to :obj:`COO`
arrays, :obj:`numpy.ndarray` objects and scalars, and it will work so long as
the result is not dense. When applying to :obj:`numpy.ndarray` objects, we
check that operating on the array with zero would always produce a zero.
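As a rough sketch of these rules, the snippet below applies a few ufuncs to
random sparse arrays. The shapes and densities are arbitrary, and
:obj:`sparse.random` is used here only as a convenient way to build example
inputs.

.. code-block:: python

    # Illustrative sketch of the element-wise rules above.
    import numpy as np
    import sparse

    x = sparse.random((3, 4), density=0.25)  # COO with fill value 0.0
    y = sparse.random((3, 4), density=0.25)

    np.sin(x)                           # zero stays zero: result is still sparse
    np.exp(x)                           # zero maps to one: fill value becomes 1.0
    sparse.elemwise(np.multiply, x, y)  # any broadcasting function
    x * np.arange(4)                    # a NumPy operand is fine when zeros survive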
For example, the following will perform a sum: .. code-block:: python x.reduce(np.add, axis=1) .. note:: This library currently performs reductions by grouping together all coordinates along the supplied axes and reducing those. Then, if the number in a group is deficient, it reduces an extra time with zero. As a result, if a reduction's result can change when extra zeros are included, this method won't be accurate. However, it works in most cases. Partial List of Supported Reductions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Although any binary :doc:`numpy.ufunc ` should work for reductions, when calling in the form :code:`x.reduction()`, the following reductions are supported: * :obj:`COO.sum` * :obj:`COO.max` * :obj:`COO.min` * :obj:`COO.prod` .. _operations-indexing: Indexing -------- :obj:`COO` and :obj:`GCXS` arrays can be :obj:`indexed ` just like regular :obj:`numpy.ndarray` objects. They support integer, slice and boolean indexing. However, currently, numpy advanced indexing is not properly supported. This means that all of the following work like in Numpy, except that they will produce :obj:`SparseArray` arrays rather than :obj:`numpy.ndarray` objects, and will produce scalars where expected. Assume that :code:`z.shape` is :code:`(5, 6, 7)`. .. code-block:: python z[0] z[1, 3] z[1, 4, 3] z[:3, :2, 3] z[::-1, 1, 3] z[-1] All of the following will raise an :obj:`IndexError`, like in Numpy 1.13 and later. .. code-block:: python z[6] z[3, 6] z[1, 4, 8] z[-6] Advanced Indexing ~~~~~~~~~~~~~~~~~ Advanced indexing (indexing arrays with other arrays) is supported, but only for indexing with a *single array*. Indexing a single array with multiple arrays is not supported at this time. As above, if :code:`z.shape` is :code:`(5, 6, 7)`, all of the following will work like NumPy: .. code-block:: python z[[0, 1, 2]] z[1, [3]] z[1, 4, [3, 6]] z[:3, :2, [1, 5]] Package Configuration --------------------- By default, when performing something like ``np.array(COO)``, we allow the array to be converted into a dense one. To prevent this and raise a :obj:`RuntimeError` instead, set the environment variable ``SPARSE_AUTO_DENSIFY`` to ``0``. If it is desired to raise a warning when creating a sparse array that takes no less memory than an equivalent dense array, set the environment variable ``SPARSE_WARN_ON_TOO_DENSE`` to ``1``. .. _operations-other: Other Operations ---------------- :obj:`COO` and :obj:`GCXS` arrays support a number of other common operations. Among them are :obj:`dot`, :obj:`tensordot`, :obj:`einsum`, :obj:`concatenate` and :obj:`stack`, :obj:`transpose ` and :obj:`reshape `. You can view the full list on the :doc:`API reference page `. .. note:: Some operations require zero fill-values (such as :obj:`nonzero `) and others (such as :obj:`concatenate`) require that all inputs have consistent fill-values. For details, check the API reference. sparse-0.16.0a9/docs/quickstart.rst000066400000000000000000000034451463475501500172170ustar00rootroot00000000000000.. currentmodule:: sparse Getting Started =============== Install ------- If you haven't already, install the ``sparse`` library: .. code-block:: bash pip install sparse Create ------ To start, let's construct a sparse :obj:`COO` array from a :obj:`numpy.ndarray`: .. 
code-block:: python import numpy as np import sparse x = np.random.random((100, 100, 100)) x[x < 0.9] = 0 # fill most of the array with zeros s = sparse.COO(x) # convert to sparse array These store the same information and support many of the same operations, but the sparse version takes up less space in memory .. code-block:: python >>> x.nbytes 8000000 >>> s.nbytes 1102706 >>> s For more efficient ways to construct sparse arrays, see documentation on :doc:`Constructing Arrays `. Compute ------- Many of the normal Numpy operations work on :obj:`COO` objects just like on :obj:`numpy.ndarray` objects. This includes arithmetic, :doc:`numpy.ufunc ` operations, or functions like tensordot and transpose. .. code-block:: python >>> np.sin(s) + s.T * 1 However, operations which map zero elements to nonzero will usually change the fill-value instead of raising an error. .. code-block:: python >>> y = s + 5 However, if you're sure you want to convert a sparse array to a dense one, you can use the ``todense`` method (which will result in a :obj:`numpy.ndarray`): .. code-block:: python y = s.todense() + 5 For more operations see the :doc:`Operations documentation ` or the :doc:`API reference `. sparse-0.16.0a9/docs/roadmap.rst000066400000000000000000000103451463475501500164470ustar00rootroot00000000000000Roadmap ======= For a brochure version of this roadmap, see `this link `_. Background ---------- The aim of PyData/Sparse is to create sparse containers that implement the ndarray interface. Traditionally in the PyData ecosystem, sparse arrays have been provided by the ``scipy.sparse`` submodule. All containers there depend on and emulate the ``numpy.matrix`` interface. This means that they are limited to two dimensions and also don’t work well in places where ``numpy.ndarray`` would work. PyData/Sparse is well on its way to replacing ``scipy.sparse`` as the de-facto sparse array implementation in the PyData ecosystem. Topics ------ * More storage formats * Better performance/algorithms * Covering more of the NumPy API * SciPy Integration * Dask integration for high scalability * CuPy integration for GPU-acceleration * Maintenance and General Improvements More Storage Formats -------------------- In the sparse domain, you have to make a choice of format when representing your array in memory, and different formats have different trade-offs. For example: * CSR/CSC are usually expected by external libraries, and have good space characteristics for most arrays * DOK allows in-place modification and writes * LIL has faster writes if written to in-order. * BSR allows block-writes and reads The most important formats are, of course, CSR and CSC, because they allow zero-copy interaction with a number of libraries including MKL, LAPACK and others. This will allow PyData/Sparse to quickly reach the functionality of ``scipy.sparse``, accelerating the path to its replacement. Better Performance/Algorithms ----------------------------- There are a few places in scipy.sparse where algorithms are sub-optimal, sometimes due to reliance on NumPy which doesn’t have these algorithms. We intend to both improve the algorithms in NumPy, giving the broader community a chance to use them; as well as in PyData/Sparse, to reach optimal efficiency in the broadest use-cases. Covering More of the NumPy API ------------------------------ Our eventual aim is to cover all areas of NumPy where algorithms exist that give sparse arrays an edge over dense arrays. 
Currently, PyData/Sparse supports reductions, element-wise functions and other common functions such as stacking, concatenating and tensor products. Common uses of sparse arrays include linear algebra and graph theoretic subroutines, so we plan on covering those first. SciPy Integration ----------------- PyData/Sparse aims to build containers and elementary operations on them, such as element-wise operations, reductions and so on. We plan on modifying the current graph theoretic subroutines in ``scipy.sparse.csgraph`` to support PyData/Sparse arrays. The same applies for linear algebra and ``scipy.sparse.linalg``. CuPy integration for GPU-acceleration ------------------------------------- CuPy is a project that implements a large portion of NumPy’s ndarray interface on GPUs. We plan to integrate with CuPy so that it’s possible to accelerate sparse arrays on GPUs. Completed Tasks =============== Dask Integration for High Scalability ------------------------------------- Dask is a project that takes ndarray style containers and then allows them to scale across multiple cores or clusters. We plan on tighter integration and cooperation with the Dask team to ensure the highest amount of Dask functionality works with sparse arrays. Currently, integration with Dask is supported via array protocols. When more of the NumPy API (e.g. array creation functions) becomes available through array protocols, it will automatically be supported by Dask. (Partial) SciPy Integration --------------------------- Support for ``scipy.sparse.linalg`` has been completed. We hope to add support for ``scipy.sparse.csgraph`` in the future. More Storage Formats -------------------- GCXS, a compressed n-dimensional array format based on the GCRS/GCCS formats of `Shaikh and Hasan 2015 `_, has been added. In conjunction with this work, the CSR/CSC matrix formats are now a part of pydata/sparse. We plan to add better-performing algorithms for many of the operations currently supported. 
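As a small illustration of the storage formats discussed above, the following sketch (using the public ``sparse`` API) builds a random COO array and converts it to the compressed GCXS format:

.. code-block:: python

    import sparse

    x = sparse.random((1000, 1000), density=0.01)   # COO by default
    g = x.asformat("gcxs", compressed_axes=(0,))    # CSR-like compressed storage
    g.compressed_axes                               # (0,)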
sparse-0.16.0a9/examples/000077500000000000000000000000001463475501500151555ustar00rootroot00000000000000sparse-0.16.0a9/examples/__init__.py000066400000000000000000000000001463475501500172540ustar00rootroot00000000000000sparse-0.16.0a9/examples/elemwise_example.py000066400000000000000000000041071463475501500210560ustar00rootroot00000000000000import importlib import operator import os import sparse from utils import benchmark import numpy as np import scipy.sparse as sps LEN = 10000 DENSITY = 0.001 ITERS = 3 rng = np.random.default_rng(0) if __name__ == "__main__": print("Elementwise Example:\n") for func_name in ["multiply", "add", "greater_equal"]: print(f"{func_name} benchmark:\n") s1_sps = sps.random(LEN, LEN, format="csr", density=DENSITY, random_state=rng) * 10 s1_sps.sum_duplicates() s2_sps = sps.random(LEN, LEN, format="csr", density=DENSITY, random_state=rng) * 10 s2_sps.sum_duplicates() # ======= Finch ======= os.environ[sparse._ENV_VAR_NAME] = "Finch" importlib.reload(sparse) s1 = sparse.asarray(s1_sps.asformat("csc"), format="csc") s2 = sparse.asarray(s2_sps.asformat("csc"), format="csc") func = getattr(sparse, func_name) # Compile result_finch = func(s1, s2) # Benchmark benchmark(func, args=[s1, s2], info="Finch", iters=ITERS) # ======= Numba ======= os.environ[sparse._ENV_VAR_NAME] = "Numba" importlib.reload(sparse) s1 = sparse.asarray(s1_sps) s2 = sparse.asarray(s2_sps) func = getattr(sparse, func_name) # Compile result_numba = func(s1, s2) # Benchmark benchmark(func, args=[s1, s2], info="Numba", iters=ITERS) # ======= SciPy ======= s1 = s1_sps s2 = s2_sps if func_name == "multiply": func, args = s1.multiply, [s2] elif func_name == "add": func, args = operator.add, [s1, s2] elif func_name == "greater_equal": func, args = operator.ge, [s1, s2] result_scipy = func(*args) # Benchmark benchmark(func, args=args, info="SciPy", iters=ITERS) np.testing.assert_allclose(result_numba.todense(), result_scipy.toarray()) np.testing.assert_allclose(result_finch.todense(), result_numba.todense()) np.testing.assert_allclose(result_finch.todense(), result_scipy.toarray()) sparse-0.16.0a9/examples/matmul_example.py000066400000000000000000000032221463475501500205400ustar00rootroot00000000000000import importlib import os import sparse from utils import benchmark import numpy as np import scipy.sparse as sps LEN = 100000 DENSITY = 0.00001 ITERS = 3 rng = np.random.default_rng(0) if __name__ == "__main__": print("Matmul Example:\n") a_sps = sps.random(LEN, LEN - 10, format="csr", density=DENSITY, random_state=rng) * 10 a_sps.sum_duplicates() b_sps = sps.random(LEN - 10, LEN, format="csr", density=DENSITY, random_state=rng) * 10 b_sps.sum_duplicates() # ======= Finch ======= os.environ[sparse._ENV_VAR_NAME] = "Finch" importlib.reload(sparse) a = sparse.asarray(a_sps) b = sparse.asarray(b_sps) @sparse.compiled def sddmm_finch(a, b): return a @ b # Compile result_finch = sddmm_finch(a, b) # Benchmark benchmark(sddmm_finch, args=[a, b], info="Finch", iters=ITERS) # ======= Numba ======= os.environ[sparse._ENV_VAR_NAME] = "Numba" importlib.reload(sparse) a = sparse.asarray(a_sps) b = sparse.asarray(b_sps) def sddmm_numba(a, b): return a @ b # Compile result_numba = sddmm_numba(a, b) # Benchmark benchmark(sddmm_numba, args=[a, b], info="Numba", iters=ITERS) # ======= SciPy ======= def sddmm_scipy(a, b): return a @ b a = a_sps b = b_sps result_scipy = sddmm_scipy(a, b) # Benchmark benchmark(sddmm_scipy, args=[a, b], info="SciPy", iters=ITERS) # np.testing.assert_allclose(result_numba.todense(), 
result_scipy.toarray()) # np.testing.assert_allclose(result_finch.todense(), result_numba.todense()) # np.testing.assert_allclose(result_finch.todense(), result_scipy.toarray()) sparse-0.16.0a9/examples/mttkrp_example.py000066400000000000000000000027321463475501500205670ustar00rootroot00000000000000import importlib import os import sparse from utils import benchmark import numpy as np I_ = 1000 J_ = 25 K_ = 1000 L_ = 100 DENSITY = 0.0001 ITERS = 3 rng = np.random.default_rng(0) if __name__ == "__main__": print("MTTKRP Example:\n") B_sps = sparse.random((I_, K_, L_), density=DENSITY, random_state=rng) * 10 D_sps = rng.random((L_, J_)) * 10 C_sps = rng.random((K_, J_)) * 10 # ======= Finch ======= os.environ[sparse._ENV_VAR_NAME] = "Finch" importlib.reload(sparse) B = sparse.asarray(B_sps.todense(), format="csf") D = sparse.asarray(np.array(D_sps, order="F")) C = sparse.asarray(np.array(C_sps, order="F")) @sparse.compiled def mttkrp_finch(B, D, C): return sparse.sum(B[:, :, :, None] * D[None, None, :, :] * C[None, :, None, :], axis=(1, 2)) # Compile result_finch = mttkrp_finch(B, D, C) # Benchmark benchmark(mttkrp_finch, args=[B, D, C], info="Finch", iters=ITERS) # ======= Numba ======= os.environ[sparse._ENV_VAR_NAME] = "Numba" importlib.reload(sparse) B = sparse.asarray(B_sps, format="gcxs") D = D_sps C = C_sps def mttkrp_numba(B, D, C): return sparse.sum(B[:, :, :, None] * D[None, None, :, :] * C[None, :, None, :], axis=(1, 2)) # Compile result_numba = mttkrp_numba(B, D, C) # Benchmark benchmark(mttkrp_numba, args=[B, D, C], info="Numba", iters=ITERS) np.testing.assert_allclose(result_finch.todense(), result_numba.todense()) sparse-0.16.0a9/examples/sddmm_example.py000066400000000000000000000035701463475501500203530ustar00rootroot00000000000000import importlib import os import sparse from utils import benchmark import numpy as np import scipy.sparse as sps LEN = 10000 DENSITY = 0.00001 ITERS = 3 rng = np.random.default_rng(0) if __name__ == "__main__": print("SDDMM Example:\n") a_sps = rng.random((LEN, LEN - 10)) * 10 b_sps = rng.random((LEN - 10, LEN)) * 10 s_sps = sps.random(LEN, LEN, format="coo", density=DENSITY, random_state=rng) * 10 s_sps.sum_duplicates() # ======= Finch ======= os.environ[sparse._ENV_VAR_NAME] = "Finch" importlib.reload(sparse) s = sparse.asarray(s_sps) a = sparse.asarray(np.array(a_sps, order="F")) b = sparse.asarray(np.array(b_sps, order="C")) @sparse.compiled def sddmm_finch(s, a, b): return sparse.sum( s[:, :, None] * (a[:, None, :] * sparse.permute_dims(b, (1, 0))[None, :, :]), axis=-1, ) # Compile result_finch = sddmm_finch(s, a, b) # Benchmark benchmark(sddmm_finch, args=[s, a, b], info="Finch", iters=ITERS) # ======= Numba ======= os.environ[sparse._ENV_VAR_NAME] = "Numba" importlib.reload(sparse) s = sparse.asarray(s_sps) a = a_sps b = b_sps def sddmm_numba(s, a, b): return s * (a @ b) # Compile result_numba = sddmm_numba(s, a, b) # Benchmark benchmark(sddmm_numba, args=[s, a, b], info="Numba", iters=ITERS) # ======= SciPy ======= def sddmm_scipy(s, a, b): return s.multiply(a @ b) s = s_sps.asformat("csr") a = a_sps b = b_sps result_scipy = sddmm_scipy(s, a, b) # Benchmark benchmark(sddmm_scipy, args=[s, a, b], info="SciPy", iters=ITERS) np.testing.assert_allclose(result_numba.todense(), result_scipy.toarray()) np.testing.assert_allclose(result_finch.todense(), result_numba.todense()) np.testing.assert_allclose(result_finch.todense(), result_scipy.toarray()) 
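# Note: each example in this directory selects a backend by setting the
# ``SPARSE_BACKEND`` environment variable (``sparse._ENV_VAR_NAME``) and then
# re-importing ``sparse``, e.g.:
#
#     os.environ["SPARSE_BACKEND"] = "Finch"  # or "Numba"
#     importlib.reload(sparse)
#
# ``sparse/__init__.py`` reads this variable at import time, so the reload is
# what actually switches the implementation the benchmarked functions use.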
sparse-0.16.0a9/examples/spmv_add_example.py000066400000000000000000000034441463475501500210440ustar00rootroot00000000000000import importlib import os import sparse from utils import benchmark import numpy as np import scipy.sparse as sps LEN = 100000 DENSITY = 0.000001 ITERS = 3 rng = np.random.default_rng(0) if __name__ == "__main__": print("SpMv_add Example:\n") A_sps = sps.random(LEN - 10, LEN, format="csc", density=DENSITY, random_state=rng) * 10 x_sps = rng.random((LEN, 1)) * 10 y_sps = rng.random((LEN - 10, 1)) * 10 # ======= Finch ======= os.environ[sparse._ENV_VAR_NAME] = "Finch" importlib.reload(sparse) A = sparse.asarray(A_sps) x = sparse.asarray(np.array(x_sps, order="C")) y = sparse.asarray(np.array(y_sps, order="C")) @sparse.compiled def spmv_finch(A, x, y): return sparse.sum(A[:, None, :] * sparse.permute_dims(x, (1, 0))[None, :, :], axis=-1) + y # Compile result_finch = spmv_finch(A, x, y) # Benchmark benchmark(spmv_finch, args=[A, x, y], info="Finch", iters=ITERS) # ======= Numba ======= os.environ[sparse._ENV_VAR_NAME] = "Numba" importlib.reload(sparse) A = sparse.asarray(A_sps, format="csc") x = x_sps y = y_sps def spmv_numba(A, x, y): return A @ x + y # Compile result_numba = spmv_numba(A, x, y) assert sparse.nonzero(result_numba)[0].size > 5 # Benchmark benchmark(spmv_numba, args=[A, x, y], info="Numba", iters=ITERS) # ======= SciPy ======= def spmv_scipy(A, x, y): return A @ x + y A = A_sps x = x_sps y = y_sps result_scipy = spmv_scipy(A, x, y) # Benchmark benchmark(spmv_scipy, args=[A, x, y], info="SciPy", iters=ITERS) np.testing.assert_allclose(result_numba, result_scipy) np.testing.assert_allclose(result_finch.todense(), result_numba) np.testing.assert_allclose(result_finch.todense(), result_scipy) sparse-0.16.0a9/examples/utils.py000066400000000000000000000005021463475501500166640ustar00rootroot00000000000000import time from collections.abc import Callable, Iterable from typing import Any def benchmark(func: Callable, args: Iterable[Any], info: str, iters: int): print(info) start = time.time() for _ in range(iters): func(*args) elapsed = time.time() - start print(f"Took {elapsed / iters} s.\n") sparse-0.16.0a9/pyproject.toml000066400000000000000000000044651463475501500162640ustar00rootroot00000000000000[build-system] requires = ["setuptools>=64", "setuptools_scm>=8"] build-backend = "setuptools.build_meta" [project] name = "sparse" dynamic = ["version"] description = "Sparse n-dimensional arrays for the PyData ecosystem" readme = "README.rst" dependencies = ["numpy>=1.17", "numba>=0.49"] maintainers = [{ name = "Hameer Abbasi", email = "hameerabbasi@yahoo.com" }] requires-python = ">=3.10" license = { file = "LICENSE" } keywords = ["sparse", "numpy", "scipy", "dask"] classifiers = [ "Development Status :: 2 - Pre-Alpha", "Operating System :: OS Independent", "License :: OSI Approved :: BSD License", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3 :: Only", "Intended Audience :: Developers", "Intended Audience :: Science/Research", ] [project.optional-dependencies] docs = ["sphinx", "sphinx_rtd_theme", "scipy"] tests = [ "dask[array]", "pytest>=3.5", "pytest-cov", "pytest-xdist", "pre-commit", "scipy", "sparse[finch]", ] tox = ["sparse[tests]", "tox"] all = ["sparse[docs,tox]", "matrepr"] finch = ["finch-tensor>=0.1.30"] [project.urls] Documentation = "https://sparse.pydata.org/" 
Source = "https://github.com/pydata/sparse/" Repository = "https://github.com/pydata/sparse.git" "Issue Tracker" = "https://github.com/pydata/sparse/issues" Discussions = "https://github.com/pydata/sparse/discussions" [project.entry-points.numba_extensions] init = "sparse.numba_backend._numba_extension:_init_extension" [tool.setuptools.packages.find] where = ["."] include = ["sparse", "sparse.*"] [tool.setuptools_scm] version_file = "sparse/_version.py" [tool.ruff] exclude = ["sparse/_version.py"] line-length = 120 [tool.ruff.lint] select = ["F", "E", "W", "I", "B", "UP", "YTT", "BLE", "C4", "T10", "ISC", "ICN", "PIE", "PYI", "RSE", "RET", "SIM", "PGH", "FLY", "NPY", "PERF"] [tool.ruff.lint.isort.sections] numpy = ["numpy", "numpy.*", "scipy", "scipy.*"] [tool.ruff.format] quote-style = "double" docstring-code-format = true [tool.ruff.lint.isort] section-order = [ "future", "standard-library", "first-party", "third-party", "numpy", "local-folder", ] sparse-0.16.0a9/pytest.ini000066400000000000000000000004101463475501500153630ustar00rootroot00000000000000[pytest] addopts = --cov-report term-missing --cov-report html --cov-report=term:skip-covered --cov sparse --cov-config .coveragerc --junitxml=junit/test-results.xml filterwarnings = ignore::PendingDeprecationWarning testpaths = sparse junit_family=xunit2 sparse-0.16.0a9/release-procedure.md000066400000000000000000000012471463475501500172730ustar00rootroot00000000000000* Update changelog in docs/changelog.rst and commit. * Tag commit ```bash git tag -a x.x.x -m 'Version x.x.x' ``` * Push to github ```bash git push pydata main --tags ``` * Upload to PyPI ```bash git clean -xfd # remove all files in directory not in repository python -m build --wheel --sdist # make packages twine upload dist/* # upload packages ``` * Enable the newly-pushed tag for documentation: https://readthedocs.org/projects/sparse-nd/versions/ * Wait for conda-forge to realise that the build is too old and make a PR. * Edit and merge that PR. * Announce the release on: * numpy-discussion@python.org * python-announce-list@python.org sparse-0.16.0a9/setup.cfg000066400000000000000000000004551463475501500151640ustar00rootroot00000000000000[flake8] # References: # https://flake8.readthedocs.io/en/latest/user/configuration.html # https://flake8.readthedocs.io/en/latest/user/error-codes.html # Note: there cannot be spaces after comma's here exclude = __init__.py .asv/ .tox/ max-line-length = 120 [bdist_wheel] universal=1 sparse-0.16.0a9/sparse/000077500000000000000000000000001463475501500146345ustar00rootroot00000000000000sparse-0.16.0a9/sparse/__init__.py000066400000000000000000000023521463475501500167470ustar00rootroot00000000000000import os import warnings from enum import Enum from ._version import __version__, __version_tuple__ # noqa: F401 __array_api_version__ = "2022.12" class BackendType(Enum): Numba = "Numba" Finch = "Finch" _ENV_VAR_NAME = "SPARSE_BACKEND" if _ENV_VAR_NAME in os.environ: warnings.warn("Selectable backends feature in `sparse` might change in the future.", FutureWarning, stacklevel=1) _backend_name = os.environ[_ENV_VAR_NAME] else: _backend_name = BackendType.Numba.value if _backend_name not in {BackendType.Numba.value, BackendType.Finch.value}: warnings.warn(f"Invalid backend identifier: {_backend_name}. 
Selecting Numba backend.", UserWarning, stacklevel=1) BACKEND = BackendType.Numba else: BACKEND = BackendType[_backend_name] del _backend_name if BackendType.Finch == BACKEND: from sparse.finch_backend import * # noqa: F403 from sparse.finch_backend import __all__ else: from sparse.numba_backend import * # noqa: F403 from sparse.numba_backend import ( # noqa: F401 __all__, _common, _compressed, _coo, _dok, _io, _numba_extension, _settings, _slicing, _sparse_array, _umath, _utils, ) sparse-0.16.0a9/sparse/finch_backend/000077500000000000000000000000001463475501500173725ustar00rootroot00000000000000sparse-0.16.0a9/sparse/finch_backend/__init__.py000066400000000000000000000061501463475501500215050ustar00rootroot00000000000000try: import finch # noqa: F401 except ModuleNotFoundError as e: raise ImportError("Finch not installed. Run `pip install sparse[finch]` to enable Finch backend") from e from finch import ( SparseArray, abs, acos, acosh, add, all, any, arange, asarray, asin, asinh, astype, atan, atan2, atanh, bitwise_and, bitwise_invert, bitwise_left_shift, bitwise_or, bitwise_right_shift, bitwise_xor, bool, can_cast, ceil, compiled, complex64, complex128, compute, conj, cos, cosh, divide, e, empty, empty_like, equal, exp, expm1, eye, finfo, float16, float32, float64, floor, floor_divide, full, full_like, greater, greater_equal, iinfo, imag, inf, int8, int16, int32, int64, int_, isfinite, isinf, isnan, lazy, less, less_equal, linspace, log, log1p, log2, log10, logaddexp, logical_and, logical_or, logical_xor, matmul, max, min, multiply, nan, negative, newaxis, nonzero, not_equal, ones, ones_like, permute_dims, pi, positive, pow, prod, random, real, remainder, reshape, round, sign, sin, sinh, sqrt, square, subtract, sum, tan, tanh, tensordot, trunc, uint, uint8, uint16, uint32, uint64, where, zeros, zeros_like, ) __all__ = [ "SparseArray", "abs", "acos", "acosh", "add", "asarray", "asin", "asinh", "astype", "atan", "atan2", "atanh", "bitwise_and", "bitwise_invert", "bitwise_left_shift", "bitwise_or", "bitwise_right_shift", "bitwise_xor", "bool", "compiled", "complex64", "complex128", "compute", "cos", "cosh", "divide", "eye", "float16", "float32", "float64", "floor_divide", "int8", "int16", "int32", "int64", "int_", "lazy", "matmul", "multiply", "negative", "permute_dims", "positive", "pow", "prod", "random", "sin", "sinh", "subtract", "sum", "tan", "tanh", "tensordot", "uint", "uint8", "uint16", "uint32", "uint64", "max", "min", "all", "any", "log", "log10", "log1p", "log2", "sqrt", "exp", "expm1", "sign", "round", "floor", "ceil", "nonzero", "where", "full", "full_like", "ones", "ones_like", "zeros", "zeros_like", "equal", "not_equal", "less", "less_equal", "greater", "greater_equal", "e", "pi", "inf", "nan", "newaxis", "isnan", "isfinite", "isinf", "reshape", "square", "logaddexp", "trunc", "remainder", "logical_and", "logical_or", "logical_xor", "finfo", "iinfo", "can_cast", "real", "imag", "conj", "empty", "empty_like", "arange", "linspace", ] sparse-0.16.0a9/sparse/numba_backend/000077500000000000000000000000001463475501500174055ustar00rootroot00000000000000sparse-0.16.0a9/sparse/numba_backend/__init__.py000066400000000000000000000111271463475501500215200ustar00rootroot00000000000000from numpy import ( add, bitwise_and, bitwise_not, bitwise_or, bitwise_xor, can_cast, ceil, complex64, complex128, conj, cos, cosh, divide, e, exp, expm1, finfo, float16, float32, float64, floor, floor_divide, greater, greater_equal, iinfo, inf, int8, int16, int32, int64, isfinite, less, less_equal, log, 
log1p, log2, log10, logaddexp, logical_and, logical_not, logical_or, logical_xor, multiply, nan, negative, newaxis, not_equal, pi, positive, remainder, sign, sin, sinh, sqrt, square, subtract, tan, tanh, trunc, uint8, uint16, uint32, uint64, ) from numpy import arccos as acos from numpy import arccosh as acosh from numpy import arcsin as asin from numpy import arcsinh as asinh from numpy import arctan as atan from numpy import arctan2 as atan2 from numpy import arctanh as atanh from numpy import bool_ as bool from numpy import invert as bitwise_invert from numpy import left_shift as bitwise_left_shift from numpy import power as pow from numpy import right_shift as bitwise_right_shift from ._common import ( SparseArray, abs, all, any, asarray, asnumpy, astype, broadcast_arrays, broadcast_to, concat, concatenate, dot, einsum, empty, empty_like, equal, eye, full, full_like, imag, isinf, isnan, matmul, max, mean, min, moveaxis, nonzero, ones, ones_like, outer, pad, permute_dims, prod, real, reshape, round, squeeze, stack, std, sum, tensordot, var, vecdot, zeros, zeros_like, ) from ._compressed import GCXS from ._coo import COO, as_coo from ._coo.common import ( argmax, argmin, argwhere, asCOO, clip, diagonal, diagonalize, expand_dims, flip, isneginf, isposinf, kron, matrix_transpose, nanmax, nanmean, nanmin, nanprod, nanreduce, nansum, result_type, roll, sort, take, tril, triu, unique_counts, unique_values, where, ) from ._dok import DOK from ._io import load_npz, save_npz from ._umath import elemwise from ._utils import random __all__ = [ "COO", "DOK", "GCXS", "SparseArray", "abs", "acos", "acosh", "add", "all", "any", "argmax", "argmin", "argwhere", "asCOO", "as_coo", "asarray", "asin", "asinh", "asnumpy", "astype", "atan", "atan2", "atanh", "bitwise_and", "bitwise_invert", "bitwise_left_shift", "bitwise_not", "bitwise_or", "bitwise_right_shift", "bitwise_xor", "bool", "broadcast_arrays", "broadcast_to", "can_cast", "ceil", "clip", "complex128", "complex64", "concat", "concatenate", "conj", "cos", "cosh", "diagonal", "diagonalize", "divide", "dot", "e", "einsum", "elemwise", "empty", "empty_like", "equal", "exp", "expand_dims", "expm1", "eye", "finfo", "flip", "float16", "float32", "float64", "floor", "floor_divide", "full", "full_like", "greater", "greater_equal", "iinfo", "imag", "inf", "int16", "int32", "int64", "int8", "isfinite", "isinf", "isnan", "isneginf", "isposinf", "kron", "less", "less_equal", "load_npz", "log", "log10", "log1p", "log2", "logaddexp", "logical_and", "logical_not", "logical_or", "logical_xor", "matmul", "matrix_transpose", "max", "mean", "min", "moveaxis", "multiply", "nan", "nanmax", "nanmean", "nanmin", "nanprod", "nanreduce", "nansum", "negative", "newaxis", "nonzero", "not_equal", "ones", "ones_like", "outer", "pad", "permute_dims", "pi", "positive", "pow", "prod", "random", "real", "remainder", "reshape", "result_type", "roll", "round", "save_npz", "sign", "sin", "sinh", "sort", "sqrt", "square", "squeeze", "stack", "std", "subtract", "sum", "take", "tan", "tanh", "tensordot", "tril", "triu", "trunc", "uint16", "uint32", "uint64", "uint8", "unique_counts", "unique_values", "var", "vecdot", "where", "zeros", "zeros_like", ] sparse-0.16.0a9/sparse/numba_backend/_common.py000066400000000000000000002072461463475501500214210ustar00rootroot00000000000000import builtins import warnings from collections.abc import Iterable from functools import reduce, wraps from itertools import chain from operator import index, mul import numba from numba import literal_unroll 
import numpy as np from ._coo.common import asCOO from ._sparse_array import SparseArray from ._utils import ( _zero_of_dtype, check_zero_fill_value, equivalent, normalize_axis, ) def _is_scipy_sparse_obj(x): """ Tests if the supplied argument is a SciPy sparse object. """ if hasattr(x, "__module__") and x.__module__.startswith("scipy.sparse"): return True return False def _check_device(func): @wraps(func) def wrapped(*args, **kwargs): device = kwargs.get("device", None) if device not in {"cpu", None}: raise ValueError("Device must be `'cpu'` or `None`.") return func(*args, **kwargs) return wrapped def _is_sparse(x): """ Tests if the supplied argument is a SciPy sparse object, or one from this library. """ return isinstance(x, SparseArray) or _is_scipy_sparse_obj(x) @numba.njit def nan_check(*args): """ Check for the NaN values in Numpy Arrays Parameters ---------- Union[Numpy Array, Integer, Float] Returns ------- Boolean Whether Numpy Array Contains NaN """ for i in literal_unroll(args): ia = np.asarray(i) if ia.size != 0 and np.isnan(np.min(ia)): return True return False def check_class_nan(test): """ Check NaN for Sparse Arrays Parameters ---------- test : Union[sparse.COO, sparse.GCXS, scipy.sparse.spmatrix, Numpy Ndarrays] Returns ------- Boolean Whether Sparse Array Contains NaN """ from ._compressed import GCXS from ._coo import COO if isinstance(test, GCXS | COO): return nan_check(test.fill_value, test.data) if _is_scipy_sparse_obj(test): return nan_check(test.data) return nan_check(test) def tensordot(a, b, axes=2, *, return_type=None): """ Perform the equivalent of :obj:`numpy.tensordot`. Parameters ---------- a, b : Union[SparseArray, np.ndarray, scipy.sparse.spmatrix] The arrays to perform the :code:`tensordot` operation on. axes : tuple[Union[int, tuple[int], Union[int, tuple[int]], optional The axes to match when performing the sum. return_type : {None, COO, np.ndarray}, optional Type of returned array. Returns ------- Union[SparseArray, numpy.ndarray] The result of the operation. Raises ------ ValueError If all arguments don't have zero fill-values. 
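Examples
--------
A minimal doctest-style sketch with small sparse operands:

>>> import numpy as np
>>> import sparse
>>> a = sparse.COO.from_numpy(np.arange(12).reshape(3, 4))
>>> b = sparse.COO.from_numpy(np.ones((4, 2)))
>>> sparse.tensordot(a, b, axes=1).todense()
array([[ 6.,  6.],
       [22., 22.],
       [38., 38.]])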
See Also -------- numpy.tensordot : NumPy equivalent function """ from ._compressed import GCXS # Much of this is stolen from numpy/core/numeric.py::tensordot # Please see license at https://github.com/numpy/numpy/blob/main/LICENSE.txt check_zero_fill_value(a, b) if _is_scipy_sparse_obj(a): a = GCXS.from_scipy_sparse(a) if _is_scipy_sparse_obj(b): b = GCXS.from_scipy_sparse(b) try: iter(axes) except TypeError: axes_a = list(range(-axes, 0)) axes_b = list(range(axes)) else: axes_a, axes_b = axes try: na = len(axes_a) axes_a = list(axes_a) except TypeError: axes_a = [axes_a] na = 1 try: nb = len(axes_b) axes_b = list(axes_b) except TypeError: axes_b = [axes_b] nb = 1 # a, b = asarray(a), asarray(b) # <--- modified as_ = a.shape nda = a.ndim bs = b.shape ndb = b.ndim equal = True if nda == 0 or ndb == 0: pos = int(nda != 0) raise ValueError(f"Input {pos} operand does not have enough dimensions") if na != nb: equal = False else: for k in range(na): if as_[axes_a[k]] != bs[axes_b[k]]: equal = False break if axes_a[k] < 0: axes_a[k] += nda if axes_b[k] < 0: axes_b[k] += ndb if not equal: raise ValueError("shape-mismatch for sum") # Move the axes to sum over to the end of "a" # and to the front of "b" notin = [k for k in range(nda) if k not in axes_a] newaxes_a = notin + axes_a N2 = 1 for axis in axes_a: N2 *= as_[axis] newshape_a = (-1, N2) olda = [as_[axis] for axis in notin] notin = [k for k in range(ndb) if k not in axes_b] newaxes_b = axes_b + notin N2 = 1 for axis in axes_b: N2 *= bs[axis] newshape_b = (N2, -1) oldb = [bs[axis] for axis in notin] if builtins.any(dim == 0 for dim in chain(newshape_a, newshape_b)): res = asCOO(np.empty(olda + oldb), check=False) if isinstance(a, np.ndarray) or isinstance(b, np.ndarray): res = res.todense() return res at = a.transpose(newaxes_a).reshape(newshape_a) bt = b.transpose(newaxes_b).reshape(newshape_b) res = _dot(at, bt, return_type) return res.reshape(olda + oldb) def matmul(a, b): """Perform the equivalent of :obj:`numpy.matmul` on two arrays. Parameters ---------- a, b : Union[SparseArray, np.ndarray, scipy.sparse.spmatrix] The arrays to perform the :code:`matmul` operation on. Returns ------- Union[SparseArray, numpy.ndarray] The result of the operation. Raises ------ ValueError If all arguments don't have zero fill-values, or the shape of the two arrays is not broadcastable. See Also -------- numpy.matmul : NumPy equivalent function. COO.__matmul__ : Equivalent function for COO objects. 
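Examples
--------
A minimal doctest-style sketch with two small sparse matrices:

>>> import numpy as np
>>> import sparse
>>> a = sparse.COO.from_numpy(np.eye(3))
>>> b = sparse.COO.from_numpy(np.ones((3, 3)))
>>> sparse.matmul(a, b).todense()
array([[1., 1., 1.],
       [1., 1., 1.],
       [1., 1., 1.]])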
""" check_zero_fill_value(a, b) if not hasattr(a, "ndim") or not hasattr(b, "ndim"): raise TypeError(f"Cannot perform dot product on types {type(a)}, {type(b)}") if check_class_nan(a) or check_class_nan(b): warnings.warn("Nan will not be propagated in matrix multiplication", RuntimeWarning, stacklevel=1) # When b is 2-d, it is equivalent to dot if b.ndim <= 2: return dot(a, b) # when a is 2-d, we need to transpose result after dot if a.ndim <= 2: res = dot(a, b) axes = list(range(res.ndim)) axes.insert(-1, axes.pop(0)) return res.transpose(axes) # If a can be squeeze to a vector, use dot will be faster if a.ndim <= b.ndim and np.prod(a.shape[:-1]) == 1: res = dot(a.reshape(-1), b) shape = list(res.shape) shape.insert(-1, 1) return res.reshape(shape) # If b can be squeeze to a matrix, use dot will be faster if b.ndim <= a.ndim and np.prod(b.shape[:-2]) == 1: return dot(a, b.reshape(b.shape[-2:])) if a.ndim < b.ndim: a = a[(None,) * (b.ndim - a.ndim)] if a.ndim > b.ndim: b = b[(None,) * (a.ndim - b.ndim)] for i, j in zip(a.shape[:-2], b.shape[:-2], strict=True): if i != 1 and j != 1 and i != j: raise ValueError("shapes of a and b are not broadcastable") def _matmul_recurser(a, b): if a.ndim == 2: return dot(a, b) res = [] for i in range(builtins.max(a.shape[0], b.shape[0])): a_i = a[0] if a.shape[0] == 1 else a[i] b_i = b[0] if b.shape[0] == 1 else b[i] res.append(_matmul_recurser(a_i, b_i)) mask = [isinstance(x, SparseArray) for x in res] if builtins.all(mask): return stack(res) res = [x.todense() if isinstance(x, SparseArray) else x for x in res] return np.stack(res) return _matmul_recurser(a, b) def dot(a, b): """ Perform the equivalent of :obj:`numpy.dot` on two arrays. Parameters ---------- a, b : Union[SparseArray, np.ndarray, scipy.sparse.spmatrix] The arrays to perform the :code:`dot` operation on. Returns ------- Union[SparseArray, numpy.ndarray] The result of the operation. Raises ------ ValueError If all arguments don't have zero fill-values. See Also -------- numpy.dot : NumPy equivalent function. COO.dot : Equivalent function for COO objects. 
""" check_zero_fill_value(a, b) if not hasattr(a, "ndim") or not hasattr(b, "ndim"): raise TypeError(f"Cannot perform dot product on types {type(a)}, {type(b)}") if a.ndim == 1 and b.ndim == 1: if isinstance(a, SparseArray): a = asCOO(a) if isinstance(b, SparseArray): b = asCOO(b) return (a * b).sum() a_axis = -1 b_axis = -2 if b.ndim == 1: b_axis = -1 return tensordot(a, b, axes=(a_axis, b_axis)) def _dot(a, b, return_type=None): from ._compressed import GCXS from ._coo import COO from ._sparse_array import SparseArray out_shape = (a.shape[0], b.shape[1]) if builtins.all(isinstance(arr, SparseArray) for arr in [a, b]) and builtins.any( isinstance(arr, GCXS) for arr in [a, b] ): a = a.asformat("gcxs") b = b.asformat("gcxs", compressed_axes=a.compressed_axes) if isinstance(a, GCXS) and isinstance(b, GCXS): if a.nbytes > b.nbytes: b = b.change_compressed_axes(a.compressed_axes) else: a = a.change_compressed_axes(b.compressed_axes) if a.compressed_axes == (0,): # csr @ csr compressed_axes = (0,) data, indices, indptr = _dot_csr_csr_type(a.dtype, b.dtype)( out_shape, a.data, b.data, a.indices, b.indices, a.indptr, b.indptr ) elif a.compressed_axes == (1,): # csc @ csc # a @ b = (b.T @ a.T).T compressed_axes = (1,) data, indices, indptr = _dot_csr_csr_type(b.dtype, a.dtype)( out_shape[::-1], b.data, a.data, b.indices, a.indices, b.indptr, a.indptr, ) out = GCXS( (data, indices, indptr), shape=out_shape, compressed_axes=compressed_axes, prune=True, ) if return_type == np.ndarray: return out.todense() if return_type == COO: return out.tocoo() return out if isinstance(a, GCXS) and isinstance(b, np.ndarray): if a.compressed_axes == (0,): # csr @ ndarray if return_type is None or return_type == np.ndarray: return _dot_csr_ndarray_type(a.dtype, b.dtype)(out_shape, a.data, a.indices, a.indptr, b) data, indices, indptr = _dot_csr_ndarray_type_sparse(a.dtype, b.dtype)( out_shape, a.data, a.indices, a.indptr, b ) out = GCXS( (data, indices, indptr), shape=out_shape, compressed_axes=(0,), prune=True, ) if return_type == COO: return out.tocoo() return out if return_type is None or return_type == np.ndarray: # csc @ ndarray return _dot_csc_ndarray_type(a.dtype, b.dtype)(a.shape, b.shape, a.data, a.indices, a.indptr, b) data, indices, indptr = _dot_csc_ndarray_type_sparse(a.dtype, b.dtype)( a.shape, b.shape, a.data, a.indices, a.indptr, b ) compressed_axes = (1,) out = GCXS( (data, indices, indptr), shape=out_shape, compressed_axes=compressed_axes, prune=True, ) if return_type == COO: return out.tocoo() return out if isinstance(a, np.ndarray) and isinstance(b, GCXS): at = a.view(type=np.ndarray).T bt = b.T # constant-time transpose if b.compressed_axes == (0,): if return_type is None or return_type == np.ndarray: out = _dot_csc_ndarray_type(bt.dtype, at.dtype)(bt.shape, at.shape, bt.data, bt.indices, bt.indptr, at) return out.T data, indices, indptr = _dot_csc_ndarray_type_sparse(bt.dtype, at.dtype)( bt.shape, at.shape, bt.data, b.indices, b.indptr, at ) out = GCXS( (data, indices, indptr), shape=out_shape, compressed_axes=(0,), prune=True, ) if return_type == COO: return out.tocoo() return out # compressed_axes == (1,) if return_type is None or return_type == np.ndarray: out = _dot_csr_ndarray_type(bt.dtype, at.dtype)(out_shape[::-1], bt.data, bt.indices, bt.indptr, at) return out.T data, indices, indptr = _dot_csr_ndarray_type_sparse(bt.dtype, at.dtype)( out_shape[::-1], bt.data, bt.indices, bt.indptr, at ) out = GCXS((data, indices, indptr), shape=out_shape, compressed_axes=(1,), prune=True) if 
return_type == COO: return out.tocoo() return out if isinstance(a, COO) and isinstance(b, COO): # convert to csr a_indptr = np.empty(a.shape[0] + 1, dtype=np.intp) a_indptr[0] = 0 np.cumsum(np.bincount(a.coords[0], minlength=a.shape[0]), out=a_indptr[1:]) b_indptr = np.empty(b.shape[0] + 1, dtype=np.intp) b_indptr[0] = 0 np.cumsum(np.bincount(b.coords[0], minlength=b.shape[0]), out=b_indptr[1:]) coords, data = _dot_coo_coo_type(a.dtype, b.dtype)( out_shape, a.coords, b.coords, a.data, b.data, a_indptr, b_indptr ) out = COO( coords, data, shape=out_shape, has_duplicates=False, sorted=False, prune=True, ) if return_type == np.ndarray: return out.todense() if return_type == GCXS: return out.asformat("gcxs") return out if isinstance(a, COO) and isinstance(b, np.ndarray): b = b.view(type=np.ndarray).T if return_type is None or return_type == np.ndarray: return _dot_coo_ndarray_type(a.dtype, b.dtype)(a.coords, a.data, b, out_shape) coords, data = _dot_coo_ndarray_type_sparse(a.dtype, b.dtype)(a.coords, a.data, b, out_shape) out = COO(coords, data, shape=out_shape, has_duplicates=False, sorted=True) if return_type == GCXS: return out.asformat("gcxs") return out if isinstance(a, np.ndarray) and isinstance(b, COO): a = a.view(type=np.ndarray) if return_type is None or return_type == np.ndarray: return _dot_ndarray_coo_type(a.dtype, b.dtype)(a, b.coords, b.data, out_shape) b = b.T coords, data = _dot_ndarray_coo_type_sparse(a.dtype, b.dtype)(a, b.coords, b.data, out_shape) out = COO(coords, data, shape=out_shape, has_duplicates=False, sorted=True, prune=True) if return_type == GCXS: return out.asformat("gcxs") return out if isinstance(a, np.ndarray) and isinstance(b, np.ndarray): return np.dot(a, b) raise TypeError("Unsupported types.") def _memoize_dtype(f): """ Memoizes a function taking in NumPy dtypes. Parameters ---------- f : Callable Returns ------- wrapped : Callable Examples -------- >>> def func(dt1): ... return object() >>> func = _memoize_dtype(func) >>> func(np.dtype("i8")) is func(np.dtype("int64")) True >>> func(np.dtype("i8")) is func(np.dtype("i4")) False """ cache = {} @wraps(f) def wrapped(*args): key = tuple(arg.name for arg in args) if key in cache: return cache[key] result = f(*args) cache[key] = result return result return wrapped @numba.jit(nopython=True, nogil=True) def _csr_csr_count_nnz(out_shape, a_indices, b_indices, a_indptr, b_indptr): # pragma: no cover """ A function for computing the number of nonzero values in the resulting array from multiplying an array with compressed rows with an array with compressed rows: (a @ b).nnz. Parameters ---------- out_shape : tuple The shape of the output array. a_indices, a_indptr : np.ndarray The indices and index pointer array of ``a``. b_data, b_indices, b_indptr : np.ndarray The indices and index pointer array of ``b``. """ n_row, n_col = out_shape nnz = 0 mask = np.full(n_col, -1) for i in range(n_row): row_nnz = 0 for j in a_indices[a_indptr[i] : a_indptr[i + 1]]: for k in b_indices[b_indptr[j] : b_indptr[j + 1]]: if mask[k] != i: mask[k] = i row_nnz += 1 nnz += row_nnz return nnz @numba.jit(nopython=True, nogil=True) def _csr_ndarray_count_nnz(out_shape, indptr, a_indices, a_indptr, b): # pragma: no cover """ A function for computing the number of nonzero values in the resulting array from multiplying an array with compressed rows with a dense numpy array: (a @ b).nnz. Parameters ---------- out_shape : tuple The shape of the output array. indptr : ndarray The empty index pointer array for the output. 
a_indices, a_indptr : np.ndarray The indices and index pointer array of ``a``. b : np.ndarray The second input array ``b``. """ nnz = 0 for i in range(out_shape[0]): cur_row = a_indices[a_indptr[i] : a_indptr[i + 1]] for j in range(out_shape[1]): for k in cur_row: if b[k, j] != 0: nnz += 1 break indptr[i + 1] = nnz return nnz @numba.jit(nopython=True, nogil=True) def _csc_ndarray_count_nnz(a_shape, b_shape, indptr, a_indices, a_indptr, b): # pragma: no cover """ A function for computing the number of nonzero values in the resulting array from multiplying an array with compressed columns with a dense numpy array: (a @ b).nnz. Parameters ---------- a_shape, b_shape : tuple The shapes of the input arrays. indptr : ndarray The empty index pointer array for the output. a_indices, a_indptr : np.ndarray The indices and index pointer array of ``a``. b : np.ndarray The second input array ``b``. """ nnz = 0 mask = np.full(a_shape[0], -1) for i in range(b_shape[1]): col_nnz = 0 for j in range(b_shape[0]): for k in a_indices[a_indptr[j] : a_indptr[j + 1]]: if b[j, i] != 0 and mask[k] != i: mask[k] = i col_nnz += 1 nnz += col_nnz indptr[i + 1] = nnz return nnz def _dot_dtype(dt1, dt2): return (np.zeros((), dtype=dt1) * np.zeros((), dtype=dt2)).dtype @_memoize_dtype def _dot_csr_csr_type(dt1, dt2): dtr = _dot_dtype(dt1, dt2) @numba.jit( nopython=True, nogil=True, locals={"data_curr": numba.np.numpy_support.from_dtype(dtr)}, ) def _dot_csr_csr(out_shape, a_data, b_data, a_indices, b_indices, a_indptr, b_indptr): # pragma: no cover """ Utility function taking in two ``GCXS`` objects and calculating their dot product: a @ b for a and b with compressed rows. Parameters ---------- out_shape : tuple The shape of the output array. a_data, a_indices, a_indptr : np.ndarray The data, indices, and index pointer arrays of ``a``. b_data, b_indices, b_indptr : np.ndarray The data, indices, and index pointer arrays of ``b``. 
""" # much of this is borrowed from: # https://github.com/scipy/scipy/blob/main/scipy/sparse/sparsetools/csr.h # calculate nnz before multiplying so we can use static arrays nnz = _csr_csr_count_nnz(out_shape, a_indices, b_indices, a_indptr, b_indptr) n_row, n_col = out_shape indptr = np.empty(n_row + 1, dtype=np.intp) indptr[0] = 0 indices = np.empty(nnz, dtype=np.intp) data = np.empty(nnz, dtype=dtr) next_ = np.full(n_col, -1) sums = np.zeros(n_col, dtype=dtr) nnz = 0 for i in range(n_row): head = -2 length = 0 next_[:] = -1 for j, av in zip( # noqa: B905 a_indices[a_indptr[i] : a_indptr[i + 1]], a_data[a_indptr[i] : a_indptr[i + 1]], ): for k, bv in zip( # noqa: B905 b_indices[b_indptr[j] : b_indptr[j + 1]], b_data[b_indptr[j] : b_indptr[j + 1]], ): sums[k] += av * bv if next_[k] == -1: next_[k] = head head = k length += 1 for _ in range(length): if next_[head] != -1: indices[nnz] = head data[nnz] = sums[head] nnz += 1 temp = head head = next_[head] next_[temp] = -1 sums[temp] = 0 indptr[i + 1] = nnz if len(indices) == (n_col * n_row): for i in range(len(indices) // n_col): j = n_col * i k = n_col * (1 + i) data[j:k] = data[j:k][::-1] indices[j:k] = indices[j:k][::-1] return data, indices, indptr return _dot_csr_csr @_memoize_dtype def _dot_csr_ndarray_type(dt1, dt2): dtr = _dot_dtype(dt1, dt2) @numba.jit( nopython=True, nogil=True, locals={"data_curr": numba.np.numpy_support.from_dtype(dtr)}, ) def _dot_csr_ndarray(out_shape, a_data, a_indices, a_indptr, b): # pragma: no cover """ Utility function taking in one `GCXS` and one ``ndarray`` and calculating their dot product: a @ b for a with compressed rows. Returns a dense result. Parameters ---------- a_data, a_indices, a_indptr : np.ndarray The data, indices, and index pointers of ``a``. b : np.ndarray The second input array ``b``. out_shape : Tuple[int] The shape of the output array. """ b = np.ascontiguousarray(b) # ensure memory aligned out = np.zeros(out_shape, dtype=dtr) for i in range(out_shape[0]): val = out[i] for k in range(a_indptr[i], a_indptr[i + 1]): ind = a_indices[k] v = a_data[k] for j in range(out_shape[1]): val[j] += v * b[ind, j] return out return _dot_csr_ndarray @_memoize_dtype def _dot_csr_ndarray_type_sparse(dt1, dt2): dtr = _dot_dtype(dt1, dt2) @numba.jit( nopython=True, nogil=True, locals={"data_curr": numba.np.numpy_support.from_dtype(dtr)}, ) def _dot_csr_ndarray_sparse(out_shape, a_data, a_indices, a_indptr, b): # pragma: no cover """ Utility function taking in one `GCXS` and one ``ndarray`` and calculating their dot product: a @ b for a with compressed rows. Returns a sparse result. Parameters ---------- a_data, a_indices, a_indptr : np.ndarray The data, indices, and index pointers of ``a``. b : np.ndarray The second input array ``b``. out_shape : Tuple[int] The shape of the output array. 
""" indptr = np.empty(out_shape[0] + 1, dtype=np.intp) indptr[0] = 0 nnz = _csr_ndarray_count_nnz(out_shape, indptr, a_indices, a_indptr, b) indices = np.empty(nnz, dtype=np.intp) data = np.empty(nnz, dtype=dtr) current = 0 for i in range(out_shape[0]): for j in range(out_shape[1]): val = 0 nonzero = False for k in range(a_indptr[i], a_indptr[i + 1]): ind = a_indices[k] v = a_data[k] val += v * b[ind, j] if b[ind, j] != 0: nonzero = True if nonzero: data[current] = val indices[current] = j current += 1 return data, indices, indptr return _dot_csr_ndarray_sparse @_memoize_dtype def _dot_csc_ndarray_type_sparse(dt1, dt2): dtr = _dot_dtype(dt1, dt2) @numba.jit( nopython=True, nogil=True, locals={"data_curr": numba.np.numpy_support.from_dtype(dtr)}, ) def _dot_csc_ndarray_sparse(a_shape, b_shape, a_data, a_indices, a_indptr, b): # pragma: no cover """ Utility function taking in one `GCXS` and one ``ndarray`` and calculating their dot product: a @ b for a with compressed columns. Returns a sparse result. Parameters ---------- a_data, a_indices, a_indptr : np.ndarray The data, indices, and index pointers of ``a``. b : np.ndarray The second input array ``b``. a_shape, b_shape : Tuple[int] The shapes of the input arrays. """ indptr = np.empty(b_shape[1] + 1, dtype=np.intp) nnz = _csc_ndarray_count_nnz(a_shape, b_shape, indptr, a_indices, a_indptr, b) indices = np.empty(nnz, dtype=np.intp) data = np.empty(nnz, dtype=dtr) sums = np.zeros(a_shape[0]) mask = np.full(a_shape[0], -1) nnz = 0 indptr[0] = 0 for i in range(b_shape[1]): head = -2 length = 0 for j in range(b_shape[0]): u = b[j, i] if u != 0: for k in range(a_indptr[j], a_indptr[j + 1]): ind = a_indices[k] v = a_data[k] sums[ind] += u * v if mask[ind] == -1: mask[ind] = head head = ind length += 1 for _ in range(length): if sums[head] != 0: indices[nnz] = head data[nnz] = sums[head] nnz += 1 temp = head head = mask[head] mask[temp] = -1 sums[temp] = 0 return data, indices, indptr return _dot_csc_ndarray_sparse @_memoize_dtype def _dot_csc_ndarray_type(dt1, dt2): dtr = _dot_dtype(dt1, dt2) @numba.jit( nopython=True, nogil=True, locals={"data_curr": numba.np.numpy_support.from_dtype(dtr)}, ) def _dot_csc_ndarray(a_shape, b_shape, a_data, a_indices, a_indptr, b): # pragma: no cover """ Utility function taking in one `GCXS` and one ``ndarray`` and calculating their dot product: a @ b for a with compressed columns. Returns a dense result. Parameters ---------- a_data, a_indices, a_indptr : np.ndarray The data, indices, and index pointers of ``a``. b : np.ndarray The second input array ``b``. a_shape, b_shape : Tuple[int] The shapes of the input arrays. """ b = np.ascontiguousarray(b) # ensure memory aligned out = np.zeros((a_shape[0], b_shape[1]), dtype=dtr) for i in range(b_shape[0]): for k in range(a_indptr[i], a_indptr[i + 1]): ind = a_indices[k] v = a_data[k] val = out[ind] for j in range(b_shape[1]): val[j] += v * b[i, j] return out return _dot_csc_ndarray @_memoize_dtype def _dot_coo_coo_type(dt1, dt2): dtr = _dot_dtype(dt1, dt2) @numba.jit( nopython=True, nogil=True, locals={"data_curr": numba.np.numpy_support.from_dtype(dtr)}, ) def _dot_coo_coo(out_shape, a_coords, b_coords, a_data, b_data, a_indptr, b_indptr): # pragma: no cover """ Utility function taking in two ``COO`` objects and calculating their dot product: a @ b. Parameters ---------- a_shape, b_shape : tuple The shapes of the input arrays. a_data, a_coords : np.ndarray The data and coordinates of ``a``. b_data, b_coords : np.ndarray The data and coordinates of ``b``. 
""" # much of this is borrowed from: # https://github.com/scipy/scipy/blob/main/scipy/sparse/sparsetools/csr.h n_row, n_col = out_shape # calculate nnz before multiplying so we can use static arrays nnz = _csr_csr_count_nnz(out_shape, a_coords[1], b_coords[1], a_indptr, b_indptr) coords = np.empty((2, nnz), dtype=np.intp) data = np.empty(nnz, dtype=dtr) next_ = np.full(n_col, -1) sums = np.zeros(n_col, dtype=dtr) nnz = 0 for i in range(n_row): head = -2 length = 0 next_[:] = -1 for j, av in zip( # noqa: B905 a_coords[1, a_indptr[i] : a_indptr[i + 1]], a_data[a_indptr[i] : a_indptr[i + 1]], ): for k, bv in zip( # noqa: B905 b_coords[1, b_indptr[j] : b_indptr[j + 1]], b_data[b_indptr[j] : b_indptr[j + 1]], ): sums[k] += av * bv if next_[k] == -1: next_[k] = head head = k length += 1 for _ in range(length): if next_[head] != -1: coords[0, nnz] = i coords[1, nnz] = head data[nnz] = sums[head] nnz += 1 temp = head head = next_[head] next_[temp] = -1 sums[temp] = 0 return coords, data return _dot_coo_coo @_memoize_dtype def _dot_coo_ndarray_type(dt1, dt2): dtr = _dot_dtype(dt1, dt2) @numba.jit(nopython=True, nogil=True) def _dot_coo_ndarray(coords1, data1, array2, out_shape): # pragma: no cover """ Utility function taking in one `COO` and one ``ndarray`` and calculating a "sense" of their dot product. Acually computes ``s1 @ x2.T``. Parameters ---------- data1, coords1 : np.ndarray The data and coordinates of ``s1``. array2 : np.ndarray The second input array ``x2``. out_shape : Tuple[int] The output shape. """ out = np.zeros(out_shape, dtype=dtr) didx1 = 0 while didx1 < len(data1): oidx1 = coords1[0, didx1] didx1_curr = didx1 for oidx2 in range(out_shape[1]): didx1 = didx1_curr while didx1 < len(data1) and coords1[0, didx1] == oidx1: out[oidx1, oidx2] += data1[didx1] * array2[oidx2, coords1[1, didx1]] didx1 += 1 return out return _dot_coo_ndarray @_memoize_dtype def _dot_coo_ndarray_type_sparse(dt1, dt2): dtr = _dot_dtype(dt1, dt2) @numba.jit( nopython=True, nogil=True, locals={"data_curr": numba.np.numpy_support.from_dtype(dtr)}, ) def _dot_coo_ndarray(coords1, data1, array2, out_shape): # pragma: no cover """ Utility function taking in one `COO` and one ``ndarray`` and calculating a "sense" of their dot product. Acually computes ``s1 @ x2.T``. Parameters ---------- data1, coords1 : np.ndarray The data and coordinates of ``s1``. array2 : np.ndarray The second input array ``x2``. out_shape : Tuple[int] The output shape. """ out_data = [] out_coords = [] # coords1.shape = (2, len(data1)) # coords1[0, :] = rows, sorted # coords1[1, :] = columns didx1 = 0 while didx1 < len(data1): current_row = coords1[0, didx1] cur_didx1 = didx1 oidx2 = 0 while oidx2 < out_shape[1]: cur_didx1 = didx1 data_curr = 0 while cur_didx1 < len(data1) and coords1[0, cur_didx1] == current_row: data_curr += data1[cur_didx1] * array2[oidx2, coords1[1, cur_didx1]] cur_didx1 += 1 if data_curr != 0: out_data.append(data_curr) out_coords.append((current_row, oidx2)) oidx2 += 1 didx1 = cur_didx1 if len(out_data) == 0: return np.empty((2, 0), dtype=np.intp), np.empty((0,), dtype=dtr) return np.array(out_coords).T, np.array(out_data) return _dot_coo_ndarray @_memoize_dtype def _dot_ndarray_coo_type(dt1, dt2): dtr = _dot_dtype(dt1, dt2) @numba.jit(nopython=True, nogil=True) def _dot_ndarray_coo(array1, coords2, data2, out_shape): # pragma: no cover """ Utility function taking in two one ``ndarray`` and one ``COO`` and calculating a "sense" of their dot product. Acually computes ``x1 @ s2.T``. 
Parameters ---------- array1 : np.ndarray The input array ``x1``. data2, coords2 : np.ndarray The data and coordinates of ``s2``. out_shape : Tuple[int] The output shape. """ out = np.zeros(out_shape, dtype=dtr) for oidx1 in range(out_shape[0]): for didx2 in range(len(data2)): oidx2 = coords2[1, didx2] out[oidx1, oidx2] += array1[oidx1, coords2[0, didx2]] * data2[didx2] return out return _dot_ndarray_coo @_memoize_dtype def _dot_ndarray_coo_type_sparse(dt1, dt2): dtr = _dot_dtype(dt1, dt2) @numba.jit( nopython=True, nogil=True, locals={"data_curr": numba.np.numpy_support.from_dtype(dtr)}, ) def _dot_ndarray_coo(array1, coords2, data2, out_shape): # pragma: no cover """ Utility function taking in two one ``ndarray`` and one ``COO`` and calculating a "sense" of their dot product. Acually computes ``x1 @ s2.T``. Parameters ---------- array1 : np.ndarray The input array ``x1``. data2, coords2 : np.ndarray The data and coordinates of ``s2``. out_shape : Tuple[int] The output shape. """ out_data = [] out_coords = [] # coords2.shape = (2, len(data2)) # coords2[0, :] = columns, sorted # coords2[1, :] = rows for oidx1 in range(out_shape[0]): data_curr = 0 current_col = 0 for didx2 in range(len(data2)): if coords2[0, didx2] != current_col: if data_curr != 0: out_data.append(data_curr) out_coords.append([oidx1, current_col]) data_curr = 0 current_col = coords2[0, didx2] data_curr += array1[oidx1, coords2[1, didx2]] * data2[didx2] if data_curr != 0: out_data.append(data_curr) out_coords.append([oidx1, current_col]) if len(out_data) == 0: return np.empty((2, 0), dtype=np.intp), np.empty((0,), dtype=dtr) return np.array(out_coords).T, np.array(out_data) return _dot_ndarray_coo # Copied from : https://github.com/numpy/numpy/blob/59fec4619403762a5d785ad83fcbde5a230416fc/numpy/core/einsumfunc.py#L523 # under BSD-3-Clause license : https://github.com/numpy/numpy/blob/v1.24.0/LICENSE.txt def _parse_einsum_input(operands): """ A copy of the numpy parse_einsum_input that does not cast the operands to numpy array. Returns ------- input_strings : str Parsed input strings output_string : str Parsed output string operands : list of array_like The operands to use in the numpy contraction Examples -------- The operand list is simplified to reduce printing: >>> np.random.seed(123) >>> a = np.random.rand(4, 4) >>> b = np.random.rand(4, 4, 4) >>> _parse_einsum_input(("...a,...a->...", a, b)) ('za,xza', 'xz', [a, b]) # may vary >>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0])) ('za,xza', 'xz', [a, b]) # may vary """ if len(operands) == 0: raise ValueError("No input operands") if isinstance(operands[0], str): subscripts = operands[0].replace(" ", "") operands = operands[1:] # Ensure all characters are valid for s in subscripts: if s in ".,->": continue if s not in np.core.einsumfunc.einsum_symbols: raise ValueError(f"Character {s} is not a valid symbol.") else: tmp_operands = list(operands) operand_list = [] subscript_list = [] for _ in range(len(operands) // 2): operand_list.append(tmp_operands.pop(0)) subscript_list.append(tmp_operands.pop(0)) output_list = tmp_operands[-1] if len(tmp_operands) else None operands = operand_list subscripts = "" last = len(subscript_list) - 1 for num, sub in enumerate(subscript_list): for s in sub: if s is Ellipsis: subscripts += "..." 
else: try: s = index(s) except TypeError as e: raise TypeError("For this input type lists must contain either int or Ellipsis") from e subscripts += np.core.einsumfunc.einsum_symbols[s] if num != last: subscripts += "," if output_list is not None: subscripts += "->" for s in output_list: if s is Ellipsis: subscripts += "..." else: try: s = index(s) except TypeError as e: raise TypeError("For this input type lists must contain either int or Ellipsis") from e subscripts += np.core.einsumfunc.einsum_symbols[s] # Check for proper "->" if ("-" in subscripts) or (">" in subscripts): invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1) if invalid or (subscripts.count("->") != 1): raise ValueError("Subscripts can only contain one '->'.") # Parse ellipses if "." in subscripts: used = subscripts.replace(".", "").replace(",", "").replace("->", "") unused = list(np.core.einsumfunc.einsum_symbols_set - set(used)) ellipse_inds = "".join(unused) longest = 0 if "->" in subscripts: input_tmp, output_sub = subscripts.split("->") split_subscripts = input_tmp.split(",") out_sub = True else: split_subscripts = subscripts.split(",") out_sub = False for num, sub in enumerate(split_subscripts): if "." in sub: if (sub.count(".") != 3) or (sub.count("...") != 1): raise ValueError("Invalid Ellipses.") # Take into account numerical values if operands[num].shape == (): ellipse_count = 0 else: ellipse_count = builtins.max(operands[num].ndim, 1) ellipse_count -= len(sub) - 3 if ellipse_count > longest: longest = ellipse_count if ellipse_count < 0: raise ValueError("Ellipses lengths do not match.") if ellipse_count == 0: split_subscripts[num] = sub.replace("...", "") else: rep_inds = ellipse_inds[-ellipse_count:] split_subscripts[num] = sub.replace("...", rep_inds) subscripts = ",".join(split_subscripts) out_ellipse = "" if longest == 0 else ellipse_inds[-longest:] if out_sub: subscripts += "->" + output_sub.replace("...", out_ellipse) else: # Special care for outputless ellipses output_subscript = "" tmp_subscripts = subscripts.replace(",", "") for s in sorted(set(tmp_subscripts)): if s not in (np.core.einsumfunc.einsum_symbols): raise ValueError(f"Character {s} is not a valid symbol.") if tmp_subscripts.count(s) == 1: output_subscript += s normal_inds = "".join(sorted(set(output_subscript) - set(out_ellipse))) subscripts += "->" + out_ellipse + normal_inds # Build output string if does not exist if "->" in subscripts: input_subscripts, output_subscript = subscripts.split("->") else: input_subscripts = subscripts # Build output subscripts tmp_subscripts = subscripts.replace(",", "") output_subscript = "" for s in sorted(set(tmp_subscripts)): if s not in np.core.einsumfunc.einsum_symbols: raise ValueError(f"Character {s} is not a valid symbol.") if tmp_subscripts.count(s) == 1: output_subscript += s # Make sure output subscripts are in the input for char in output_subscript: if char not in input_subscripts: raise ValueError(f"Output character {char} did not appear in the input") # Make sure number operands is equivalent to the number of terms if len(input_subscripts.split(",")) != len(operands): raise ValueError("Number of einsum subscripts must be equal to the number of operands.") return (input_subscripts, output_subscript, operands) def _einsum_single(lhs, rhs, operand): """Perform a single term einsum, i.e. any combination of transposes, sums and traces of dimensions. Parameters ---------- lhs : str The indices of the input array. rhs : str The indices of the output array. 
operand : SparseArray The array to perform the einsum on. Returns ------- output : SparseArray """ from ._coo import COO if lhs == rhs: if not rhs: # ensure scalar output return operand.sum() return operand if not isinstance(operand, SparseArray): # just use numpy for dense input return np.einsum(f"{lhs}->{rhs}", operand) # else require COO for operations, but check if should convert back to_output_format = getattr(operand, "from_coo", lambda x: x) operand = asCOO(operand) # check if repeated / 'trace' indices mean we are only taking a subset where = {} for i, ix in enumerate(lhs): where.setdefault(ix, []).append(i) selector = None for locs in where.values(): loc0, *rlocs = locs if rlocs: # repeated index if len({operand.shape[loc] for loc in locs}) > 1: raise ValueError("Repeated indices must have the same dimension.") # only select data where all indices match subselector = (operand.coords[loc0] == operand.coords[rlocs]).all(axis=0) if selector is None: selector = subselector else: selector &= subselector # indices that are removed (i.e. not in the output / `perm`) # are handled by `has_duplicates=True` below perm = [lhs.index(ix) for ix in rhs] new_shape = tuple(operand.shape[i] for i in perm) # select the new COO data if selector is not None: new_coords = operand.coords[:, selector][perm] new_data = operand.data[selector] else: new_coords = operand.coords[perm] new_data = operand.data if not rhs: # scalar output - match numpy behaviour by not wrapping as array return new_data.sum() return to_output_format(COO(new_coords, new_data, shape=new_shape, has_duplicates=True)) def einsum(*operands, **kwargs): """ Perform the equivalent of :obj:`numpy.einsum`. Parameters ---------- subscripts : str Specifies the subscripts for summation as comma separated list of subscript labels. An implicit (classical Einstein summation) calculation is performed unless the explicit indicator '->' is included as well as subscript labels of the precise output form. operands : sequence of SparseArray These are the arrays for the operation. dtype : data-type, optional If provided, forces the calculation to use the data type specified. Default is ``None``. **kwargs : dict, optional Any additional arguments to pass to the function. Returns ------- output : SparseArray The calculation based on the Einstein summation convention. """ lhs, rhs, operands = _parse_einsum_input(operands) # Parse input check_zero_fill_value(*operands) if "dtype" in kwargs and kwargs["dtype"] is not None: operands = [o.astype(kwargs["dtype"]) for o in operands] if len(operands) == 1: return _einsum_single(lhs, rhs, operands[0]) # if multiple arrays: align, broadcast multiply and then use single einsum # for example: # "aab,cbd->dac" # we first perform single term reductions and align: # aab -> ab.. 
# cbd -> .bcd # (where dots represent broadcastable size 1 dimensions), then multiply all # to form the 'minimal outer product' and do a final single term einsum: # abcd -> dac # get ordered union of indices from all terms, indicies that only appear # on a single term will be removed in the 'preparation' step below terms = lhs.split(",") total = {} sizes = {} for t, term in enumerate(terms): shape = operands[t].shape for ix, d in zip(term, shape, strict=False): if d != sizes.setdefault(ix, d): raise ValueError(f"Inconsistent shape for index '{ix}'.") total.setdefault(ix, set()).add(t) for ix in rhs: total[ix].add(-1) aligned_term = "".join(ix for ix, apps in total.items() if len(apps) > 1) # NB: if every index appears exactly twice, # we could identify and dispatch to tensordot here? parrays = [] for term, array in zip(terms, operands, strict=True): # calc the target indices for this term pterm = "".join(ix for ix in aligned_term if ix in term) if pterm != term: # perform necessary transpose and reductions array = _einsum_single(term, pterm, array) # calc broadcastable shape shape = tuple(array.shape[pterm.index(ix)] if ix in pterm else 1 for ix in aligned_term) parrays.append(array.reshape(shape) if array.shape != shape else array) aligned_array = reduce(mul, parrays) return _einsum_single(aligned_term, rhs, aligned_array) def stack(arrays, axis=0, compressed_axes=None): """ Stack the input arrays along the given dimension. Parameters ---------- arrays : Iterable[SparseArray] The input arrays to stack. axis : int, optional The axis along which to stack the input arrays. compressed_axes : iterable, optional The axes to compress if returning a GCXS array. Returns ------- SparseArray The output stacked array. Raises ------ ValueError If all elements of :code:`arrays` don't have the same fill-value. See Also -------- numpy.stack : NumPy equivalent function """ from ._compressed import GCXS if not builtins.all(isinstance(arr, GCXS) for arr in arrays): from ._coo import stack as coo_stack return coo_stack(arrays, axis) from ._compressed import stack as gcxs_stack return gcxs_stack(arrays, axis, compressed_axes) def concatenate(arrays, axis=0, compressed_axes=None): """ Concatenate the input arrays along the given dimension. Parameters ---------- arrays : Iterable[SparseArray] The input arrays to concatenate. axis : int, optional The axis along which to concatenate the input arrays. The default is zero. compressed_axes : iterable, optional The axes to compress if returning a GCXS array. Returns ------- SparseArray The output concatenated array. Raises ------ ValueError If all elements of :code:`arrays` don't have the same fill-value. See Also -------- numpy.concatenate : NumPy equivalent function """ from ._compressed import GCXS if not builtins.all(isinstance(arr, GCXS) for arr in arrays): from ._coo import concatenate as coo_concat return coo_concat(arrays, axis) from ._compressed import concatenate as gcxs_concat return gcxs_concat(arrays, axis, compressed_axes) concat = concatenate @_check_device def eye(N, M=None, k=0, dtype=float, format="coo", *, device=None, **kwargs): """Return a 2-D array in the specified format with ones on the diagonal and zeros elsewhere. Parameters ---------- N : int Number of rows in the output. M : int, optional Number of columns in the output. If None, defaults to `N`. k : int, optional Index of the diagonal: 0 (the default) refers to the main diagonal, a positive value refers to an upper diagonal, and a negative value to a lower diagonal. 
dtype : data-type, optional Data-type of the returned array. format : str, optional A format string. Returns ------- I : SparseArray of shape (N, M) An array where all elements are equal to zero, except for the `k`-th diagonal, whose values are equal to one. Examples -------- >>> eye(2, dtype=int).todense() # doctest: +NORMALIZE_WHITESPACE array([[1, 0], [0, 1]]) >>> eye(3, k=1).todense() # doctest: +SKIP array([[0., 1., 0.], [0., 0., 1.], [0., 0., 0.]]) """ from ._coo import COO if M is None: M = N N = int(N) M = int(M) k = int(k) data_length = builtins.min(N, M) if k > 0: data_length = builtins.max(builtins.min(data_length, M - k), 0) n_coords = np.arange(data_length, dtype=np.intp) m_coords = n_coords + k elif k < 0: data_length = builtins.max(builtins.min(data_length, N + k), 0) m_coords = np.arange(data_length, dtype=np.intp) n_coords = m_coords - k else: n_coords = m_coords = np.arange(data_length, dtype=np.intp) coords = np.stack([n_coords, m_coords]) data = np.array(1, dtype=dtype) return COO(coords, data=data, shape=(N, M), has_duplicates=False, sorted=True).asformat(format, **kwargs) @_check_device def full(shape, fill_value, dtype=None, format="coo", order="C", *, device=None, **kwargs): """Return a SparseArray of given shape and type, filled with `fill_value`. Parameters ---------- shape : int or tuple of ints Shape of the new array, e.g., ``(2, 3)`` or ``2``. fill_value : scalar Fill value. dtype : data-type, optional The desired data-type for the array. The default, `None`, means `np.array(fill_value).dtype`. format : str, optional A format string. compressed_axes : iterable, optional The axes to compress if returning a GCXS array. order : {'C', None} Values except these are not currently supported and raise a NotImplementedError. Returns ------- out : SparseArray Array of `fill_value` with the given shape and dtype. Examples -------- >>> full(5, 9).todense() # doctest: +NORMALIZE_WHITESPACE array([9, 9, 9, 9, 9]) >>> full((2, 2), 9, dtype=float).todense() # doctest: +SKIP array([[9., 9.], [9., 9.]]) """ from sparse import COO if dtype is None: dtype = np.array(fill_value).dtype if not isinstance(shape, tuple): shape = (shape,) if order not in {"C", None}: raise NotImplementedError("Currently, only 'C' and None are supported.") data = np.empty(0, dtype=dtype) coords = np.empty((len(shape), 0), dtype=np.intp) return COO( coords, data=data, shape=shape, fill_value=fill_value, has_duplicates=False, sorted=True, ).asformat(format, **kwargs) @_check_device def full_like(a, fill_value, dtype=None, shape=None, format=None, *, device=None, **kwargs): """Return a full array with the same shape and type as a given array. Parameters ---------- a : array_like The shape and data-type of the result will match those of `a`. dtype : data-type, optional Overrides the data type of the result. format : str, optional A format string. compressed_axes : iterable, optional The axes to compress if returning a GCXS array. Returns ------- out : SparseArray Array of `fill_value` with the same shape and type as `a`. 
Examples -------- >>> x = np.ones((2, 3), dtype="i8") >>> full_like(x, 9.0).todense() # doctest: +NORMALIZE_WHITESPACE array([[9, 9, 9], [9, 9, 9]]) """ if format is None and not isinstance(a, np.ndarray): format = type(a).__name__.lower() elif format is None: format = "coo" compressed_axes = kwargs.pop("compressed_axes", None) if hasattr(a, "compressed_axes") and compressed_axes is None: compressed_axes = a.compressed_axes return full( a.shape if shape is None else shape, fill_value, dtype=(a.dtype if dtype is None else dtype), format=format, **kwargs, ) def zeros(shape, dtype=float, format="coo", *, device=None, **kwargs): """Return a SparseArray of given shape and type, filled with zeros. Parameters ---------- shape : int or tuple of ints Shape of the new array, e.g., ``(2, 3)`` or ``2``. dtype : data-type, optional The desired data-type for the array, e.g., `numpy.int8`. Default is `numpy.float64`. format : str, optional A format string. compressed_axes : iterable, optional The axes to compress if returning a GCXS array. Returns ------- out : SparseArray Array of zeros with the given shape and dtype. Examples -------- >>> zeros(5).todense() # doctest: +SKIP array([0., 0., 0., 0., 0.]) >>> zeros((2, 2), dtype=int).todense() # doctest: +NORMALIZE_WHITESPACE array([[0, 0], [0, 0]]) """ return full(shape, fill_value=0, dtype=np.dtype(dtype), format=format, device=device, **kwargs) def zeros_like(a, dtype=None, shape=None, format=None, *, device=None, **kwargs): """Return a SparseArray of zeros with the same shape and type as ``a``. Parameters ---------- a : array_like The shape and data-type of the result will match those of `a`. dtype : data-type, optional Overrides the data type of the result. format : str, optional A format string. compressed_axes : iterable, optional The axes to compress if returning a GCXS array. Returns ------- out : SparseArray Array of zeros with the same shape and type as `a`. Examples -------- >>> x = np.ones((2, 3), dtype="i8") >>> zeros_like(x).todense() # doctest: +NORMALIZE_WHITESPACE array([[0, 0, 0], [0, 0, 0]]) """ return full_like(a, fill_value=0, dtype=dtype, shape=shape, format=format, device=device, **kwargs) def ones(shape, dtype=float, format="coo", *, device=None, **kwargs): """Return a SparseArray of given shape and type, filled with ones. Parameters ---------- shape : int or tuple of ints Shape of the new array, e.g., ``(2, 3)`` or ``2``. dtype : data-type, optional The desired data-type for the array, e.g., `numpy.int8`. Default is `numpy.float64`. format : str, optional A format string. compressed_axes : iterable, optional The axes to compress if returning a GCXS array. Returns ------- out : SparseArray Array of ones with the given shape and dtype. Examples -------- >>> ones(5).todense() # doctest: +SKIP array([1., 1., 1., 1., 1.]) >>> ones((2, 2), dtype=int).todense() # doctest: +NORMALIZE_WHITESPACE array([[1, 1], [1, 1]]) """ return full(shape, fill_value=1, dtype=np.dtype(dtype), format=format, device=device, **kwargs) def ones_like(a, dtype=None, shape=None, format=None, *, device=None, **kwargs): """Return a SparseArray of ones with the same shape and type as ``a``. Parameters ---------- a : array_like The shape and data-type of the result will match those of `a`. dtype : data-type, optional Overrides the data type of the result. format : str, optional A format string. compressed_axes : iterable, optional The axes to compress if returning a GCXS array. Returns ------- out : SparseArray Array of ones with the same shape and type as `a`. 
Examples -------- >>> x = np.ones((2, 3), dtype="i8") >>> ones_like(x).todense() # doctest: +NORMALIZE_WHITESPACE array([[1, 1, 1], [1, 1, 1]]) """ return full_like(a, fill_value=1, dtype=dtype, shape=shape, format=format, device=device, **kwargs) def empty(shape, dtype=float, format="coo", *, device=None, **kwargs): return full(shape, fill_value=0, dtype=np.dtype(dtype), format=format, device=device, **kwargs) empty.__doc__ = zeros.__doc__ def empty_like(a, dtype=None, shape=None, format=None, *, device=None, **kwargs): return full_like(a, fill_value=0, dtype=dtype, shape=shape, format=format, device=device, **kwargs) empty_like.__doc__ = zeros_like.__doc__ def outer(a, b, out=None): """ Return outer product of two sparse arrays. Parameters ---------- a, b : sparse.SparseArray The input arrays. out : sparse.SparseArray The output array. Examples -------- >>> import numpy as np >>> import sparse >>> a = sparse.COO(np.arange(4)) >>> o = sparse.outer(a, a) >>> o.todense() array([[0, 0, 0, 0], [0, 1, 2, 3], [0, 2, 4, 6], [0, 3, 6, 9]]) """ from ._coo import COO from ._sparse_array import SparseArray if isinstance(a, SparseArray): a = COO(a) if isinstance(b, SparseArray): b = COO(b) return np.multiply.outer(a.flatten(), b.flatten(), out=out) def asnumpy(a, dtype=None, order=None): """Returns a dense numpy array from an arbitrary source array. Args: a: Arbitrary object that can be converted to :class:`numpy.ndarray`. order ({'C', 'F', 'A'}): The desired memory layout of the output array. When ``order`` is 'A', it uses 'F' if ``a`` is fortran-contiguous and 'C' otherwise. Returns: numpy.ndarray: Converted array on the host memory. """ from ._sparse_array import SparseArray if isinstance(a, SparseArray): a = a.todense() return np.asarray(a, dtype=dtype, order=order) # this code was taken from numpy.moveaxis # (cf. numpy/core/numeric.py, lines 1340-1409, v1.18.4) # https://github.com/numpy/numpy/blob/v1.18.4/numpy/core/numeric.py#L1340-L1409 def moveaxis(a, source, destination): """ Move axes of an array to new positions. Other axes remain in their original order. Parameters ---------- a : SparseArray The array whose axes should be reordered. source : int or List[int] Original positions of the axes to move. These must be unique. destination : int or List[int] Destination positions for each of the original axes. These must also be unique. Returns ------- SparseArray Array with moved axes. Examples -------- >>> import numpy as np >>> import sparse >>> x = sparse.COO.from_numpy(np.ones((2, 3, 4, 5))) >>> sparse.moveaxis(x, (0, 1), (2, 3)) """ if not isinstance(source, Iterable): source = (source,) if not isinstance(destination, Iterable): destination = (destination,) source = normalize_axis(source, a.ndim) destination = normalize_axis(destination, a.ndim) if len(source) != len(destination): raise ValueError("`source` and `destination` arguments must have the same number of elements") order = [n for n in range(a.ndim) if n not in source] for dest, src in sorted(zip(destination, source, strict=True)): order.insert(dest, src) return a.transpose(order) def pad(array, pad_width, mode="constant", **kwargs): """ Performs the equivalent of :obj:`numpy.pad` for :obj:`SparseArray`. Note that this function returns a new array instead of a view. Parameters ---------- array : SparseArray Sparse array which is to be padded. pad_width : {sequence, array_like, int} Number of values padded to the edges of each axis. ((before_1, after_1), … (before_N, after_N)) unique pad widths for each axis. 
((before, after),) yields same before and after pad for each axis. (pad,) or int is a shortcut for before = after = pad width for all axes. mode : str Pads to a constant value which is fill value. Currently only constant mode is implemented constant_values : int The values to set the padded values for each axis. Default is 0. This must be same as fill value. Returns ------- SparseArray The padded sparse array. Raises ------ NotImplementedError If mode != 'constant' or there are unknown arguments. ValueError If constant_values != self.fill_value See Also -------- :obj:`numpy.pad` : NumPy equivalent function """ if not isinstance(array, SparseArray): raise NotImplementedError("Input array is not compatible.") if mode.lower() != "constant": raise NotImplementedError(f"Mode '{mode}' is not yet supported.") if not equivalent(kwargs.pop("constant_values", _zero_of_dtype(array.dtype)), array.fill_value): raise ValueError("constant_values can only be equal to fill value.") if kwargs: raise NotImplementedError("Additional Unknown arguments present.") from ._coo import COO array = array.asformat("coo") pad_width = np.broadcast_to(pad_width, (len(array.shape), 2)) new_coords = array.coords + pad_width[:, 0:1] new_shape = tuple([array.shape[i] + pad_width[i, 0] + pad_width[i, 1] for i in range(len(array.shape))]) new_data = array.data return COO(new_coords, new_data, new_shape, fill_value=array.fill_value) def format_to_string(format): if isinstance(format, type): if not issubclass(format, SparseArray): raise ValueError(f"invalid format: {format}") format = format.__name__.lower() if isinstance(format, str): return format raise ValueError(f"invalid format: {format}") @_check_device def asarray(obj, /, *, dtype=None, format="coo", copy=False, device=None): """ Convert the input to a sparse array. Parameters ---------- obj : array_like Object to be converted to an array. dtype : dtype, optional Output array data type. format : str, optional Output array sparse format. device : str, optional Device on which to place the created array. copy : bool, optional Boolean indicating whether or not to copy the input. Returns ------- out : Union[SparseArray, numpy.ndarray] Sparse or 0-D array containing the data from `obj`. Examples -------- >>> x = np.eye(8, dtype="i8") >>> sparse.asarray(x, format="COO") """ if format not in {"coo", "dok", "gcxs", "csc", "csr"}: raise ValueError(f"{format} format not supported.") from ._compressed import CSC, CSR, GCXS from ._coo import COO from ._dok import DOK format_dict = {"coo": COO, "dok": DOK, "gcxs": GCXS, "csc": CSC, "csr": CSR} if isinstance(obj, COO | DOK | GCXS | CSC | CSR): return obj.asformat(format) if _is_scipy_sparse_obj(obj): sparse_obj = format_dict[format].from_scipy_sparse(obj) if dtype is None: dtype = sparse_obj.dtype return sparse_obj.astype(dtype=dtype, copy=copy) if np.isscalar(obj) or isinstance(obj, np.ndarray | Iterable): sparse_obj = format_dict[format].from_numpy(np.asarray(obj)) if dtype is None: dtype = sparse_obj.dtype return sparse_obj.astype(dtype=dtype, copy=copy) raise ValueError(f"{type(obj)} not supported.") def _support_numpy(func): """ In case a NumPy array is passed to `sparse` namespace function we want to flag it and dispatch to NumPy. """ @wraps(func) def wrapper_func(*args, **kwargs): x = args[0] if isinstance(x, np.ndarray | np.number): warnings.warn( f"Sparse {func.__name__} received dense NumPy array instead " "of sparse array. 
Dispatching to NumPy function.", RuntimeWarning, stacklevel=2, ) return getattr(np, func.__name__)(*args, **kwargs) return func(*args, **kwargs) return wrapper_func def all(x, /, *, axis=None, keepdims=False): return x.all(axis=axis, keepdims=keepdims) def any(x, /, *, axis=None, keepdims=False): return x.any(axis=axis, keepdims=keepdims) def permute_dims(x, /, axes=None): return x.transpose(axes=axes) def max(x, /, *, axis=None, keepdims=False): return x.max(axis=axis, keepdims=keepdims) def mean(x, /, *, axis=None, keepdims=False, dtype=None): return x.mean(axis=axis, keepdims=keepdims, dtype=dtype) def min(x, /, *, axis=None, keepdims=False): return x.min(axis=axis, keepdims=keepdims) def prod(x, /, *, axis=None, dtype=None, keepdims=False): return x.prod(axis=axis, keepdims=keepdims, dtype=dtype) def std(x, /, *, axis=None, correction=0.0, keepdims=False): return x.std(axis=axis, ddof=correction, keepdims=keepdims) def sum(x, /, *, axis=None, dtype=None, keepdims=False): return x.sum(axis=axis, keepdims=keepdims, dtype=dtype) def var(x, /, *, axis=None, correction=0.0, keepdims=False): return x.var(axis=axis, ddof=correction, keepdims=keepdims) def abs(x, /): return x.__abs__() def reshape(x, /, shape, *, copy=None): return x.reshape(shape=shape) def astype(x, dtype, /, *, copy=True): return x.astype(dtype, copy=copy) @_support_numpy def squeeze(x, /, axis=None): """Remove singleton dimensions from array. Parameters ---------- x : SparseArray Input array. axis : int or tuple[int, ...], optional The singleton axes to remove. By default all singleton axes are removed. Returns ------- output : SparseArray Array with singleton dimensions removed. """ return x.squeeze(axis=axis) @_support_numpy def broadcast_to(x, /, shape): return x.broadcast_to(shape) def broadcast_arrays(*arrays): shape = np.broadcast_shapes(*[a.shape for a in arrays]) return [a.broadcast_to(shape) for a in arrays] def equal(x1, x2, /): return x1 == x2 @_support_numpy def round(x, /, decimals=0, out=None): return x.round(decimals=decimals, out=out) @_support_numpy def isinf(x, /): return x.isinf() @_support_numpy def isnan(x, /): return x.isnan() def nonzero(x, /): return x.nonzero() def imag(x, /): return x.imag def real(x, /): return x.real def vecdot(x1, x2, /, *, axis=-1): """ Computes the (vector) dot product of two arrays. Parameters ---------- x1, x2 : array_like Input sparse arrays axis : int The axis to reduce over. Returns ------- out : Union[SparseArray, numpy.ndarray] Sparse or 0-D array containing dot product. 
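Examples
--------
An illustrative sketch (skipped by doctest; assumes ``vecdot`` is exposed in
the top-level ``sparse`` namespace):

>>> import numpy as np
>>> import sparse
>>> x1 = sparse.COO.from_numpy(np.array([[1, 0, 2], [0, 3, 0]]))
>>> x2 = sparse.COO.from_numpy(np.array([[4, 5, 0], [0, 1, 1]]))
>>> sparse.vecdot(x1, x2, axis=-1).todense()  # doctest: +SKIP
array([4, 3])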
""" ndmin = builtins.min((x1.ndim, x2.ndim)) if not (-ndmin <= axis < ndmin) or x1.shape[axis] != x2.shape[axis]: raise ValueError("Shapes must match along `axis`.") if np.issubdtype(x1.dtype, np.complexfloating): x1 = np.conjugate(x1) return np.sum(x1 * x2, axis=axis) sparse-0.16.0a9/sparse/numba_backend/_compressed/000077500000000000000000000000001463475501500217105ustar00rootroot00000000000000sparse-0.16.0a9/sparse/numba_backend/_compressed/__init__.py000066400000000000000000000002101463475501500240120ustar00rootroot00000000000000from .common import concatenate, stack from .compressed import CSC, CSR, GCXS __all__ = ["GCXS", "CSR", "CSC", "concatenate", "stack"] sparse-0.16.0a9/sparse/numba_backend/_compressed/common.py000066400000000000000000000073111463475501500235540ustar00rootroot00000000000000import numpy as np from .._utils import can_store, check_consistent_fill_value, normalize_axis def concatenate(arrays, axis=0, compressed_axes=None): from .compressed import GCXS check_consistent_fill_value(arrays) arrays = [arr if isinstance(arr, GCXS) else GCXS(arr, compressed_axes=(axis,)) for arr in arrays] axis = normalize_axis(axis, arrays[0].ndim) dim = sum(x.shape[axis] for x in arrays) shape = list(arrays[0].shape) shape[axis] = dim assert all(x.shape[ax] == arrays[0].shape[ax] for x in arrays for ax in set(range(arrays[0].ndim)) - {axis}) if compressed_axes is None: compressed_axes = (axis,) if arrays[0].ndim == 1: from .._coo.common import concatenate as coo_concat arrays = [arr.tocoo() for arr in arrays] return coo_concat(arrays, axis=axis) # arrays may have different compressed_axes # concatenating becomes easy when compressed_axes are the same arrays = [arr.change_compressed_axes((axis,)) for arr in arrays] ptr_list = [] for i, arr in enumerate(arrays): if i == 0: ptr_list.append(arr.indptr) continue ptr_list.append(arr.indptr[1:]) indptr = np.concatenate(ptr_list) indices = np.concatenate([arr.indices for arr in arrays]) data = np.concatenate([arr.data for arr in arrays]) ptr_len = arrays[0].indptr.shape[0] nnz = arrays[0].nnz total_nnz = sum(int(arr.nnz) for arr in arrays) if not can_store(indptr.dtype, total_nnz): indptr = indptr.astype(np.min_scalar_type(total_nnz)) for i in range(1, len(arrays)): indptr[ptr_len:] += nnz nnz = arrays[i].nnz ptr_len += arrays[i].indptr.shape[0] - 1 return GCXS( (data, indices, indptr), shape=tuple(shape), compressed_axes=arrays[0].compressed_axes, fill_value=arrays[0].fill_value, ).change_compressed_axes(compressed_axes) def stack(arrays, axis=0, compressed_axes=None): from .compressed import GCXS check_consistent_fill_value(arrays) arrays = [arr if isinstance(arr, GCXS) else GCXS(arr, compressed_axes=(axis,)) for arr in arrays] axis = normalize_axis(axis, arrays[0].ndim + 1) assert all(x.shape[ax] == arrays[0].shape[ax] for x in arrays for ax in set(range(arrays[0].ndim)) - {axis}) if compressed_axes is None: compressed_axes = (axis,) if arrays[0].ndim == 1: from .._coo.common import stack as coo_stack arrays = [arr.tocoo() for arr in arrays] return coo_stack(arrays, axis=axis) # arrays may have different compressed_axes # stacking becomes easy when compressed_axes are the same ptr_list = [] for i in range(len(arrays)): shape = list(arrays[i].shape) shape.insert(axis, 1) arrays[i] = arrays[i].reshape(shape).change_compressed_axes((axis,)) if i == 0: ptr_list.append(arrays[i].indptr) continue ptr_list.append(arrays[i].indptr[1:]) shape[axis] = len(arrays) indptr = np.concatenate(ptr_list) indices = np.concatenate([arr.indices for arr in 
arrays]) data = np.concatenate([arr.data for arr in arrays]) ptr_len = arrays[0].indptr.shape[0] nnz = arrays[0].nnz total_nnz = sum(int(arr.nnz) for arr in arrays) if not can_store(indptr.dtype, total_nnz): indptr = indptr.astype(np.min_scalar_type(total_nnz)) for i in range(1, len(arrays)): indptr[ptr_len:] += nnz nnz = arrays[i].nnz ptr_len += arrays[i].indptr.shape[0] - 1 return GCXS( (data, indices, indptr), shape=tuple(shape), compressed_axes=arrays[0].compressed_axes, fill_value=arrays[0].fill_value, ).change_compressed_axes(compressed_axes) sparse-0.16.0a9/sparse/numba_backend/_compressed/compressed.py000066400000000000000000000751451463475501500244420ustar00rootroot00000000000000import copy as _copy import operator from collections.abc import Iterable from functools import reduce from typing import Union import numpy as np from numpy.lib.mixins import NDArrayOperatorsMixin from .._coo.common import linear_loc from .._coo.core import COO from .._sparse_array import SparseArray from .._utils import ( _zero_of_dtype, can_store, check_compressed_axes, check_fill_value, equivalent, normalize_axis, ) from .convert import _1d_reshape, _transpose, uncompress_dimension from .indexing import getitem def _from_coo(x, compressed_axes=None, idx_dtype=None): if x.ndim == 0: if compressed_axes is not None: raise ValueError("no axes to compress for 0d array") return ((x.data, x.coords, []), x.shape, None, x.fill_value) if x.ndim == 1: if compressed_axes is not None: raise ValueError("no axes to compress for 1d array") return ((x.data, x.coords[0], ()), x.shape, None, x.fill_value) compressed_axes = normalize_axis(compressed_axes, x.ndim) if compressed_axes is None: # defaults to best compression ratio compressed_axes = (np.argmin(x.shape),) check_compressed_axes(x.shape, compressed_axes) axis_order = list(compressed_axes) # array location where the uncompressed dimensions start axisptr = len(compressed_axes) axis_order.extend(np.setdiff1d(np.arange(len(x.shape)), compressed_axes)) reordered_shape = tuple(x.shape[i] for i in axis_order) row_size = np.prod(reordered_shape[:axisptr]) col_size = np.prod(reordered_shape[axisptr:]) compressed_shape = (row_size, col_size) shape = x.shape if idx_dtype and not can_store(idx_dtype, max(max(compressed_shape), x.nnz)): raise ValueError( f"cannot store array with the compressed shape {compressed_shape} and nnz {x.nnz} with dtype {idx_dtype}." ) if not idx_dtype: idx_dtype = x.coords.dtype if not can_store(idx_dtype, max(max(compressed_shape), x.nnz)): idx_dtype = np.min_scalar_type(max(max(compressed_shape), x.nnz)) # transpose axes, linearize, reshape, and compress linear = linear_loc(x.coords[axis_order], reordered_shape) order = np.argsort(linear) linear = linear[order] coords = np.empty((2, x.nnz), dtype=idx_dtype) strides = 1 for i, d in enumerate(compressed_shape[::-1]): coords[-(i + 1), :] = (linear // strides) % d strides *= d indptr = np.empty(row_size + 1, dtype=idx_dtype) indptr[0] = 0 np.cumsum(np.bincount(coords[0], minlength=row_size), out=indptr[1:]) indices = coords[1] data = x.data[order] return ((data, indices, indptr), shape, compressed_axes, x.fill_value) class GCXS(SparseArray, NDArrayOperatorsMixin): """ A sparse multidimensional array. This is stored in GCXS format, a generalization of the GCRS/GCCS formats from 'Efficient storage scheme for n-dimensional sparse array: GCRS/GCCS': https://ieeexplore.ieee.org/document/7237032. GCXS generalizes the CRS/CCS sparse matrix formats. For arrays with ndim == 2, GCXS is the same CSR/CSC. 
For arrays with ndim >2, any combination of axes can be compressed, significantly reducing storage. GCXS consists of 3 arrays. Let the 3 arrays be RO, CO and VL. The first element of array RO is the integer 0 and later elements are the number of cumulative non-zero elements in each row for GCRS, column for GCCS. CO stores column indexes of non-zero elements at each row for GCRS, column for GCCS. VL stores the values of the non-zero array elements. The superiority of the GCRS/GCCS over traditional (CRS/CCS) is shown by both theoretical analysis and experimental results, outlined in the linked research paper. Parameters ---------- arg : tuple (data, indices, indptr) A tuple of arrays holding the data, indices, and index pointers for the nonzero values of the array. shape : tuple[int] (COO.ndim,) The shape of the array. compressed_axes : Iterable[int] The axes to compress. prune : bool, optional A flag indicating whether or not we should prune any fill-values present in the data array. fill_value: scalar, optional The fill value for this array. Attributes ---------- data : numpy.ndarray (nnz,) An array holding the nonzero values corresponding to :obj:`GCXS.indices`. indices : numpy.ndarray (nnz,) An array holding the coordinates of every nonzero element along uncompressed dimensions. indptr : numpy.ndarray An array holding the cumulative sums of the nonzeros along the compressed dimensions. shape : tuple[int] (ndim,) The dimensions of this array. See Also -------- DOK : A mostly write-only sparse array. """ __array_priority__ = 12 def __init__( self, arg, shape=None, compressed_axes=None, prune=False, fill_value=None, idx_dtype=None, ): from .._common import _is_scipy_sparse_obj if _is_scipy_sparse_obj(arg): arg = self.from_scipy_sparse(arg) if isinstance(arg, np.ndarray): (arg, shape, compressed_axes, fill_value) = _from_coo(COO(arg), compressed_axes) elif isinstance(arg, COO): (arg, shape, compressed_axes, fill_value) = _from_coo(arg, compressed_axes, idx_dtype) elif isinstance(arg, GCXS): if compressed_axes is not None and arg.compressed_axes != compressed_axes: arg = arg.change_compressed_axes(compressed_axes) (arg, shape, compressed_axes, fill_value) = ( (arg.data, arg.indices, arg.indptr), arg.shape, arg.compressed_axes, arg.fill_value, ) if shape is None: raise ValueError("missing `shape` argument") check_compressed_axes(len(shape), compressed_axes) if len(shape) == 1: compressed_axes = None self.data, self.indices, self.indptr = arg if self.data.ndim != 1: raise ValueError("data must be a scalar or 1-dimensional.") self.shape = shape if fill_value is None: fill_value = _zero_of_dtype(self.data.dtype) self._compressed_axes = tuple(compressed_axes) if isinstance(compressed_axes, Iterable) else None self.fill_value = self.data.dtype.type(fill_value) if prune: self._prune() def copy(self, deep=True): """Return a copy of the array. Parameters ---------- deep : boolean, optional If True (default), the internal coords and data arrays are also copied. Set to ``False`` to only make a shallow copy. 
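Examples
--------
A minimal illustrative sketch:

>>> import numpy as np
>>> import sparse
>>> x = sparse.GCXS.from_numpy(np.eye(3))
>>> y = x.copy(deep=True)
>>> y is x
False
>>> np.array_equal(y.todense(), x.todense())
True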
""" return _copy.deepcopy(self) if deep else _copy.copy(self) @classmethod def from_numpy(cls, x, compressed_axes=None, fill_value=None, idx_dtype=None): coo = COO.from_numpy(x, fill_value=fill_value, idx_dtype=idx_dtype) return cls.from_coo(coo, compressed_axes, idx_dtype) @classmethod def from_coo(cls, x, compressed_axes=None, idx_dtype=None): (arg, shape, compressed_axes, fill_value) = _from_coo(x, compressed_axes, idx_dtype) return cls(arg, shape=shape, compressed_axes=compressed_axes, fill_value=fill_value) @classmethod def from_scipy_sparse(cls, x, /, *, fill_value=None): if x.format == "csc": return cls((x.data, x.indices, x.indptr), shape=x.shape, compressed_axes=(1,), fill_value=fill_value) x = x.asformat("csr") return cls((x.data, x.indices, x.indptr), shape=x.shape, compressed_axes=(0,), fill_value=fill_value) @classmethod def from_iter(cls, x, shape=None, compressed_axes=None, fill_value=None, idx_dtype=None): return cls.from_coo( COO.from_iter(x, shape, fill_value), compressed_axes, idx_dtype, ) @property def dtype(self): """ The datatype of this array. Returns ------- numpy.dtype The datatype of this array. See Also -------- numpy.ndarray.dtype : Numpy equivalent property. scipy.sparse.csr_matrix.dtype : Scipy equivalent property. """ return self.data.dtype @property def nnz(self): """ The number of nonzero elements in this array. Returns ------- int The number of nonzero elements in this array. See Also -------- COO.nnz : Equivalent :obj:`COO` array property. DOK.nnz : Equivalent :obj:`DOK` array property. numpy.count_nonzero : A similar Numpy function. scipy.sparse.csr_matrix.nnz : The Scipy equivalent property. """ return self.data.shape[0] @property def format(self): """ The storage format of this array. Returns ------- str The storage format of this array. See Also ------- scipy.sparse.dok_matrix.format : The Scipy equivalent property. Examples ------- >>> import sparse >>> s = sparse.random((5, 5), density=0.2, format="dok") >>> s.format 'dok' >>> t = sparse.random((5, 5), density=0.2, format="coo") >>> t.format 'coo' """ return "gcxs" @property def nbytes(self): """ The number of bytes taken up by this object. Note that for small arrays, this may undercount the number of bytes due to the large constant overhead. Returns ------- int The approximate bytes of memory taken by this object. See Also -------- numpy.ndarray.nbytes : The equivalent Numpy property. 
""" return self.data.nbytes + self.indices.nbytes + self.indptr.nbytes @property def _axis_order(self): axis_order = list(self.compressed_axes) axis_order.extend(np.setdiff1d(np.arange(len(self.shape)), self.compressed_axes)) return axis_order @property def _axisptr(self): # array location where the uncompressed dimensions start return len(self.compressed_axes) @property def _compressed_shape(self): row_size = np.prod(self._reordered_shape[: self._axisptr]) col_size = np.prod(self._reordered_shape[self._axisptr :]) return (row_size, col_size) @property def _reordered_shape(self): return tuple(self.shape[i] for i in self._axis_order) @property def T(self): return self.transpose() @property def mT(self): if self.ndim < 2: raise ValueError("Cannot compute matrix transpose if `ndim < 2`.") axis = list(range(self.ndim)) axis[-1], axis[-2] = axis[-2], axis[-1] return self.transpose(axis) def __str__(self): summary = ( f"" ) return self._str_impl(summary) __repr__ = __str__ __getitem__ = getitem def _reduce_calc(self, method, axis, keepdims=False, **kwargs): if axis[0] is None or np.array_equal(axis, np.arange(self.ndim, dtype=np.intp)): x = self.flatten().tocoo() out = x.reduce(method, axis=None, keepdims=keepdims, **kwargs) if keepdims: return (out.reshape(np.ones(self.ndim, dtype=np.intp)),) return (out,) r = np.arange(self.ndim, dtype=np.intp) compressed_axes = [a for a in r if a not in set(axis)] x = self.change_compressed_axes(compressed_axes) idx = np.diff(x.indptr) != 0 indptr = x.indptr[:-1][idx] indices = (np.arange(x._compressed_shape[0], dtype=self.indptr.dtype))[idx] data = method.reduceat(x.data, indptr, **kwargs) counts = x.indptr[1:][idx] - x.indptr[:-1][idx] arr_attrs = (x, compressed_axes, indices) n_cols = x._compressed_shape[1] return (data, counts, axis, n_cols, arr_attrs) def _reduce_return(self, data, arr_attrs, result_fill_value): x, compressed_axes, indices = arr_attrs # prune data mask = ~equivalent(data, result_fill_value) data = data[mask] indices = indices[mask] out = GCXS( (data, indices, []), shape=(x._compressed_shape[0],), fill_value=result_fill_value, compressed_axes=None, ) return out.reshape(tuple(self.shape[d] for d in compressed_axes)) def change_compressed_axes(self, new_compressed_axes): """ Returns a new array with specified compressed axes. This operation is similar to converting a scipy.sparse.csc_matrix to a scipy.sparse.csr_matrix. Returns ------- GCXS A new instance of the input array with compression along the specified dimensions. """ if new_compressed_axes == self.compressed_axes: return self if self.ndim == 1: raise NotImplementedError("no axes to compress for 1d array") new_compressed_axes = tuple( normalize_axis(new_compressed_axes[i], self.ndim) for i in range(len(new_compressed_axes)) ) if new_compressed_axes == self.compressed_axes: return self if len(new_compressed_axes) >= len(self.shape): raise ValueError("cannot compress all axes") if len(set(new_compressed_axes)) != len(new_compressed_axes): raise ValueError("repeated axis in compressed_axes") arg = _transpose(self, self.shape, np.arange(self.ndim), new_compressed_axes) return GCXS( arg, shape=self.shape, compressed_axes=new_compressed_axes, fill_value=self.fill_value, ) def tocoo(self): """ Convert this :obj:`GCXS` array to a :obj:`COO`. Returns ------- sparse.COO The converted COO array. 
""" if self.ndim == 0: return COO( np.array([]), self.data, shape=self.shape, fill_value=self.fill_value, ) if self.ndim == 1: return COO( self.indices[None, :], self.data, shape=self.shape, fill_value=self.fill_value, ) uncompressed = uncompress_dimension(self.indptr) coords = np.vstack((uncompressed, self.indices)) order = np.argsort(self._axis_order) return ( COO( coords, self.data, shape=self._compressed_shape, fill_value=self.fill_value, ) .reshape(self._reordered_shape) .transpose(order) ) def todense(self): """ Convert this :obj:`GCXS` array to a dense :obj:`numpy.ndarray`. Note that this may take a large amount of memory if the :obj:`GCXS` object's :code:`shape` is large. Returns ------- numpy.ndarray The converted dense array. See Also -------- DOK.todense : Equivalent :obj:`DOK` array method. COO.todense : Equivalent :obj:`COO` array method. scipy.sparse.coo_matrix.todense : Equivalent Scipy method. """ if self.compressed_axes is None: out = np.full(self.shape, self.fill_value, self.dtype) if len(self.indices) != 0: out[self.indices] = self.data else: if len(self.data) != 0: out[()] = self.data[0] return out return self.tocoo().todense() def todok(self): from .. import DOK return DOK.from_coo(self.tocoo()) # probably a temporary solution def to_scipy_sparse(self, accept_fv=None): """ Converts this :obj:`GCXS` object into a :obj:`scipy.sparse.csr_matrix` or `scipy.sparse.csc_matrix`. Parameters ---------- accept_fv : scalar or list of scalar, optional The list of accepted fill-values. The default accepts only zero. Returns ------- :obj:`scipy.sparse.csr_matrix` or `scipy.sparse.csc_matrix` The converted Scipy sparse matrix. Raises ------ ValueError If the array is not two-dimensional. ValueError If all the array doesn't zero fill-values. """ import scipy.sparse check_fill_value(self, accept_fv=accept_fv) if self.ndim != 2: raise ValueError("Can only convert a 2-dimensional array to a Scipy sparse matrix.") if 0 in self.compressed_axes: return scipy.sparse.csr_matrix((self.data, self.indices, self.indptr), shape=self.shape) return scipy.sparse.csc_matrix((self.data, self.indices, self.indptr), shape=self.shape) def asformat(self, format, **kwargs): """ Convert this sparse array to a given format. Parameters ---------- format : str A format string. Returns ------- out : SparseArray The converted array. Raises ------ NotImplementedError If the format isn't supported. """ from .._utils import convert_format format = convert_format(format) ret = None if format == "coo": ret = self.tocoo() elif format == "dok": ret = self.todok() elif format == "csr": ret = CSR(self) elif format == "csc": ret = CSC(self) elif format == "gcxs": compressed_axes = kwargs.pop("compressed_axes", self.compressed_axes) return self.change_compressed_axes(compressed_axes) if len(kwargs) != 0: raise TypeError(f"Invalid keyword arguments provided: {kwargs}") if ret is None: raise NotImplementedError(f"The given format is not supported: {format}") return ret def maybe_densify(self, max_size=1000, min_density=0.25): """ Converts this :obj:`GCXS` array to a :obj:`numpy.ndarray` if not too costly. Parameters ---------- max_size : int Maximum number of elements in output min_density : float Minimum density of output Returns ------- numpy.ndarray The dense array. See Also -------- sparse.GCXS.todense: Converts to Numpy function without checking the cost. sparse.COO.maybe_densify: The equivalent COO function. Raises ------- ValueError If the returned array would be too large. 
""" if self.size > max_size and self.density < min_density: raise ValueError("Operation would require converting large sparse array to dense") return self.todense() def flatten(self, order="C"): """ Returns a new :obj:`GCXS` array that is a flattened version of this array. Returns ------- GCXS The flattened output array. Notes ----- The :code:`order` parameter is provided just for compatibility with Numpy and isn't actually supported. """ if order not in {"C", None}: raise NotImplementedError("The `order` parameter is not supported.") return self.reshape(-1) def reshape(self, shape, order="C", compressed_axes=None): """ Returns a new :obj:`GCXS` array that is a reshaped version of this array. Parameters ---------- shape : tuple[int] The desired shape of the output array. compressed_axes : Iterable[int], optional The axes to compress to store the array. Finds the most efficient storage by default. Returns ------- GCXS The reshaped output array. See Also -------- numpy.ndarray.reshape : The equivalent Numpy function. sparse.COO.reshape : The equivalent COO function. Notes ----- The :code:`order` parameter is provided just for compatibility with Numpy and isn't actually supported. """ shape = tuple(shape) if isinstance(shape, Iterable) else (shape,) if order not in {"C", None}: raise NotImplementedError("The 'order' parameter is not supported") if any(d == -1 for d in shape): extra = int(self.size / np.prod([d for d in shape if d != -1])) shape = tuple([d if d != -1 else extra for d in shape]) if self.shape == shape: return self if self.size != reduce(operator.mul, shape, 1): raise ValueError(f"cannot reshape array of size {self.size} into shape {shape}") if len(shape) == 0: return self.tocoo().reshape(shape).asformat("gcxs") if compressed_axes is None: if len(shape) == self.ndim: compressed_axes = self.compressed_axes elif len(shape) == 1: compressed_axes = None else: compressed_axes = (np.argmin(shape),) if self.ndim == 1: arg = _1d_reshape(self, shape, compressed_axes) else: arg = _transpose(self, shape, np.arange(self.ndim), compressed_axes) return GCXS( arg, shape=tuple(shape), compressed_axes=compressed_axes, fill_value=self.fill_value, ) @property def compressed_axes(self): return self._compressed_axes def transpose(self, axes=None, compressed_axes=None): """ Returns a new array which has the order of the axes switched. Parameters ---------- axes : Iterable[int], optional The new order of the axes compared to the previous one. Reverses the axes by default. compressed_axes : Iterable[int], optional The axes to compress to store the array. Finds the most efficient storage by default. Returns ------- GCXS The new array with the axes in the desired order. See Also -------- :obj:`GCXS.T` : A quick property to reverse the order of the axes. numpy.ndarray.transpose : Numpy equivalent function. 
""" if axes is None: axes = list(reversed(range(self.ndim))) # Normalize all axes indices to positive values axes = normalize_axis(axes, self.ndim) if len(np.unique(axes)) < len(axes): raise ValueError("repeated axis in transpose") if not len(axes) == self.ndim: raise ValueError("axes don't match array") axes = tuple(axes) if axes == tuple(range(self.ndim)): return self if self.ndim == 2: return self._2d_transpose() shape = tuple(self.shape[ax] for ax in axes) if compressed_axes is None: compressed_axes = (np.argmin(shape),) arg = _transpose(self, shape, axes, compressed_axes, transpose=True) return GCXS( arg, shape=shape, compressed_axes=compressed_axes, fill_value=self.fill_value, ) def _2d_transpose(self): """ A function for performing constant-time transposes on 2d GCXS arrays. Returns ------- GCXS The new transposed array with the opposite compressed axes as the input. See Also -------- scipy.sparse.csr_matrix.transpose : Scipy equivalent function. scipy.sparse.csc_matrix.transpose : Scipy equivalent function. numpy.ndarray.transpose : Numpy equivalent function. """ if self.ndim != 2: raise ValueError(f"cannot perform 2d transpose on array with dimension {self.ndim}") compressed_axes = [(self.compressed_axes[0] + 1) % 2] shape = self.shape[::-1] return GCXS( (self.data, self.indices, self.indptr), shape=shape, compressed_axes=compressed_axes, fill_value=self.fill_value, ) def dot(self, other): """ Performs the equivalent of :code:`x.dot(y)` for :obj:`GCXS`. Parameters ---------- other : Union[GCXS, COO, numpy.ndarray, scipy.sparse.spmatrix] The second operand of the dot product operation. Returns ------- {GCXS, numpy.ndarray} The result of the dot product. If the result turns out to be dense, then a dense array is returned, otherwise, a sparse array. Raises ------ ValueError If all arguments don't have zero fill-values. See Also -------- dot : Equivalent function for two arguments. :obj:`numpy.dot` : Numpy equivalent function. scipy.sparse.csr_matrix.dot : Scipy equivalent function. """ from .._common import dot return dot(self, other) def __matmul__(self, other): from .._common import matmul try: return matmul(self, other) except NotImplementedError: return NotImplemented def __rmatmul__(self, other): from .._common import matmul try: return matmul(other, self) except NotImplementedError: return NotImplemented def _prune(self): """ Prunes data so that if any fill-values are present, they are removed from both indices and data. 
Examples -------- >>> coords = np.array([[0, 1, 2, 3]]) >>> data = np.array([1, 0, 1, 2]) >>> s = COO(coords, data).asformat("gcxs") >>> s._prune() >>> s.nnz 3 """ mask = ~equivalent(self.data, self.fill_value) self.data = self.data[mask] if len(self.indptr): coords = np.stack((uncompress_dimension(self.indptr), self.indices)) coords = coords[:, mask] self.indices = coords[1] row_size = self._compressed_shape[0] indptr = np.empty(row_size + 1, dtype=self.indptr.dtype) indptr[0] = 0 np.cumsum(np.bincount(coords[0], minlength=row_size), out=indptr[1:]) self.indptr = indptr else: self.indices = self.indices[mask] def isinf(self): return self.tocoo().isinf().asformat("gcxs", compressed_axes=self.compressed_axes) def isnan(self): return self.tocoo().isnan().asformat("gcxs", compressed_axes=self.compressed_axes) class _Compressed2d(GCXS): class_compressed_axes: tuple[int] def __init__(self, arg, shape=None, compressed_axes=None, prune=False, fill_value=0): if not hasattr(arg, "shape") and shape is None: raise ValueError("missing `shape` argument") if shape is not None and hasattr(arg, "shape"): raise NotImplementedError("Cannot change shape in constructor") nd = len(shape if shape is not None else arg.shape) if nd != 2: raise ValueError(f"{type(self).__name__} must be 2-d, passed {nd}-d shape.") super().__init__( arg, shape=shape, compressed_axes=compressed_axes, prune=prune, fill_value=fill_value, ) def __str__(self): summary = ( f"<{type(self).__name__}: shape={self.shape}, dtype={self.dtype}, nnz={self.nnz}, " f"fill_value={self.fill_value}>" ) return self._str_impl(summary) __repr__ = __str__ @property def ndim(self) -> int: return 2 @classmethod def from_numpy(cls, x, fill_value=0, idx_dtype=None): coo = COO.from_numpy(x, fill_value=fill_value, idx_dtype=idx_dtype) return cls.from_coo(coo, cls.class_compressed_axes, idx_dtype) class CSR(_Compressed2d): """ The CSR or CRS scheme stores a n-dimensional array using n+1 one-dimensional arrays. The 3 arrays are same as GCRS. The remaining n-2 arrays are for storing the indices of the non-zero values of the sparse matrix. CSR is simply the transpose of CSC. Sparse supports 2-D CSR. """ class_compressed_axes: tuple[int] = (0,) def __init__(self, arg, shape=None, compressed_axes=class_compressed_axes, prune=False, fill_value=0): if compressed_axes != self.class_compressed_axes: raise ValueError(f"CSR only accepts rows as compressed axis but got: {compressed_axes}") super().__init__(arg, shape=shape, compressed_axes=compressed_axes, fill_value=fill_value) @classmethod def from_scipy_sparse(cls, x, /, *, fill_value=None): x = x.asformat("csr", copy=False) return cls((x.data, x.indices, x.indptr), shape=x.shape, fill_value=fill_value) def transpose(self, axes: None = None, copy: bool = False) -> Union["CSC", "CSR"]: axes = normalize_axis(axes, self.ndim) if axes not in [(0, 1), (1, 0), None]: raise ValueError(f"Invalid transpose axes: {axes}") if copy: self = self.copy() if axes == (0, 1): return self return CSC((self.data, self.indices, self.indptr), self.shape[::-1]) class CSC(_Compressed2d): """ The CSC or CCS scheme stores a n-dimensional array using n+1 one-dimensional arrays. The 3 arrays are same as GCCS. The remaining n-2 arrays are for storing the indices of the non-zero values of the sparse matrix. CSC is simply the transpose of CSR. Sparse supports 2-D CSC. 
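Examples
--------
An illustrative sketch (assumes ``CSC`` and ``CSR`` are exposed in the
top-level ``sparse`` namespace):

>>> import scipy.sparse
>>> import sparse
>>> m = scipy.sparse.eye(3, format="csc")
>>> s = sparse.CSC.from_scipy_sparse(m)
>>> s.shape
(3, 3)
>>> type(s.transpose()).__name__
'CSR'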
""" class_compressed_axes: tuple[int] = (1,) def __init__(self, arg, shape=None, compressed_axes=class_compressed_axes, prune=False, fill_value=0): if compressed_axes != self.class_compressed_axes: raise ValueError(f"CSC only accepts columns as compressed axis but got: {compressed_axes}") super().__init__(arg, shape=shape, compressed_axes=compressed_axes, fill_value=fill_value) @classmethod def from_scipy_sparse(cls, x, /, *, fill_value=None): x = x.asformat("csc", copy=False) return cls((x.data, x.indices, x.indptr), shape=x.shape, fill_value=fill_value) def transpose(self, axes: None = None, copy: bool = False) -> Union["CSC", "CSR"]: axes = normalize_axis(axes, self.ndim) if axes not in [(0, 1), (1, 0), None]: raise ValueError(f"Invalid transpose axes: {axes}") if copy: self = self.copy() if axes == (0, 1): return self return CSR((self.data, self.indices, self.indptr), self.shape[::-1]) sparse-0.16.0a9/sparse/numba_backend/_compressed/convert.py000066400000000000000000000250761463475501500237540ustar00rootroot00000000000000import operator from functools import reduce import numba from numba.typed import List import numpy as np from .._coo.common import linear_loc from .._utils import check_compressed_axes, get_out_dtype @numba.jit(nopython=True, nogil=True) def convert_to_flat(inds, shape, dtype): """ Converts the indices of either the compressed or uncompressed axes into a linearized form. Prepares the inputs for compute_flat. """ shape_bins = transform_shape(np.asarray(shape)) increments = List() for i in range(len(inds)): increments.append((inds[i] * shape_bins[i]).astype(dtype)) operations = 1 for inc in increments[:-1]: operations *= inc.shape[0] if operations == 0: return np.empty(0, dtype=dtype) cols = increments[-1].repeat(operations).reshape((-1, operations)).T.flatten() if len(increments) == 1: return cols return compute_flat(increments, cols, operations) @numba.jit(nopython=True, nogil=True) def compute_flat(increments, cols, operations): # pragma: no cover """ Iterates through indices and calculates the linearized indices. """ start = 0 end = increments[-1].shape[0] positions = np.zeros(len(increments) - 1, dtype=np.intp) pos = len(increments) - 2 for _ in range(operations): to_add = 0 for j in range(len(increments) - 1): to_add += increments[j][positions[j]] cols[start:end] += to_add start += increments[-1].shape[0] end += increments[-1].shape[0] for j in range(pos, -1, -1): positions[j] += 1 if positions[j] == increments[j].shape[0]: positions[j] = 0 else: break return cols @numba.jit(nopython=True, nogil=True) def transform_shape(shape): # pragma: no cover """ turns a shape into the linearized increments that it represents. For example, given (5,5,5), it returns np.array([25,5,1]). """ shape_bins = np.empty(len(shape), dtype=np.intp) shape_bins[-1] = 1 for i in range(len(shape) - 1): shape_bins[i] = np.prod(shape[i + 1 :]) return shape_bins @numba.jit(nopython=True, nogil=True) def uncompress_dimension(indptr): # pragma: no cover """converts an index pointer array into an array of coordinates""" uncompressed = np.empty(indptr[-1], dtype=indptr.dtype) for i in range(len(indptr) - 1): uncompressed[indptr[i] : indptr[i + 1]] = i return uncompressed @numba.jit(nopython=True, nogil=True) def is_sorted(arr): # pragma: no cover """ function to check if an indexing array is sorted without repeats. If it is, we can use the faster slicing algorithm. """ # numba doesn't recognize the new all(...) 
format for i in range(len(arr) - 1): # noqa: SIM110 if arr[i + 1] <= arr[i]: return False return True @numba.jit(nopython=True, nogil=True) def _linearize( x_indices, shape, new_axis_order, new_reordered_shape, new_compressed_shape, new_linear, new_coords, ): # pragma: no cover for i, n in enumerate(x_indices): current = unravel_index(n, shape) current_t = current[new_axis_order] new_linear[i] = ravel_multi_index(current_t, new_reordered_shape) new_coords[:, i] = unravel_index(new_linear[i], new_compressed_shape) def _1d_reshape(x, shape, compressed_axes): check_compressed_axes(shape, compressed_axes) new_size = np.prod(shape) end_idx = np.searchsorted(x.indices, new_size, side="left") # for resizeing in one dimension if len(shape) == 1: return (x.data[:end_idx], x.indices[:end_idx], []) new_axis_order = list(compressed_axes) new_axis_order.extend(np.setdiff1d(np.arange(len(shape)), compressed_axes)) new_axis_order = np.asarray(new_axis_order) new_reordered_shape = np.array(shape)[new_axis_order] axisptr = len(compressed_axes) row_size = np.prod(new_reordered_shape[:axisptr]) col_size = np.prod(new_reordered_shape[axisptr:]) new_compressed_shape = np.array((row_size, col_size)) x_indices = x.indices[:end_idx] new_nnz = x_indices.size new_linear = np.empty(new_nnz, dtype=np.intp) coords_dtype = get_out_dtype(x.indices, max(max(new_compressed_shape), x.nnz)) new_coords = np.empty((2, new_nnz), dtype=coords_dtype) _linearize( x_indices, np.array(shape), new_axis_order, new_reordered_shape, new_compressed_shape, new_linear, new_coords, ) order = np.argsort(new_linear) new_coords = new_coords[:, order] indptr = np.empty(row_size + 1, dtype=coords_dtype) indptr[0] = 0 np.cumsum(np.bincount(new_coords[0], minlength=row_size), out=indptr[1:]) indices = new_coords[1] data = x.data[:end_idx][order] return (data, indices, indptr) def _resize(x, shape, compressed_axes): from .compressed import GCXS check_compressed_axes(shape, compressed_axes) size = reduce(operator.mul, shape, 1) if x.ndim == 1: end_idx = np.searchsorted(x.indices, size, side="left") indices = x.indices[:end_idx] data = x.data[:end_idx] out = GCXS((data, indices, []), shape=(size,), fill_value=x.fill_value) return _1d_reshape(out, shape, compressed_axes) uncompressed = uncompress_dimension(x.indptr) coords = np.stack((uncompressed, x.indices)) linear = linear_loc(coords, x._compressed_shape) sorted_axis_order = np.argsort(x._axis_order) linear_dtype = get_out_dtype(x.indices, np.prod(shape)) c_linear = np.empty(x.nnz, dtype=linear_dtype) _c_ordering( linear, c_linear, np.asarray(x._reordered_shape), np.asarray(sorted_axis_order), np.asarray(x.shape), ) order = np.argsort(c_linear, kind="mergesort") data = x.data[order] indices = c_linear[order] end_idx = np.searchsorted(indices, size, side="left") indices = indices[:end_idx] data = data[:end_idx] out = GCXS((data, indices, []), shape=(size,), fill_value=x.fill_value) return _1d_reshape(out, shape, compressed_axes) @numba.jit(nopython=True, nogil=True) def _c_ordering(linear, c_linear, reordered_shape, sorted_axis_order, shape): # pragma: no cover for i, n in enumerate(linear): # c ordering current_coords = unravel_index(n, reordered_shape)[sorted_axis_order] c_linear[i] = ravel_multi_index(current_coords, shape) def _transpose(x, shape, axes, compressed_axes, transpose=False): """ An algorithm for reshaping, resizing, changing compressed axes, and transposing. 
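On success it returns a ``(data, indices, indptr)`` triple that can be passed to the :obj:`GCXS` constructor for the requested ``compressed_axes`` (``indptr`` is an empty list for 1-D results).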
""" check_compressed_axes(shape, compressed_axes) uncompressed = uncompress_dimension(x.indptr) coords = np.stack((uncompressed, x.indices)) linear = linear_loc(coords, x._compressed_shape) sorted_axis_order = np.argsort(x._axis_order) if len(shape) == 1: dtype = get_out_dtype(x.indices, shape[0]) c_linear = np.empty(x.nnz, dtype=dtype) _c_ordering( linear, c_linear, np.asarray(x._reordered_shape), np.asarray(sorted_axis_order), np.asarray(x.shape), ) order = np.argsort(c_linear, kind="mergesort") data = x.data[order] indices = c_linear[order] return (data, indices, []) new_axis_order = list(compressed_axes) new_axis_order.extend(np.setdiff1d(np.arange(len(shape)), compressed_axes)) new_linear = np.empty(x.nnz, dtype=np.intp) new_reordered_shape = np.array(shape)[new_axis_order] axisptr = len(compressed_axes) row_size = np.prod(new_reordered_shape[:axisptr]) col_size = np.prod(new_reordered_shape[axisptr:]) new_compressed_shape = np.array((row_size, col_size)) coords_dtype = get_out_dtype(x.indices, max(max(new_compressed_shape), x.nnz)) new_coords = np.empty((2, x.nnz), dtype=coords_dtype) _convert_coords( linear, np.asarray(x.shape), np.asarray(x._reordered_shape), sorted_axis_order, np.asarray(axes), np.asarray(shape), np.asarray(new_axis_order), new_reordered_shape, new_linear, new_coords, new_compressed_shape, transpose, ) order = np.argsort(new_linear, kind="mergesort") new_coords = new_coords[:, order] if len(shape) == 1: indptr = [] indices = coords[0, :] else: indptr = np.empty(row_size + 1, dtype=coords_dtype) indptr[0] = 0 np.cumsum(np.bincount(new_coords[0], minlength=row_size), out=indptr[1:]) indices = new_coords[1] data = x.data[order] return (data, indices, indptr) @numba.jit(nopython=True, nogil=True) def unravel_index(n, shape): # pragma: no cover """ implements a subset of the functionality of np.unravel_index. """ out = np.zeros(len(shape), dtype=np.intp) i = 1 while i < len(shape) and n > 0: cur = np.prod(shape[i:]) out[i - 1] = n // cur n -= out[i - 1] * cur i += 1 out[-1] = n return out @numba.jit(nopython=True, nogil=True) def ravel_multi_index(arr, shape): # pragma: no cover """ implements a subset of the functionality of np.ravel_multi_index. 
""" total = 0 for i, a in enumerate(arr[:-1], 1): total += a * np.prod(shape[i:]) total += arr[-1] return total @numba.jit(nopython=True, nogil=True) def _convert_coords( linear, old_shape, reordered_shape, sorted_axis_order, axes, shape, new_axis_order, new_reordered_shape, new_linear, new_coords, new_compressed_shape, transpose, ): # pragma: no cover if transpose: for i, n in enumerate(linear): # c ordering current_coords = unravel_index(n, reordered_shape)[sorted_axis_order] # transpose current_coords_t = current_coords[axes][new_axis_order] new_linear[i] = ravel_multi_index(current_coords_t, new_reordered_shape) # reshape new_coords[:, i] = unravel_index(new_linear[i], new_compressed_shape) else: for i, n in enumerate(linear): # c ordering current_coords = unravel_index(n, reordered_shape)[sorted_axis_order] # linearize c_current = ravel_multi_index(current_coords, old_shape) # compress c_compressed = unravel_index(c_current, shape) c_compressed = c_compressed[new_axis_order] new_linear[i] = ravel_multi_index(c_compressed, new_reordered_shape) # reshape new_coords[:, i] = unravel_index(new_linear[i], new_compressed_shape) sparse-0.16.0a9/sparse/numba_backend/_compressed/indexing.py000066400000000000000000000242711463475501500240750ustar00rootroot00000000000000from collections.abc import Iterable from itertools import zip_longest from numbers import Integral import numba from numba.typed import List import numpy as np from .._slicing import normalize_index from .convert import convert_to_flat, is_sorted, uncompress_dimension def getitem(x, key): """ GCXS arrays are stored by transposing and reshaping them into csr matrices. For indexing, we first convert the n-dimensional key to its corresponding 2-dimensional key and then iterate through each of the relevent rows and columns. """ from .compressed import GCXS if x.ndim == 1: result = x.tocoo()[key] if np.isscalar(result): return result return GCXS.from_coo(result) key = list(normalize_index(key, x.shape)) # zip_longest so things like x[..., None] are picked up. 
if len(key) != 0 and all(isinstance(k, slice) and k == slice(0, dim, 1) for k, dim in zip_longest(key, x.shape)): return x # return a single element if all(isinstance(k, int) for k in key): return get_single_element(x, key) shape = [] compressed_inds = np.zeros(len(x.shape), dtype=np.bool_) uncompressed_inds = np.zeros(len(x.shape), dtype=np.bool_) # which axes will be compressed in the resulting array shape_key = np.zeros(len(x.shape), dtype=np.intp) # remove Nones from key, evaluate them at the end Nones_removed = [k for k in key if k is not None] count = 0 for i, ind in enumerate(Nones_removed): if isinstance(ind, Integral): continue if isinstance(ind, slice): shape_key[i] = count shape.append(len(range(ind.start, ind.stop, ind.step))) if i in x.compressed_axes: compressed_inds[i] = True else: uncompressed_inds[i] = True elif isinstance(ind, Iterable): shape_key[i] = count shape.append(len(ind)) if i in x.compressed_axes: compressed_inds[i] = True else: uncompressed_inds[i] = True count += 1 # reorder the key according to the axis_order of the array reordered_key = [Nones_removed[i] for i in x._axis_order] # if all slices have a positive step and all # iterables are sorted without repeats, we can # use the quicker slicing algorithm pos_slice = True for ind in reordered_key[x._axisptr :]: if isinstance(ind, slice): if ind.step < 0: pos_slice = False elif isinstance(ind, Iterable) and not is_sorted(ind): pos_slice = False # convert all ints and slices to iterables before flattening for i, ind in enumerate(reordered_key): if isinstance(ind, Integral): reordered_key[i] = np.array([ind]) elif isinstance(ind, slice): reordered_key[i] = np.arange(ind.start, ind.stop, ind.step) elif isinstance(ind, np.ndarray) and ind.ndim > 1: raise IndexError("Only one-dimensional iterable indices supported.") reordered_key[i] = reordered_key[i].astype(x.indices.dtype, copy=False) reordered_key = List(reordered_key) shape = np.array(shape) # convert all indices of compressed axes to a single array index # this tells us which 'rows' of the underlying csr matrix to iterate through rows = convert_to_flat( reordered_key[: x._axisptr], x._reordered_shape[: x._axisptr], x.indices.dtype, ) # convert all indices of uncompressed axes to a single array index # this tells us which 'columns' of the underlying csr matrix to iterate through cols = convert_to_flat( reordered_key[x._axisptr :], x._reordered_shape[x._axisptr :], x.indices.dtype, ) starts = x.indptr[:-1][rows] # find the start and end of each of the rows ends = x.indptr[1:][rows] if np.any(compressed_inds): compressed_axes = shape_key[compressed_inds] row_size = shape[compressed_axes] if len(compressed_axes) == 1 else np.prod(shape[compressed_axes]) # if only indexing through uncompressed axes else: compressed_axes = (0,) # defaults to 0 row_size = 1 # this doesn't matter if not np.any(uncompressed_inds): # only indexing compressed axes compressed_axes = (0,) # defaults to 0 row_size = starts.size indptr = np.empty(row_size + 1, dtype=x.indptr.dtype) indptr[0] = 0 if pos_slice: arg = get_slicing_selection(x.data, x.indices, indptr, starts, ends, cols) else: arg = get_array_selection(x.data, x.indices, indptr, starts, ends, cols) data, indices, indptr = arg size = np.prod(shape[1:]) if not np.any(uncompressed_inds): # only indexing compressed axes uncompressed = uncompress_dimension(indptr) if len(shape) == 1: indices = uncompressed indptr = None else: indices = uncompressed % size indptr = np.empty(shape[0] + 1, dtype=x.indptr.dtype) indptr[0] = 0 
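# `np.bincount` counts how many stored entries land in each output row;
# the running cumulative sum of those counts yields the CSR-style row
# pointer (`indptr`) array.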
np.cumsum(np.bincount(uncompressed // size, minlength=shape[0]), out=indptr[1:]) if not np.any(compressed_inds): if len(shape) == 1: indptr = None else: uncompressed = indices // size indptr = np.empty(shape[0] + 1, dtype=x.indptr.dtype) indptr[0] = 0 np.cumsum(np.bincount(uncompressed, minlength=shape[0]), out=indptr[1:]) indices %= size arg = (data, indices, indptr) # if there were Nones in the key, we insert them back here compressed_axes = np.array(compressed_axes) shape = shape.tolist() for i in range(len(key)): if key[i] is None: shape.insert(i, 1) compressed_axes[compressed_axes >= i] += 1 compressed_axes = tuple(compressed_axes) shape = tuple(shape) if len(shape) == 1: compressed_axes = None return GCXS(arg, shape=shape, compressed_axes=compressed_axes, fill_value=x.fill_value) @numba.jit(nopython=True, nogil=True) def get_slicing_selection(arr_data, arr_indices, indptr, starts, ends, col): # pragma: no cover """ When the requested elements come in a strictly ascending order, as is the case with acsending slices, we can iteratively reduce the search space, leading to better performance. We loop through the starts and ends, each time evaluating whether to use a linear filtering procedure or a binary-search-based method. """ indices = [] ind_list = [] for i, (start, end) in enumerate(zip(starts, ends)): # noqa: B905 inds = [] current_row = arr_indices[start:end] if current_row.size < col.size: # linear filtering count = 0 col_count = 0 nnz = 0 while col_count < col.size and count < current_row.size: if current_row[-1] < col[col_count] or current_row[count] > col[-1]: break if current_row[count] == col[col_count]: nnz += 1 ind_list.append(count + start) indices.append(col_count) count += 1 col_count += 1 elif current_row[count] < col[col_count]: count += 1 else: col_count += 1 indptr[i + 1] = indptr[i] + nnz else: # binary searches prev = 0 size = 0 col_count = 0 while col_count < len(col): while ( col_count < len(col) and size < len(current_row) and col[col_count] < current_row[size] ): # skip needless searches col_count += 1 if col_count >= len(col): # check again because of previous loop break if current_row[-1] < col[col_count] or current_row[size] > col[-1]: break s = np.searchsorted(current_row[size:], col[col_count]) size += s s += prev if not (s >= current_row.size or current_row[s] != col[col_count]): s += start inds.append(s) indices.append(col_count) size += 1 prev = size col_count += 1 ind_list.extend(inds) indptr[i + 1] = indptr[i] + len(inds) ind_list = np.array(ind_list, dtype=np.intp) indices = np.array(indices, dtype=indptr.dtype) data = arr_data[ind_list] return (data, indices, indptr) @numba.jit(nopython=True, nogil=True) def get_array_selection(arr_data, arr_indices, indptr, starts, ends, col): # pragma: no cover """ This is a very general algorithm to be used when more optimized methods don't apply. It performs a binary search for each of the requested elements. Consequently it roughly scales by O(n log avg(nnz)). 
""" indices = [] ind_list = [] for i, (start, end) in enumerate(zip(starts, ends)): # noqa: B905 inds = [] current_row = arr_indices[start:end] if len(current_row) == 0: indptr[i + 1] = indptr[i] continue for c in range(len(col)): s = np.searchsorted(current_row, col[c]) if not (s >= current_row.size or current_row[s] != col[c]): s += start inds.append(s) indices.append(c) ind_list.extend(inds) indptr[i + 1] = indptr[i] + len(inds) ind_list = np.array(ind_list, dtype=np.intp) indices = np.array(indices, dtype=indptr.dtype) data = arr_data[ind_list] return (data, indices, indptr) def get_single_element(x, key): """ A convience function for indexing when returning a single element. """ key = np.array(key)[x._axis_order] # reordering the input ind = np.ravel_multi_index(key, x._reordered_shape) row, col = np.unravel_index(ind, x._compressed_shape) current_row = x.indices[x.indptr[row] : x.indptr[row + 1]] item = np.searchsorted(current_row, col) if not (item >= current_row.size or current_row[item] != col): item += x.indptr[row] return x.data[item] return x.fill_value sparse-0.16.0a9/sparse/numba_backend/_coo/000077500000000000000000000000001463475501500203245ustar00rootroot00000000000000sparse-0.16.0a9/sparse/numba_backend/_coo/__init__.py000066400000000000000000000015601463475501500224370ustar00rootroot00000000000000from .common import ( argmax, argmin, argwhere, clip, concatenate, diagonal, diagonalize, expand_dims, flip, isneginf, isposinf, kron, nanmax, nanmean, nanmin, nanprod, nanreduce, nansum, result_type, roll, sort, stack, take, tril, triu, unique_counts, unique_values, where, ) from .core import COO, as_coo __all__ = [ "COO", "as_coo", "argmax", "argmin", "argwhere", "clip", "concatenate", "diagonal", "diagonalize", "expand_dims", "flip", "isneginf", "isposinf", "kron", "nanmax", "nanmean", "nanmin", "nanprod", "nanreduce", "nansum", "result_type", "roll", "sort", "stack", "take", "tril", "triu", "unique_counts", "unique_values", "where", ] sparse-0.16.0a9/sparse/numba_backend/_coo/common.py000066400000000000000000001264021463475501500221730ustar00rootroot00000000000000import operator import warnings from collections.abc import Iterable from functools import reduce from typing import Any, NamedTuple import numba import numpy as np from .._sparse_array import SparseArray from .._utils import ( can_store, check_consistent_fill_value, check_zero_fill_value, is_unsigned_dtype, isscalar, normalize_axis, ) def asCOO(x, name="asCOO", check=True): """ Convert the input to :obj:`COO`. Passes through :obj:`COO` objects as-is. Parameters ---------- x : Union[SparseArray, scipy.sparse.spmatrix, numpy.ndarray] The input array to convert. name : str, optional The name of the operation to use in the exception. check : bool, optional Whether to check for a dense input. Returns ------- COO The converted :obj:`COO` array. Raises ------ ValueError If ``check`` is true and a dense input is supplied. """ from .._common import _is_sparse from .core import COO if check and not _is_sparse(x): raise ValueError(f"Performing this operation would produce a dense result: {name}") if not isinstance(x, COO): x = COO(x) return x def linear_loc(coords, shape): if shape == () and len(coords) == 0: # `np.ravel_multi_index` is not aware of arrays, so cannot produce a # sensible result here (https://github.com/numpy/numpy/issues/15690). # Since `coords` is an array and not a sequence, we know the correct # dimensions. 
return np.zeros(coords.shape[1:], dtype=np.intp) return np.ravel_multi_index(coords, shape) def kron(a, b): """Kronecker product of 2 sparse arrays. Parameters ---------- a, b : SparseArray, scipy.sparse.spmatrix, or np.ndarray The arrays over which to compute the Kronecker product. Returns ------- res : COO The kronecker product Raises ------ ValueError If all arguments are dense or arguments have nonzero fill-values. Examples -------- >>> from sparse import eye >>> a = eye(3, dtype="i8") >>> b = np.array([1, 2, 3], dtype="i8") >>> res = kron(a, b) >>> res.todense() # doctest: +SKIP array([[1, 2, 3, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 2, 3, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 2, 3]], dtype=int64) """ from .._common import _is_sparse from .._umath import _cartesian_product from .core import COO check_zero_fill_value(a, b) a_sparse = _is_sparse(a) b_sparse = _is_sparse(b) a_ndim = np.ndim(a) b_ndim = np.ndim(b) if not (a_sparse or b_sparse): raise ValueError("Performing this operation would produce a dense result: kron") if a_ndim == 0 or b_ndim == 0: return a * b a = asCOO(a, check=False) b = asCOO(b, check=False) # Match dimensions max_dim = max(a.ndim, b.ndim) a = a.reshape((1,) * (max_dim - a.ndim) + a.shape) b = b.reshape((1,) * (max_dim - b.ndim) + b.shape) a_idx, b_idx = _cartesian_product(np.arange(a.nnz), np.arange(b.nnz)) a_expanded_coords = a.coords[:, a_idx] b_expanded_coords = b.coords[:, b_idx] o_coords = a_expanded_coords * np.asarray(b.shape)[:, None] + b_expanded_coords o_data = a.data[a_idx] * b.data[b_idx] o_shape = tuple(i * j for i, j in zip(a.shape, b.shape, strict=True)) return COO(o_coords, o_data, shape=o_shape, has_duplicates=False) def concatenate(arrays, axis=0): """ Concatenate the input arrays along the given dimension. Parameters ---------- arrays : Iterable[SparseArray] The input arrays to concatenate. axis : int, optional The axis along which to concatenate the input arrays. The default is zero. Returns ------- COO The output concatenated array. Raises ------ ValueError If all elements of :code:`arrays` don't have the same fill-value. See Also -------- numpy.concatenate : NumPy equivalent function """ from .core import COO check_consistent_fill_value(arrays) if axis is None: axis = 0 arrays = [x.flatten() for x in arrays] arrays = [x if isinstance(x, COO) else COO(x) for x in arrays] axis = normalize_axis(axis, arrays[0].ndim) assert all(x.shape[ax] == arrays[0].shape[ax] for x in arrays for ax in set(range(arrays[0].ndim)) - {axis}) nnz = 0 dim = sum(x.shape[axis] for x in arrays) shape = list(arrays[0].shape) shape[axis] = dim data = np.concatenate([x.data for x in arrays]) coords = np.concatenate([x.coords for x in arrays], axis=1) if not can_store(coords.dtype, max(shape)): coords = coords.astype(np.min_scalar_type(max(shape))) dim = 0 for x in arrays: if dim: coords[axis, nnz : x.nnz + nnz] += dim dim += x.shape[axis] nnz += x.nnz return COO( coords, data, shape=shape, has_duplicates=False, sorted=(axis == 0), fill_value=arrays[0].fill_value, ) def stack(arrays, axis=0): """ Stack the input arrays along the given dimension. Parameters ---------- arrays : Iterable[SparseArray] The input arrays to stack. axis : int, optional The axis along which to stack the input arrays. Returns ------- COO The output stacked array. Raises ------ ValueError If all elements of :code:`arrays` don't have the same fill-value. 
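Examples
--------
A quick shape check (illustrative only, hence skipped):

>>> import sparse
>>> a = sparse.COO.from_numpy(np.eye(2))
>>> sparse.stack([a, a], axis=0).shape  # doctest: +SKIP
(2, 2, 2)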
See Also -------- numpy.stack : NumPy equivalent function """ from .core import COO check_consistent_fill_value(arrays) assert len({x.shape for x in arrays}) == 1 arrays = [x if isinstance(x, COO) else COO(x) for x in arrays] axis = normalize_axis(axis, arrays[0].ndim + 1) data = np.concatenate([x.data for x in arrays]) coords = np.concatenate([x.coords for x in arrays], axis=1) shape = list(arrays[0].shape) shape.insert(axis, len(arrays)) nnz = 0 new = np.empty(shape=(coords.shape[1],), dtype=np.intp) for dim, x in enumerate(arrays): new[nnz : x.nnz + nnz] = dim nnz += x.nnz coords = [coords[i] for i in range(coords.shape[0])] coords.insert(axis, new) coords = np.stack(coords, axis=0) return COO( coords, data, shape=shape, has_duplicates=False, sorted=(axis == 0), fill_value=arrays[0].fill_value, ) def triu(x, k=0): """ Returns an array with all elements below the k-th diagonal set to zero. Parameters ---------- x : COO The input array. k : int, optional The diagonal below which elements are set to zero. The default is zero, which corresponds to the main diagonal. Returns ------- COO The output upper-triangular matrix. Raises ------ ValueError If :code:`x` doesn't have zero fill-values. See Also -------- numpy.triu : NumPy equivalent function """ from .core import COO check_zero_fill_value(x) if not x.ndim >= 2: raise NotImplementedError("sparse.triu is not implemented for scalars or 1-D arrays.") mask = x.coords[-2] + k <= x.coords[-1] coords = x.coords[:, mask] data = x.data[mask] return COO(coords, data, shape=x.shape, has_duplicates=False, sorted=True) def tril(x, k=0): """ Returns an array with all elements above the k-th diagonal set to zero. Parameters ---------- x : COO The input array. k : int, optional The diagonal above which elements are set to zero. The default is zero, which corresponds to the main diagonal. Returns ------- COO The output lower-triangular matrix. Raises ------ ValueError If :code:`x` doesn't have zero fill-values. See Also -------- numpy.tril : NumPy equivalent function """ from .core import COO check_zero_fill_value(x) if not x.ndim >= 2: raise NotImplementedError("sparse.tril is not implemented for scalars or 1-D arrays.") mask = x.coords[-2] + k >= x.coords[-1] coords = x.coords[:, mask] data = x.data[mask] return COO(coords, data, shape=x.shape, has_duplicates=False, sorted=True) def nansum(x, axis=None, keepdims=False, dtype=None, out=None): """ Performs a ``NaN`` skipping sum operation along the given axes. Uses all axes by default. Parameters ---------- x : SparseArray The array to perform the reduction on. axis : Union[int, Iterable[int]], optional The axes along which to sum. Uses all axes by default. keepdims : bool, optional Whether or not to keep the dimensions of the original array. dtype : numpy.dtype The data type of the output array. Returns ------- COO The reduced output sparse array. See Also -------- :obj:`COO.sum` : Function without ``NaN`` skipping. numpy.nansum : Equivalent Numpy function. """ assert out is None x = asCOO(x, name="nansum") return nanreduce(x, np.add, axis=axis, keepdims=keepdims, dtype=dtype) def nanmean(x, axis=None, keepdims=False, dtype=None, out=None): """ Performs a ``NaN`` skipping mean operation along the given axes. Uses all axes by default. Parameters ---------- x : SparseArray The array to perform the reduction on. axis : Union[int, Iterable[int]], optional The axes along which to compute the mean. Uses all axes by default. keepdims : bool, optional Whether or not to keep the dimensions of the original array. 
dtype : numpy.dtype The data type of the output array. Returns ------- COO The reduced output sparse array. See Also -------- :obj:`COO.mean` : Function without ``NaN`` skipping. numpy.nanmean : Equivalent Numpy function. """ assert out is None x = asCOO(x, name="nanmean") if not (np.issubdtype(x.dtype, np.floating) or np.issubdtype(x.dtype, np.complexfloating)): return x.mean(axis=axis, keepdims=keepdims, dtype=dtype) mask = np.isnan(x) x2 = where(mask, 0, x) # Count the number non-nan elements along axis nancount = mask.sum(axis=axis, dtype="i8", keepdims=keepdims) if axis is None: axis = tuple(range(x.ndim)) elif not isinstance(axis, tuple): axis = (axis,) den = reduce(operator.mul, (x.shape[i] for i in axis), 1) den -= nancount if (den == 0).any(): warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=1) num = np.sum(x2, axis=axis, dtype=dtype, keepdims=keepdims) with np.errstate(invalid="ignore", divide="ignore"): if num.ndim: return np.true_divide(num, den, casting="unsafe") return (num / den).astype(dtype if dtype is not None else x.dtype) def nanmax(x, axis=None, keepdims=False, dtype=None, out=None): """ Maximize along the given axes, skipping ``NaN`` values. Uses all axes by default. Parameters ---------- x : SparseArray The array to perform the reduction on. axis : Union[int, Iterable[int]], optional The axes along which to maximize. Uses all axes by default. keepdims : bool, optional Whether or not to keep the dimensions of the original array. dtype : numpy.dtype The data type of the output array. Returns ------- COO The reduced output sparse array. See Also -------- :obj:`COO.max` : Function without ``NaN`` skipping. numpy.nanmax : Equivalent Numpy function. """ assert out is None x = asCOO(x, name="nanmax") ar = x.reduce(np.fmax, axis=axis, keepdims=keepdims, dtype=dtype) if (isscalar(ar) and np.isnan(ar)) or np.isnan(ar.data).any(): warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=1) return ar def nanmin(x, axis=None, keepdims=False, dtype=None, out=None): """ Minimize along the given axes, skipping ``NaN`` values. Uses all axes by default. Parameters ---------- x : SparseArray The array to perform the reduction on. axis : Union[int, Iterable[int]], optional The axes along which to minimize. Uses all axes by default. keepdims : bool, optional Whether or not to keep the dimensions of the original array. dtype : numpy.dtype The data type of the output array. Returns ------- COO The reduced output sparse array. See Also -------- :obj:`COO.min` : Function without ``NaN`` skipping. numpy.nanmin : Equivalent Numpy function. """ assert out is None x = asCOO(x, name="nanmin") ar = x.reduce(np.fmin, axis=axis, keepdims=keepdims, dtype=dtype) if (isscalar(ar) and np.isnan(ar)) or np.isnan(ar.data).any(): warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=1) return ar def nanprod(x, axis=None, keepdims=False, dtype=None, out=None): """ Performs a product operation along the given axes, skipping ``NaN`` values. Uses all axes by default. Parameters ---------- x : SparseArray The array to perform the reduction on. axis : Union[int, Iterable[int]], optional The axes along which to multiply. Uses all axes by default. keepdims : bool, optional Whether or not to keep the dimensions of the original array. dtype : numpy.dtype The data type of the output array. Returns ------- COO The reduced output sparse array. See Also -------- :obj:`COO.prod` : Function without ``NaN`` skipping. numpy.nanprod : Equivalent Numpy function. 
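Examples
--------
An illustrative reduction (skipped; shown for exposition):

>>> import sparse
>>> s = sparse.COO.from_numpy(np.array([1.0, np.nan, 2.0]))
>>> float(sparse.nanprod(s))  # doctest: +SKIP
2.0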
""" assert out is None x = asCOO(x) return nanreduce(x, np.multiply, axis=axis, keepdims=keepdims, dtype=dtype) def where(condition, x=None, y=None): """ Select values from either ``x`` or ``y`` depending on ``condition``. If ``x`` and ``y`` are not given, returns indices where ``condition`` is nonzero. Performs the equivalent of :obj:`numpy.where`. Parameters ---------- condition : SparseArray The condition based on which to select values from either ``x`` or ``y``. x : SparseArray, optional The array to select values from if ``condition`` is nonzero. y : SparseArray, optional The array to select values from if ``condition`` is zero. Returns ------- COO The output array with selected values if ``x`` and ``y`` are given; else where the array is nonzero. Raises ------ ValueError If the operation would produce a dense result; or exactly one of ``x`` and ``y`` are given. See Also -------- numpy.where : Equivalent Numpy function. """ from .._umath import elemwise x_given = x is not None y_given = y is not None if not (x_given or y_given): check_zero_fill_value(condition) condition = asCOO(condition, name=str(np.where)) return tuple(condition.coords) if x_given != y_given: raise ValueError("either both or neither of x and y should be given") return elemwise(np.where, condition, x, y) def argwhere(a): """ Find the indices of array elements that are non-zero, grouped by element. Parameters ---------- a : array_like Input data. Returns ------- index_array : numpy.ndarray See Also -------- :obj:`where`, :obj:`COO.nonzero` Examples -------- >>> import sparse >>> x = sparse.COO(np.arange(6).reshape((2, 3))) >>> sparse.argwhere(x > 1) array([[0, 2], [1, 0], [1, 1], [1, 2]]) """ return np.transpose(a.nonzero()) def argmax(x, /, *, axis=None, keepdims=False): """ Returns the indices of the maximum values along a specified axis. When the maximum value occurs multiple times, only the indices corresponding to the first occurrence are returned. Parameters ---------- x : SparseArray Input array. The fill value must be ``0.0`` and all non-zero values must be greater than ``0.0``. axis : int, optional Axis along which to search. If ``None``, the function must return the index of the maximum value of the flattened array. Default: ``None``. keepdims : bool, optional If ``True``, the reduced axes (dimensions) must be included in the result as singleton dimensions, and, accordingly, the result must be compatible with the input array. Otherwise, if ``False``, the reduced axes (dimensions) must not be included in the result. Default: ``False``. Returns ------- out : numpy.ndarray If ``axis`` is ``None``, a zero-dimensional array containing the index of the first occurrence of the maximum value. Otherwise, a non-zero-dimensional array containing the indices of the maximum values. """ return _arg_minmax_common(x, axis=axis, keepdims=keepdims, mode="max") def argmin(x, /, *, axis=None, keepdims=False): """ Returns the indices of the minimum values along a specified axis. When the minimum value occurs multiple times, only the indices corresponding to the first occurrence are returned. Parameters ---------- x : SparseArray Input array. The fill value must be ``0.0`` and all non-zero values must be less than ``0.0``. axis : int, optional Axis along which to search. If ``None``, the function must return the index of the minimum value of the flattened array. Default: ``None``. 
keepdims : bool, optional If ``True``, the reduced axes (dimensions) must be included in the result as singleton dimensions, and, accordingly, the result must be compatible with the input array. Otherwise, if ``False``, the reduced axes (dimensions) must not be included in the result. Default: ``False``. Returns ------- out : numpy.ndarray If ``axis`` is ``None``, a zero-dimensional array containing the index of the first occurrence of the minimum value. Otherwise, a non-zero-dimensional array containing the indices of the minimum values. """ return _arg_minmax_common(x, axis=axis, keepdims=keepdims, mode="min") def _replace_nan(array, value): """ Replaces ``NaN``s in ``array`` with ``value``. Parameters ---------- array : COO The input array. value : numpy.number The values to replace ``NaN`` with. Returns ------- COO A copy of ``array`` with the ``NaN``s replaced. """ if not np.issubdtype(array.dtype, np.floating): return array return where(np.isnan(array), value, array) def nanreduce(x, method, identity=None, axis=None, keepdims=False, **kwargs): """ Performs an ``NaN`` skipping reduction on this array. See the documentation on :obj:`COO.reduce` for examples. Parameters ---------- x : COO The array to reduce. method : numpy.ufunc The method to use for performing the reduction. identity : numpy.number The identity value for this reduction. Inferred from ``method`` if not given. Note that some ``ufunc`` objects don't have this, so it may be necessary to give it. axis : Union[int, Iterable[int]], optional The axes along which to perform the reduction. Uses all axes by default. keepdims : bool, optional Whether or not to keep the dimensions of the original array. **kwargs : dict Any extra arguments to pass to the reduction operation. Returns ------- COO The result of the reduction operation. Raises ------ ValueError If reducing an all-zero axis would produce a nonzero result. See Also -------- COO.reduce : Similar method without ``NaN`` skipping functionality. """ arr = _replace_nan(x, method.identity if identity is None else identity) return arr.reduce(method, axis, keepdims, **kwargs) def roll(a, shift, axis=None): """ Shifts elements of an array along specified axis. Elements that roll beyond the last position are circulated and re-introduced at the first. Parameters ---------- a : COO Input array shift : int or tuple of ints Number of index positions that elements are shifted. If a tuple is provided, then axis must be a tuple of the same size, and each of the given axes is shifted by the corresponding number. If an int while axis is a tuple of ints, then broadcasting is used so the same shift is applied to all axes. axis : int or tuple of ints, optional Axis or tuple specifying multiple axes. By default, the array is flattened before shifting, after which the original shape is restored. Returns ------- res : ndarray Output array, with the same shape as a. 
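Examples
--------
An illustrative single-position shift (skipped; shown for exposition):

>>> import sparse
>>> x = sparse.COO.from_numpy(np.array([1, 2, 3, 4]))
>>> sparse.roll(x, 1).todense()  # doctest: +SKIP
array([4, 1, 2, 3])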
""" from .core import COO, as_coo a = as_coo(a) # roll flattened array if axis is None: return roll(a.reshape((-1,)), shift, 0).reshape(a.shape) # roll across specified axis # parse axis input, wrap in tuple axis = normalize_axis(axis, a.ndim) if not isinstance(axis, tuple): axis = (axis,) # make shift iterable if not isinstance(shift, Iterable): shift = (shift,) elif np.ndim(shift) > 1: raise ValueError("'shift' and 'axis' must be integers or 1D sequences.") # handle broadcasting if len(shift) == 1: shift = np.full(len(axis), shift) # check if dimensions are consistent if len(axis) != len(shift): raise ValueError("If 'shift' is a 1D sequence, 'axis' must have equal length.") if not can_store(a.coords.dtype, max(a.shape + shift)): raise ValueError( f"cannot roll with coords.dtype {a.coords.dtype} and shift {shift}. Try casting coords to a larger dtype." ) # shift elements coords, data = np.copy(a.coords), np.copy(a.data) try: for sh, ax in zip(shift, axis, strict=True): coords[ax] += sh coords[ax] %= a.shape[ax] except TypeError as e: if is_unsigned_dtype(coords.dtype): raise ValueError( f"rolling with coords.dtype as {coords.dtype} is not safe. Try using a signed dtype." ) from e return COO( coords, data=data, shape=a.shape, has_duplicates=False, fill_value=a.fill_value, ) def diagonal(a, offset=0, axis1=0, axis2=1): """ Extract diagonal from a COO array. The equivalent of :obj:`numpy.diagonal`. Parameters ---------- a : COO The array to perform the operation on. offset : int, optional Offset of the diagonal from the main diagonal. Defaults to main diagonal (0). axis1 : int, optional First axis from which the diagonals should be taken. Defaults to first axis (0). axis2 : int, optional Second axis from which the diagonals should be taken. Defaults to second axis (1). Examples -------- >>> import sparse >>> x = sparse.as_coo(np.arange(9).reshape(3, 3)) >>> sparse.diagonal(x).todense() array([0, 4, 8]) >>> sparse.diagonal(x, offset=1).todense() array([1, 5]) >>> x = sparse.as_coo(np.arange(12).reshape((2, 3, 2))) >>> x_diag = sparse.diagonal(x, axis1=0, axis2=2) >>> x_diag.shape (3, 2) >>> x_diag.todense() array([[ 0, 7], [ 2, 9], [ 4, 11]]) Returns ------- out: COO The result of the operation. Raises ------ ValueError If a.shape[axis1] != a.shape[axis2] See Also -------- :obj:`numpy.diagonal` : NumPy equivalent function """ from .core import COO if a.shape[axis1] != a.shape[axis2]: raise ValueError("a.shape[axis1] != a.shape[axis2]") diag_axes = [axis for axis in range(len(a.shape)) if axis != axis1 and axis != axis2] + [axis1] diag_shape = [a.shape[axis] for axis in diag_axes] diag_shape[-1] -= abs(offset) diag_idx = _diagonal_idx(a.coords, axis1, axis2, offset) diag_coords = [a.coords[axis][diag_idx] for axis in diag_axes] diag_data = a.data[diag_idx] return COO(diag_coords, diag_data, diag_shape) def diagonalize(a, axis=0): """ Diagonalize a COO array. The new dimension is appended at the end. .. WARNING:: :obj:`diagonalize` is not :obj:`numpy` compatible as there is no direct :obj:`numpy` equivalent. The API may change in the future. Parameters ---------- a : Union[COO, np.ndarray, scipy.sparse.spmatrix] The array to diagonalize. axis : int, optional The axis to diagonalize. Defaults to first axis (0). 
Examples -------- >>> import sparse >>> x = sparse.as_coo(np.arange(1, 4)) >>> sparse.diagonalize(x).todense() array([[1, 0, 0], [0, 2, 0], [0, 0, 3]]) >>> x = sparse.as_coo(np.arange(24).reshape((2, 3, 4))) >>> x_diag = sparse.diagonalize(x, axis=1) >>> x_diag.shape (2, 3, 4, 3) :obj:`diagonalize` is the inverse of :obj:`diagonal` >>> a = sparse.random((3, 3, 3, 3, 3), density=0.3) >>> a_diag = sparse.diagonalize(a, axis=2) >>> (sparse.diagonal(a_diag, axis1=2, axis2=5) == a.transpose([0, 1, 3, 4, 2])).all() True Returns ------- out: COO The result of the operation. See Also -------- :obj:`numpy.diag` : NumPy equivalent for 1D array """ from .core import COO, as_coo a = as_coo(a) diag_shape = a.shape + (a.shape[axis],) diag_coords = np.vstack([a.coords, a.coords[axis]]) return COO(diag_coords, a.data, diag_shape) def isposinf(x, out=None): """ Test element-wise for positive infinity, return result as sparse ``bool`` array. Parameters ---------- x Input out, optional Output array Examples -------- >>> import sparse >>> x = sparse.as_coo(np.array([np.inf])) >>> sparse.isposinf(x).todense() array([ True]) See Also -------- numpy.isposinf : The NumPy equivalent """ from .core import elemwise return elemwise(lambda x, out=None, dtype=None: np.isposinf(x, out=out), x, out=out) def isneginf(x, out=None): """ Test element-wise for negative infinity, return result as sparse ``bool`` array. Parameters ---------- x Input out, optional Output array Examples -------- >>> import sparse >>> x = sparse.as_coo(np.array([-np.inf])) >>> sparse.isneginf(x).todense() array([ True]) See Also -------- numpy.isneginf : The NumPy equivalent """ from .core import elemwise return elemwise(lambda x, out=None, dtype=None: np.isneginf(x, out=out), x, out=out) def result_type(*arrays_and_dtypes): """Returns the type that results from applying the NumPy type promotion rules to the arguments. See Also -------- numpy.result_type : The NumPy equivalent """ return np.result_type(*(_as_result_type_arg(x) for x in arrays_and_dtypes)) def _as_result_type_arg(x): if not isinstance(x, SparseArray): return x if x.ndim > 0: return x.dtype # 0-dimensional arrays give different result_type outputs than their dtypes return x.todense() @numba.jit(nopython=True, nogil=True) def _diagonal_idx(coordlist, axis1, axis2, offset): """ Utility function that returns all indices that correspond to a diagonal element. Parameters ---------- coordlist : list of lists Coordinate indices. axis1, axis2 : int The axes of the diagonal. offset : int Offset of the diagonal from the main diagonal. Defaults to main diagonal (0). """ return np.array([i for i in range(len(coordlist[axis1])) if coordlist[axis1][i] + offset == coordlist[axis2][i]]) def clip(a, a_min=None, a_max=None, out=None): """ Clip (limit) the values in the array. Return an array whose values are limited to ``[min, max]``. One of min or max must be given. Parameters ---------- a a_min : scalar or `SparseArray` or `None` Minimum value. If `None`, clipping is not performed on lower interval edge. a_max : scalar or `SparseArray` or `None` Maximum value. If `None`, clipping is not performed on upper interval edge. out : SparseArray, optional If provided, the results will be placed in this array. It may be the input array for in-place clipping. `out` must be of the right shape to hold the output. Its type is preserved. Returns ------- clipped_array : SparseArray An array with the elements of `self`, but where values < `min` are replaced with `min`, and those > `max` with `max`. 
Examples -------- >>> import sparse >>> x = sparse.COO.from_numpy([0, 0, 0, 1, 2, 3]) >>> sparse.clip(x, a_min=1).todense() # doctest: +NORMALIZE_WHITESPACE array([1, 1, 1, 1, 2, 3]) >>> sparse.clip(x, a_max=1).todense() # doctest: +NORMALIZE_WHITESPACE array([0, 0, 0, 1, 1, 1]) >>> sparse.clip(x, a_min=1, a_max=2).todense() # doctest: +NORMALIZE_WHITESPACE array([1, 1, 1, 1, 2, 2]) See Also -------- numpy.clip : Equivalent NumPy function """ a = asCOO(a, name="clip") return a.clip(a_min, a_max) def expand_dims(x, /, *, axis=0): """ Expands the shape of an array by inserting a new axis (dimension) of size one at the position specified by ``axis``. Parameters ---------- a : COO Input COO array. axis : int Position in the expanded axes where the new axis is placed. Returns ------- result : COO An expanded output COO array having the same data type as ``x``. Examples -------- >>> import sparse >>> x = sparse.COO.from_numpy([[1, 0, 0, 0, 2, -3]]) >>> x.shape (1, 6) >>> y1 = sparse.expand_dims(x, axis=1) >>> y1.shape (1, 1, 6) >>> y2 = sparse.expand_dims(x, axis=2) >>> y2.shape (1, 6, 1) """ x = _validate_coo_input(x) if not isinstance(axis, int): raise IndexError(f"Invalid axis position: {axis}") axis = normalize_axis(axis, x.ndim + 1) new_coords = np.insert(x.coords, obj=axis, values=np.zeros(x.nnz, dtype=np.intp), axis=0) new_shape = list(x.shape) new_shape.insert(axis, 1) new_shape = tuple(new_shape) from .core import COO return COO( new_coords, x.data, shape=new_shape, fill_value=x.fill_value, ) def flip(x, /, *, axis=None): """ Reverses the order of elements in an array along the given axis. The shape of the array is preserved. Parameters ---------- a : COO Input COO array. axis : int or tuple of ints, optional Axis (or axes) along which to flip. If ``axis`` is ``None``, the function must flip all input array axes. If ``axis`` is negative, the function must count from the last dimension. If provided more than one axis, the function must flip only the specified axes. Default: ``None``. Returns ------- result : COO An output array having the same data type and shape as ``x`` and whose elements, relative to ``x``, are reordered. """ x = _validate_coo_input(x) if axis is None: axis = range(x.ndim) if not isinstance(axis, Iterable): axis = (axis,) new_coords = x.coords.copy() for ax in axis: new_coords[ax, :] = x.shape[ax] - 1 - x.coords[ax, :] from .core import COO return COO( new_coords, x.data, shape=x.shape, fill_value=x.fill_value, ) # Array API set functions class UniqueCountsResult(NamedTuple): values: np.ndarray counts: np.ndarray def unique_counts(x, /): """ Returns the unique elements of an input array `x`, and the corresponding counts for each unique element in `x`. Parameters ---------- x : COO Input COO array. It will be flattened if it is not already 1-D. Returns ------- out : namedtuple The result containing: * values - The unique elements of an input array. * counts - The corresponding counts for each unique element. Raises ------ ValueError If the input array is in a different format than COO. 
Examples -------- >>> import sparse >>> x = sparse.COO.from_numpy([1, 0, 2, 1, 2, -3]) >>> sparse.unique_counts(x) UniqueCountsResult(values=array([-3, 0, 1, 2]), counts=array([1, 1, 2, 2])) """ x = _validate_coo_input(x) x = x.flatten() values, counts = np.unique(x.data, return_counts=True) if x.nnz < x.size: values = np.concatenate([[x.fill_value], values]) counts = np.concatenate([[x.size - x.nnz], counts]) sorted_indices = np.argsort(values) values[sorted_indices] = values.copy() counts[sorted_indices] = counts.copy() return UniqueCountsResult(values, counts) def unique_values(x, /): """ Returns the unique elements of an input array `x`. Parameters ---------- x : COO Input COO array. It will be flattened if it is not already 1-D. Returns ------- out : ndarray The unique elements of an input array. Raises ------ ValueError If the input array is in a different format than COO. Examples -------- >>> import sparse >>> x = sparse.COO.from_numpy([1, 0, 2, 1, 2, -3]) >>> sparse.unique_values(x) array([-3, 0, 1, 2]) """ x = _validate_coo_input(x) x = x.flatten() values = np.unique(x.data) if x.nnz < x.size: values = np.sort(np.concatenate([[x.fill_value], values])) return values def sort(x, /, *, axis=-1, descending=False, stable=False): """ Returns a sorted copy of an input array ``x``. Parameters ---------- x : SparseArray Input array. Should have a real-valued data type. axis : int Axis along which to sort. If set to ``-1``, the function must sort along the last axis. Default: ``-1``. descending : bool Sort order. If ``True``, the array must be sorted in descending order (by value). If ``False``, the array must be sorted in ascending order (by value). Default: ``False``. stable : bool Whether the sort is stable. Only ``False`` is supported currently. Returns ------- out : COO A sorted array. Raises ------ ValueError If the input array isn't and can't be converted to COO format. Examples -------- >>> import sparse >>> x = sparse.COO.from_numpy([1, 0, 2, 0, 2, -3]) >>> sparse.sort(x).todense() array([-3, 0, 0, 1, 2, 2]) >>> sparse.sort(x, descending=True).todense() array([ 2, 2, 1, 0, 0, -3]) """ from .._common import moveaxis from .core import COO x = _validate_coo_input(x) if stable: raise ValueError("`stable=True` isn't currently supported.") original_ndim = x.ndim if x.ndim == 1: x = x[None, :] axis = -1 x = moveaxis(x, source=axis, destination=-1) x_shape = x.shape x = x.reshape((-1, x_shape[-1])) new_coords, new_data = _sort_coo(x.coords, x.data, x.fill_value, sort_axis_len=x_shape[-1], descending=descending) x = COO(new_coords, new_data, x.shape, has_duplicates=False, sorted=True, fill_value=x.fill_value) x = x.reshape(x_shape[:-1] + (x_shape[-1],)) x = moveaxis(x, source=-1, destination=axis) return x if original_ndim == x.ndim else x.squeeze() def take(x, indices, /, *, axis=None): """ Returns elements of an array along an axis. Parameters ---------- x : SparseArray Input array. indices : ndarray Array indices. The array must be one-dimensional and have an integer data type. axis : int Axis over which to select values. If ``axis`` is negative, the function must determine the axis along which to select values by counting from the last dimension. For ``None``, the flattened input array is used. Default: ``None``. Returns ------- out : COO A COO array with requested indices. Raises ------ ValueError If the input array isn't and can't be converted to COO format. 
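Examples
--------
Selecting two columns (illustrative only, hence skipped):

>>> import sparse
>>> x = sparse.COO.from_numpy(np.arange(6).reshape(2, 3))
>>> sparse.take(x, np.array([0, 2]), axis=1).todense()  # doctest: +SKIP
array([[0, 2],
       [3, 5]])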
""" x = _validate_coo_input(x) if axis is None: x = x.flatten() return x[indices] axis = normalize_axis(axis, x.ndim) full_index = (slice(None),) * axis + (indices, ...) return x[full_index] def _validate_coo_input(x: Any): from .._common import _is_scipy_sparse_obj from .core import COO if _is_scipy_sparse_obj(x): x = COO.from_scipy_sparse(x) elif not isinstance(x, SparseArray): raise ValueError(f"Input must be an instance of SparseArray, but it's {type(x)}.") elif not isinstance(x, COO): x = x.asformat(COO) return x @numba.jit(nopython=True, nogil=True) def _sort_coo( coords: np.ndarray, data: np.ndarray, fill_value: float, sort_axis_len: int, descending: bool ) -> tuple[np.ndarray, np.ndarray]: assert coords.shape[0] == 2 group_coords = coords[0, :] sort_coords = coords[1, :] data = data.copy() result_indices = np.empty_like(sort_coords) # We iterate through all groups and sort each one of them. # first and last index of a group is tracked. prev_group = -1 group_first_idx = -1 group_last_idx = -1 # We add `-1` sentinel to know when the last group ends for idx, group in enumerate(np.append(group_coords, -1)): if group == prev_group: continue if prev_group != -1: group_last_idx = idx group_slice = slice(group_first_idx, group_last_idx) group_size = group_last_idx - group_first_idx # SORT VALUES if group_size > 1: # np.sort in numba doesn't support `np.sort`'s arguments so `stable` # keyword can't be supported. # https://numba.pydata.org/numba-doc/latest/reference/numpysupported.html#other-methods data[group_slice] = np.sort(data[group_slice]) if descending: data[group_slice] = data[group_slice][::-1] # SORT INDICES fill_value_count = sort_axis_len - group_size indices = np.arange(group_size) # find a place where fill_value would be for pos in range(group_size): if (not descending and fill_value < data[group_slice][pos]) or ( descending and fill_value > data[group_slice][pos] ): indices[pos:] += fill_value_count break result_indices[group_first_idx:group_last_idx] = indices prev_group = group group_first_idx = idx return np.vstack((group_coords, result_indices)), data @numba.jit(nopython=True, nogil=True) def _compute_minmax_args( coords: np.ndarray, data: np.ndarray, reduce_size: int, fill_value: float, max_mode_flag: bool, ) -> tuple[np.ndarray, np.ndarray]: assert coords.shape[0] == 2 reduce_coords = coords[0, :] index_coords = coords[1, :] result_indices = np.unique(index_coords) result_data = [] # we iterate through each trace for result_index in np.nditer(result_indices): mask = index_coords == result_index masked_reduce_coords = reduce_coords[mask] masked_data = data[mask] compared_data = operator.gt(masked_data, fill_value) if max_mode_flag else operator.lt(masked_data, fill_value) if np.any(compared_data) or len(masked_data) == reduce_size: # best value is a non-fill value best_arg = np.argmax(masked_data) if max_mode_flag else np.argmin(masked_data) result_data.append(masked_reduce_coords[best_arg]) else: # best value is a fill value, find the first occurrence of it current_coord = np.array(-1, dtype=coords.dtype) found = False for idx, new_coord in enumerate(np.nditer(np.sort(masked_reduce_coords))): # there is at least one fill value between consecutive non-fill values if new_coord - current_coord > 1: result_data.append(idx) found = True break current_coord = new_coord # get the first fill value after all non-fill values if not found: result_data.append(current_coord + 1) return (result_indices, np.array(result_data, dtype=np.intp)) def _arg_minmax_common( x: SparseArray, 
axis: int | None, keepdims: bool, mode: str, ): """ Internal implementation for argmax and argmin functions. """ assert mode in ("max", "min") max_mode_flag = mode == "max" x = _validate_coo_input(x) if not isinstance(axis, int | type(None)): raise ValueError(f"`axis` must be `int` or `None`, but it's: {type(axis)}.") if isinstance(axis, int) and axis >= x.ndim: raise ValueError(f"`axis={axis}` is out of bounds for array of dimension {x.ndim}.") if x.ndim == 0: raise ValueError("Input array must be at least 1-D, but it's 0-D.") # If `axis` is None then we need to flatten the input array and memorize # the original dimensionality for the final reshape operation. axis_none_original_ndim: int | None = None if axis is None: axis_none_original_ndim = x.ndim x = x.reshape(-1)[:, None] axis = 0 # A 1-D array must have one more singleton dimension. if axis == 0 and x.ndim == 1: x = x[:, None] # We need to move `axis` to the front. new_transpose = list(range(x.ndim)) new_transpose.insert(0, new_transpose.pop(axis)) new_transpose = tuple(new_transpose) # And reshape it to 2-D (reduce axis, the rest of axes flattened) new_shape = list(x.shape) new_shape.insert(0, new_shape.pop(axis)) new_shape = tuple(new_shape) x = x.transpose(new_transpose) x = x.reshape((new_shape[0], np.prod(new_shape[1:]))) # Compute max/min arguments result_indices, result_data = _compute_minmax_args( x.coords.copy(), x.data.copy(), reduce_size=x.shape[0], fill_value=x.fill_value, max_mode_flag=max_mode_flag, ) from .core import COO result = COO(result_indices, result_data, shape=(x.shape[1],), fill_value=0, prune=True) # Let's reshape the result to the original shape. result = result.reshape((1, *new_shape[1:])) new_transpose = list(range(result.ndim)) new_transpose.insert(axis, new_transpose.pop(0)) result = result.transpose(new_transpose) # If `axis=None` we need to reshape flattened array into original dimensionality. if axis_none_original_ndim is not None: result = result.reshape([1 for _ in range(axis_none_original_ndim)]) return result if keepdims else result.squeeze() def matrix_transpose(x, /): """ Transposes a matrix or a stack of matrices. Parameters ---------- x : SparseArray Input array. Returns ------- out : COO Transposed COO array. Raises ------ ValueError If the input array isn't and can't be converted to COO format, or if ``x.ndim < 2``. """ if hasattr(x, "ndim") and x.ndim < 2: raise ValueError("`x.ndim >= 2` must hold.") x = _validate_coo_input(x) transpose_axes = list(range(x.ndim)) transpose_axes[-2:] = transpose_axes[-2:][::-1] return x.transpose(transpose_axes) sparse-0.16.0a9/sparse/numba_backend/_coo/core.py000066400000000000000000001436511463475501500216400ustar00rootroot00000000000000import copy as _copy import operator import warnings from collections import defaultdict, deque from collections.abc import Iterable, Iterator, Sized from functools import reduce import numba import numpy as np from numpy.lib.mixins import NDArrayOperatorsMixin from .._sparse_array import SparseArray from .._umath import broadcast_to from .._utils import ( _zero_of_dtype, can_store, check_fill_value, check_zero_fill_value, equivalent, normalize_axis, ) from .indexing import getitem class COO(SparseArray, NDArrayOperatorsMixin): # lgtm [py/missing-equals] """ A sparse multidimensional array. This is stored in COO format. It depends on NumPy and Scipy.sparse for computation, but supports arrays of arbitrary dimension. 
Parameters ---------- coords : numpy.ndarray (COO.ndim, COO.nnz) An array holding the index locations of every value Should have shape (number of dimensions, number of non-zeros). data : numpy.ndarray (COO.nnz,) An array of Values. A scalar can also be supplied if the data is the same across all coordinates. If not given, defers to :obj:`as_coo`. shape : tuple[int] (COO.ndim,) The shape of the array. has_duplicates : bool, optional A value indicating whether the supplied value for :code:`coords` has duplicates. Note that setting this to `False` when :code:`coords` does have duplicates may result in undefined behaviour. See :obj:`COO.sum_duplicates` sorted : bool, optional A value indicating whether the values in `coords` are sorted. Note that setting this to `True` when :code:`coords` isn't sorted may result in undefined behaviour. See :obj:`COO.sort_indices`. prune : bool, optional A flag indicating whether or not we should prune any fill-values present in ``data``. cache : bool, optional Whether to enable cacheing for various operations. See :obj:`COO.enable_caching` fill_value: scalar, optional The fill value for this array. Attributes ---------- coords : numpy.ndarray (ndim, nnz) An array holding the coordinates of every nonzero element. data : numpy.ndarray (nnz,) An array holding the values corresponding to :obj:`COO.coords`. shape : tuple[int] (ndim,) The dimensions of this array. See Also -------- DOK : A mostly write-only sparse array. as_coo : Convert any given format to :obj:`COO`. Examples -------- You can create :obj:`COO` objects from Numpy arrays. >>> x = np.eye(4, dtype=np.uint8) >>> x[2, 3] = 5 >>> s = COO.from_numpy(x) >>> s >>> s.data # doctest: +NORMALIZE_WHITESPACE array([1, 1, 1, 5, 1], dtype=uint8) >>> s.coords # doctest: +NORMALIZE_WHITESPACE array([[0, 1, 2, 2, 3], [0, 1, 2, 3, 3]]) :obj:`COO` objects support basic arithmetic and binary operations. >>> x2 = np.eye(4, dtype=np.uint8) >>> x2[3, 2] = 5 >>> s2 = COO.from_numpy(x2) >>> (s + s2).todense() # doctest: +NORMALIZE_WHITESPACE array([[2, 0, 0, 0], [0, 2, 0, 0], [0, 0, 2, 5], [0, 0, 5, 2]], dtype=uint8) >>> (s * s2).todense() # doctest: +NORMALIZE_WHITESPACE array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], dtype=uint8) Binary operations support broadcasting. >>> x3 = np.zeros((4, 1), dtype=np.uint8) >>> x3[2, 0] = 1 >>> s3 = COO.from_numpy(x3) >>> (s * s3).todense() # doctest: +NORMALIZE_WHITESPACE array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 5], [0, 0, 0, 0]], dtype=uint8) :obj:`COO` objects also support dot products and reductions. >>> s.dot(s.T).sum(axis=0).todense() # doctest: +NORMALIZE_WHITESPACE array([ 1, 1, 31, 6], dtype=uint64) You can use Numpy :code:`ufunc` operations on :obj:`COO` arrays as well. >>> np.sum(s, axis=1).todense() # doctest: +NORMALIZE_WHITESPACE array([1, 1, 6, 1], dtype=uint64) >>> np.round(np.sqrt(s, dtype=np.float64), decimals=1).todense() # doctest: +SKIP array([[ 1. , 0. , 0. , 0. ], [ 0. , 1. , 0. , 0. ], [ 0. , 0. , 1. , 2.2], [ 0. , 0. , 0. , 1. ]]) Operations that will result in a dense array will usually result in a different fill value, such as the following. >>> np.exp(s) You can also create :obj:`COO` arrays from coordinates and data. >>> coords = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 3], [0, 3, 2, 0, 1]] >>> data = [1, 2, 3, 4, 5] >>> s4 = COO(coords, data, shape=(3, 4, 5)) >>> s4 If the data is same across all coordinates, you can also specify a scalar. 
>>> coords = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 3], [0, 3, 2, 0, 1]] >>> data = 1 >>> s5 = COO(coords, data, shape=(3, 4, 5)) >>> s5 Following scipy.sparse conventions you can also pass these as a tuple with rows and columns >>> rows = [0, 1, 2, 3, 4] >>> cols = [0, 0, 0, 1, 1] >>> data = [10, 20, 30, 40, 50] >>> z = COO((data, (rows, cols))) >>> z.todense() # doctest: +NORMALIZE_WHITESPACE array([[10, 0], [20, 0], [30, 0], [ 0, 40], [ 0, 50]]) You can also pass a dictionary or iterable of index/value pairs. Repeated indices imply summation: >>> d = {(0, 0, 0): 1, (1, 2, 3): 2, (1, 1, 0): 3} >>> COO(d) >>> L = [((0, 0), 1), ((1, 1), 2), ((0, 0), 3)] >>> COO(L).todense() # doctest: +NORMALIZE_WHITESPACE array([[4, 0], [0, 2]]) You can convert :obj:`DOK` arrays to :obj:`COO` arrays. >>> from sparse import DOK >>> s6 = DOK((5, 5), dtype=np.int64) >>> s6[1:3, 1:3] = [[4, 5], [6, 7]] >>> s6 >>> s7 = s6.asformat("coo") >>> s7 >>> s7.todense() # doctest: +NORMALIZE_WHITESPACE array([[0, 0, 0, 0, 0], [0, 4, 5, 0, 0], [0, 6, 7, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]) """ __array_priority__ = 12 def __init__( self, coords, data=None, shape=None, has_duplicates=True, sorted=False, prune=False, cache=False, fill_value=None, idx_dtype=None, ): self._cache = None if cache: self.enable_caching() if not isinstance(coords, np.ndarray): warnings.warn( "coords should be an ndarray. This will raise a ValueError in the future.", DeprecationWarning, stacklevel=1, ) if data is None: arr = as_coo(coords, shape=shape, fill_value=fill_value, idx_dtype=idx_dtype) self._make_shallow_copy_of(arr) if cache: self.enable_caching() return self.data = np.asarray(data) self.coords = np.asarray(coords) if self.coords.ndim == 1: if self.coords.size == 0 and shape is not None: self.coords = self.coords.reshape((len(shape), len(data))) else: self.coords = self.coords[None, :] if self.data.ndim == 0: self.data = np.broadcast_to(self.data, self.coords.shape[1]) if self.data.ndim != 1: raise ValueError("data must be a scalar or 1-dimensional.") if shape is None: warnings.warn( "shape should be provided. This will raise a ValueError in the future.", DeprecationWarning, stacklevel=1, ) shape = tuple(self.coords.max(axis=1) + 1) if self.coords.nbytes else () if not isinstance(shape, Iterable): shape = (shape,) if isinstance(shape, np.ndarray): shape = tuple(shape) if shape and not self.coords.size: self.coords = np.zeros((len(shape) if isinstance(shape, Iterable) else 1, 0), dtype=np.intp) super().__init__(shape, fill_value=fill_value) if idx_dtype: if not can_store(idx_dtype, max(shape)): raise ValueError(f"cannot cast array with shape {shape} to dtype {idx_dtype}.") self.coords = self.coords.astype(idx_dtype) if self.shape: if len(self.data) != self.coords.shape[1]: msg = "The data length does not match the coordinates given.\nlen(data) = {}, but {} coords specified." raise ValueError(msg.format(len(data), self.coords.shape[1])) if len(self.shape) != self.coords.shape[0]: msg = ( "Shape specified by `shape` doesn't match the " "shape of `coords`; len(shape)={} != coords.shape[0]={}" "(and coords.shape={})" ) raise ValueError(msg.format(len(shape), self.coords.shape[0], self.coords.shape)) from .._settings import WARN_ON_TOO_DENSE if WARN_ON_TOO_DENSE and self.nbytes >= self.size * self.data.itemsize: warnings.warn( "Attempting to create a sparse array that takes no less " "memory than than an equivalent dense array. 
You may want to " "use a dense array here instead.", RuntimeWarning, stacklevel=1, ) if not sorted: self._sort_indices() if has_duplicates: self._sum_duplicates() if prune: self._prune() def __getstate__(self): return (self.coords, self.data, self.shape, self.fill_value) def __setstate__(self, state): self.coords, self.data, self.shape, self.fill_value = state self._cache = None def __dask_tokenize__(self): "Produce a deterministic, content-based hash for dask." from dask.base import normalize_token return normalize_token((type(self), self.coords, self.data, self.shape, self.fill_value)) def copy(self, deep=True): """Return a copy of the array. Parameters ---------- deep : boolean, optional If True (default), the internal coords and data arrays are also copied. Set to ``False`` to only make a shallow copy. """ return _copy.deepcopy(self) if deep else _copy.copy(self) def enable_caching(self): """Enable caching of reshape, transpose, and tocsr/csc operations This enables efficient iterative workflows that make heavy use of csr/csc operations, such as tensordot. This maintains a cache of recent results of reshape and transpose so that operations like tensordot (which uses both internally) store efficiently stored representations for repeated use. This can significantly cut down on computational costs in common numeric algorithms. However, this also assumes that neither this object, nor the downstream objects will have their data mutated. Examples -------- >>> s.enable_caching() # doctest: +SKIP >>> csr1 = s.transpose((2, 0, 1)).reshape((100, 120)).tocsr() # doctest: +SKIP >>> csr2 = s.transpose((2, 0, 1)).reshape((100, 120)).tocsr() # doctest: +SKIP >>> csr1 is csr2 # doctest: +SKIP True """ self._cache = defaultdict(lambda: deque(maxlen=3)) @classmethod def from_numpy(cls, x, fill_value=None, idx_dtype=None): """ Convert the given :obj:`numpy.ndarray` to a :obj:`COO` object. Parameters ---------- x : np.ndarray The dense array to convert. fill_value : scalar The fill value of the constructed :obj:`COO` array. Zero if unspecified. Returns ------- COO The converted COO array. Examples -------- >>> x = np.eye(5) >>> s = COO.from_numpy(x) >>> s >>> x[x == 0] = np.nan >>> COO.from_numpy(x, fill_value=np.nan) """ x = np.asanyarray(x).view(type=np.ndarray) if fill_value is None: fill_value = _zero_of_dtype(x.dtype) if x.shape else x coords = np.atleast_2d(np.flatnonzero(~equivalent(x, fill_value))) data = x.ravel()[tuple(coords)] return cls( coords, data, shape=x.size, has_duplicates=False, sorted=True, fill_value=fill_value, idx_dtype=idx_dtype, ).reshape(x.shape) def todense(self): """ Convert this :obj:`COO` array to a dense :obj:`numpy.ndarray`. Note that this may take a large amount of memory if the :obj:`COO` object's :code:`shape` is large. Returns ------- numpy.ndarray The converted dense array. See Also -------- DOK.todense : Equivalent :obj:`DOK` array method. scipy.sparse.coo_matrix.todense : Equivalent Scipy method. 
Examples -------- >>> x = np.random.randint(100, size=(7, 3)) >>> s = COO.from_numpy(x) >>> x2 = s.todense() >>> np.array_equal(x, x2) True """ x = np.full(self.shape, self.fill_value, self.dtype) coords = tuple([self.coords[i, :] for i in range(self.ndim)]) data = self.data if coords != (): x[coords] = data else: if len(data) != 0: x[coords] = data return x @classmethod def from_scipy_sparse(cls, x, /, *, fill_value=None): """ Construct a :obj:`COO` array from a :obj:`scipy.sparse.spmatrix` Parameters ---------- x : scipy.sparse.spmatrix The sparse matrix to construct the array from. fill_value : scalar The fill-value to use when converting. Returns ------- COO The converted :obj:`COO` object. Examples -------- >>> x = scipy.sparse.rand(6, 3, density=0.2) >>> s = COO.from_scipy_sparse(x) >>> np.array_equal(x.todense(), s.todense()) True """ x = x.asformat("coo") coords = np.empty((2, x.nnz), dtype=x.row.dtype) coords[0, :] = x.row coords[1, :] = x.col return COO( coords, x.data, shape=x.shape, has_duplicates=not x.has_canonical_format, sorted=x.has_canonical_format, fill_value=fill_value, ) @classmethod def from_iter(cls, x, shape=None, fill_value=None, dtype=None): """ Converts an iterable in certain formats to a :obj:`COO` array. See examples for details. Parameters ---------- x : Iterable or Iterator The iterable to convert to :obj:`COO`. shape : tuple[int], optional The shape of the array. fill_value : scalar The fill value for this array. dtype : numpy.dtype The dtype of the input array. Inferred from the input if not given. Returns ------- out : COO The output :obj:`COO` array. Examples -------- You can convert items of the format ``[((i, j, k), value), ((i, j, k), value)]`` to :obj:`COO`. Here, the first part represents the coordinate and the second part represents the value. >>> x = [((0, 0), 1), ((1, 1), 1)] >>> s = COO.from_iter(x) >>> s.todense() array([[1, 0], [0, 1]]) You can also have a similar format with a dictionary. >>> x = {(0, 0): 1, (1, 1): 1} >>> s = COO.from_iter(x) >>> s.todense() array([[1, 0], [0, 1]]) The third supported format is ``(data, (..., row, col))``. >>> x = ([1, 1], ([0, 1], [0, 1])) >>> s = COO.from_iter(x) >>> s.todense() array([[1, 0], [0, 1]]) You can also pass in a :obj:`collections.Iterator` object. >>> x = [((0, 0), 1), ((1, 1), 1)].__iter__() >>> s = COO.from_iter(x) >>> s.todense() array([[1, 0], [0, 1]]) """ if isinstance(x, dict): x = list(x.items()) if not isinstance(x, Sized): x = list(x) if len(x) != 2 and not all(len(item) == 2 for item in x): raise ValueError("Invalid iterable to convert to COO.") if not x: ndim = 0 if shape is None else len(shape) coords = np.empty((ndim, 0), dtype=np.uint8) data = np.empty((0,), dtype=dtype) shape = () if shape is None else shape elif not isinstance(x[0][0], Iterable): coords = np.stack(x[1], axis=0) data = np.asarray(x[0], dtype=dtype) else: coords = np.array([item[0] for item in x]).T data = np.array([item[1] for item in x], dtype=dtype) if not ( coords.ndim == 2 and data.ndim == 1 and np.issubdtype(coords.dtype, np.integer) and np.all(coords >= 0) ): raise ValueError("Invalid iterable to convert to COO.") return COO(coords, data, shape=shape, fill_value=fill_value) @property def dtype(self): """ The datatype of this array. Returns ------- numpy.dtype The datatype of this array. See Also -------- numpy.ndarray.dtype : Numpy equivalent property. scipy.sparse.coo_matrix.dtype : Scipy equivalent property. 
Examples -------- >>> x = (200 * np.random.rand(5, 4)).astype(np.int32) >>> s = COO.from_numpy(x) >>> s.dtype dtype('int32') >>> x.dtype == s.dtype True """ return self.data.dtype @property def nnz(self): """ The number of nonzero elements in this array. Note that any duplicates in :code:`coords` are counted multiple times. To avoid this, call :obj:`COO.sum_duplicates`. Returns ------- int The number of nonzero elements in this array. See Also -------- DOK.nnz : Equivalent :obj:`DOK` array property. numpy.count_nonzero : A similar Numpy function. scipy.sparse.coo_matrix.nnz : The Scipy equivalent property. Examples -------- >>> x = np.array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3, 0, 0]) >>> np.count_nonzero(x) 6 >>> s = COO.from_numpy(x) >>> s.nnz 6 >>> np.count_nonzero(x) == s.nnz True """ return self.coords.shape[1] @property def format(self): """ The storage format of this array. Returns ------- str The storage format of this array. See Also ------- scipy.sparse.dok_matrix.format : The Scipy equivalent property. Examples ------- >>> import sparse >>> s = sparse.random((5, 5), density=0.2, format="dok") >>> s.format 'dok' >>> t = sparse.random((5, 5), density=0.2, format="coo") >>> t.format 'coo' """ return "coo" @property def nbytes(self): """ The number of bytes taken up by this object. Note that for small arrays, this may undercount the number of bytes due to the large constant overhead. Returns ------- int The approximate bytes of memory taken by this object. See Also -------- numpy.ndarray.nbytes : The equivalent Numpy property. Examples -------- >>> data = np.arange(6, dtype=np.uint8) >>> coords = np.random.randint(1000, size=(3, 6), dtype=np.uint16) >>> s = COO(coords, data, shape=(1000, 1000, 1000)) >>> s.nbytes 42 """ return self.data.nbytes + self.coords.nbytes def __len__(self): """ Get "length" of array, which is by definition the size of the first dimension. Returns ------- int The size of the first dimension. See Also -------- numpy.ndarray.__len__ : Numpy equivalent property. Examples -------- >>> x = np.zeros((10, 10)) >>> s = COO.from_numpy(x) >>> len(s) 10 """ return self.shape[0] def __sizeof__(self): return self.nbytes __getitem__ = getitem def __str__(self): summary = f"" return self._str_impl(summary) __repr__ = __str__ def _reduce_calc(self, method, axis, keepdims=False, **kwargs): if axis == (None,): axis = tuple(range(self.ndim)) axis = tuple(a if a >= 0 else a + self.ndim for a in axis) neg_axis = tuple(ax for ax in range(self.ndim) if ax not in set(axis)) a = self.transpose(neg_axis + axis) a = a.reshape( ( np.prod([self.shape[d] for d in neg_axis], dtype=np.intp), np.prod([self.shape[d] for d in axis], dtype=np.intp), ) ) data, inv_idx, counts = _grouped_reduce(a.data, a.coords[0], method, **kwargs) n_cols = a.shape[1] arr_attrs = (a, neg_axis, inv_idx) return (data, counts, axis, n_cols, arr_attrs) def _reduce_return(self, data, arr_attrs, result_fill_value): a, neg_axis, inv_idx = arr_attrs coords = a.coords[0:1, inv_idx] out = COO( coords, data, shape=(a.shape[0],), has_duplicates=False, sorted=True, prune=True, fill_value=result_fill_value, ) return out.reshape(tuple(self.shape[d] for d in neg_axis)) def transpose(self, axes=None): """ Returns a new array which has the order of the axes switched. Parameters ---------- axes : Iterable[int], optional The new order of the axes compared to the previous one. Reverses the axes by default. Returns ------- COO The new array with the axes in the desired order. 
See Also -------- :obj:`COO.T` : A quick property to reverse the order of the axes. numpy.ndarray.transpose : Numpy equivalent function. Examples -------- We can change the order of the dimensions of any :obj:`COO` array with this function. >>> x = np.add.outer(np.arange(5), np.arange(5)[::-1]) >>> x # doctest: +NORMALIZE_WHITESPACE array([[4, 3, 2, 1, 0], [5, 4, 3, 2, 1], [6, 5, 4, 3, 2], [7, 6, 5, 4, 3], [8, 7, 6, 5, 4]]) >>> s = COO.from_numpy(x) >>> s.transpose((1, 0)).todense() # doctest: +NORMALIZE_WHITESPACE array([[4, 5, 6, 7, 8], [3, 4, 5, 6, 7], [2, 3, 4, 5, 6], [1, 2, 3, 4, 5], [0, 1, 2, 3, 4]]) Note that by default, this reverses the order of the axes rather than switching the last and second-to-last axes as required by some linear algebra operations. >>> x = np.random.rand(2, 3, 4) >>> s = COO.from_numpy(x) >>> s.transpose().shape (4, 3, 2) """ if axes is None: axes = list(reversed(range(self.ndim))) # Normalize all axes indices to positive values axes = normalize_axis(axes, self.ndim) if len(np.unique(axes)) < len(axes): raise ValueError("repeated axis in transpose") if not len(axes) == self.ndim: raise ValueError("axes don't match array") axes = tuple(axes) if axes == tuple(range(self.ndim)): return self if self._cache is not None: for ax, value in self._cache["transpose"]: if ax == axes: return value shape = tuple(self.shape[ax] for ax in axes) result = COO( self.coords[axes, :], self.data, shape, has_duplicates=False, cache=self._cache is not None, fill_value=self.fill_value, ) if self._cache is not None: self._cache["transpose"].append((axes, result)) return result @property def T(self): """ Returns a new array which has the order of the axes reversed. Returns ------- COO The new array with the axes in the desired order. See Also -------- :obj:`COO.transpose` : A method where you can specify the order of the axes. numpy.ndarray.T : Numpy equivalent property. Examples -------- We can change the order of the dimensions of any :obj:`COO` array with this function. >>> x = np.add.outer(np.arange(5), np.arange(5)[::-1]) >>> x # doctest: +NORMALIZE_WHITESPACE array([[4, 3, 2, 1, 0], [5, 4, 3, 2, 1], [6, 5, 4, 3, 2], [7, 6, 5, 4, 3], [8, 7, 6, 5, 4]]) >>> s = COO.from_numpy(x) >>> s.T.todense() # doctest: +NORMALIZE_WHITESPACE array([[4, 5, 6, 7, 8], [3, 4, 5, 6, 7], [2, 3, 4, 5, 6], [1, 2, 3, 4, 5], [0, 1, 2, 3, 4]]) Note that by default, this reverses the order of the axes rather than switching the last and second-to-last axes as required by some linear algebra operations. >>> x = np.random.rand(2, 3, 4) >>> s = COO.from_numpy(x) >>> s.T.shape (4, 3, 2) """ return self.transpose(tuple(range(self.ndim))[::-1]) @property def mT(self): if self.ndim < 2: raise ValueError("Cannot compute matrix transpose if `ndim < 2`.") axis = list(range(self.ndim)) axis[-1], axis[-2] = axis[-2], axis[-1] return self.transpose(axis) def swapaxes(self, axis1, axis2): """Returns array that has axes axis1 and axis2 swapped. Parameters ---------- axis1 : int first axis to swap axis2 : int second axis to swap Returns ------- COO The new array with the axes axis1 and axis2 swapped. 
Examples -------- >>> x = COO.from_numpy(np.ones((2, 3, 4))) >>> x.swapaxes(0, 2) """ # Normalize all axis1, axis2 to positive values axis1, axis2 = normalize_axis((axis1, axis2), self.ndim) # checks if axis1,2 are in range + raises ValueError axes = list(range(self.ndim)) axes[axis1], axes[axis2] = axes[axis2], axes[axis1] return self.transpose(axes) def dot(self, other): """ Performs the equivalent of :code:`x.dot(y)` for :obj:`COO`. Parameters ---------- other : Union[COO, numpy.ndarray, scipy.sparse.spmatrix] The second operand of the dot product operation. Returns ------- {COO, numpy.ndarray} The result of the dot product. If the result turns out to be dense, then a dense array is returned, otherwise, a sparse array. Raises ------ ValueError If all arguments don't have zero fill-values. See Also -------- dot : Equivalent function for two arguments. :obj:`numpy.dot` : Numpy equivalent function. scipy.sparse.coo_matrix.dot : Scipy equivalent function. Examples -------- >>> x = np.arange(4).reshape((2, 2)) >>> s = COO.from_numpy(x) >>> s.dot(s) # doctest: +SKIP array([[ 2, 3], [ 6, 11]], dtype=int64) """ from .._common import dot return dot(self, other) def __matmul__(self, other): from .._common import matmul try: return matmul(self, other) except NotImplementedError: return NotImplemented def __rmatmul__(self, other): from .._common import matmul try: return matmul(other, self) except NotImplementedError: return NotImplemented def linear_loc(self): """ The nonzero coordinates of a flattened version of this array. Note that the coordinates may be out of order. Returns ------- numpy.ndarray The flattened coordinates. See Also -------- :obj:`numpy.flatnonzero` : Equivalent Numpy function. Examples -------- >>> x = np.eye(5) >>> s = COO.from_numpy(x) >>> s.linear_loc() # doctest: +NORMALIZE_WHITESPACE array([ 0, 6, 12, 18, 24]) >>> np.array_equal(np.flatnonzero(x), s.linear_loc()) True """ from .common import linear_loc return linear_loc(self.coords, self.shape) def flatten(self, order="C"): """ Returns a new :obj:`COO` array that is a flattened version of this array. Returns ------- COO The flattened output array. Notes ----- The :code:`order` parameter is provided just for compatibility with Numpy and isn't actually supported. Examples -------- >>> s = COO.from_numpy(np.arange(10)) >>> s2 = s.reshape((2, 5)).flatten() >>> s2.todense() array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) """ if order not in {"C", None}: raise NotImplementedError("The `order` parameter is notsupported.") return self.reshape(-1) def reshape(self, shape, order="C"): """ Returns a new :obj:`COO` array that is a reshaped version of this array. Parameters ---------- shape : tuple[int] The desired shape of the output array. Returns ------- COO The reshaped output array. See Also -------- numpy.ndarray.reshape : The equivalent Numpy function. Notes ----- The :code:`order` parameter is provided just for compatibility with Numpy and isn't actually supported. 
Examples -------- >>> s = COO.from_numpy(np.arange(25)) >>> s2 = s.reshape((5, 5)) >>> s2.todense() # doctest: +NORMALIZE_WHITESPACE array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) """ shape = tuple(shape) if isinstance(shape, Iterable) else (shape,) if order not in {"C", None}: raise NotImplementedError("The `order` parameter is not supported") if self.shape == shape: return self if any(d == -1 for d in shape): extra = int(self.size / np.prod([d for d in shape if d != -1])) shape = tuple([d if d != -1 else extra for d in shape]) if self.size != reduce(operator.mul, shape, 1): raise ValueError(f"cannot reshape array of size {self.size} into shape {shape}") if self._cache is not None: for sh, value in self._cache["reshape"]: if sh == shape: return value # TODO: this self.size enforces a 2**64 limit to array size linear_loc = self.linear_loc() idx_dtype = self.coords.dtype if shape != () and not can_store(idx_dtype, max(shape)): idx_dtype = np.min_scalar_type(max(shape)) coords = np.empty((len(shape), self.nnz), dtype=idx_dtype) strides = 1 for i, d in enumerate(shape[::-1]): coords[-(i + 1), :] = (linear_loc // strides) % d strides *= d result = COO( coords, self.data, shape, has_duplicates=False, sorted=True, cache=self._cache is not None, fill_value=self.fill_value, ) if self._cache is not None: self._cache["reshape"].append((shape, result)) return result def squeeze(self, axis=None): """ Removes singleton dimensions (axes) from ``x``. Parameters ---------- axis : Union[None, int, Tuple[int, ...]] The axis (or axes) to squeeze. If a specified axis has a size greater than one, a `ValueError` is raised. ``axis=None`` removes all singleton dimensions. Default: ``None``. Returns ------- COO The output array without ``axis`` dimensions. Examples -------- >>> s = COO.from_numpy(np.eye(2)).reshape((2, 1, 2, 1)) >>> s.squeeze().shape (2, 2) >>> s.squeeze(axis=1).shape (2, 2, 1) """ squeezable_dims = tuple([d for d in range(self.ndim) if self.shape[d] == 1]) if axis is None: axis = squeezable_dims if isinstance(axis, int): axis = (axis,) elif isinstance(axis, Iterable): axis = tuple(axis) else: raise ValueError(f"Invalid axis parameter: `{axis}`.") for d in axis: if d not in squeezable_dims: raise ValueError(f"Specified axis `{d}` has a size greater than one: {self.shape[d]}") retained_dims = [d for d in range(self.ndim) if d not in axis] coords = self.coords[retained_dims, :] shape = tuple([s for idx, s in enumerate(self.shape) if idx in retained_dims]) return COO( coords, self.data, shape, has_duplicates=False, sorted=True, cache=self._cache is not None, fill_value=self.fill_value, ) def resize(self, *args, refcheck=True, coords_dtype=np.intp): """ This method changes the shape and size of an array in-place. Parameters ---------- args : tuple, or series of integers The desired shape of the output array. See Also -------- numpy.ndarray.resize : The equivalent Numpy function. 
""" warnings.warn("resize is deprecated on all SpraseArray objects.", DeprecationWarning, stacklevel=1) if len(args) == 1 and isinstance(args[0], tuple): shape = args[0] elif all(isinstance(arg, int) for arg in args): shape = tuple(args) else: raise ValueError("Invalid input") if any(d < 0 for d in shape): raise ValueError("negative dimensions not allowed") new_size = reduce(operator.mul, shape, 1) # TODO: this self.size enforces a 2**64 limit to array size linear_loc = self.linear_loc() end_idx = np.searchsorted(linear_loc, new_size, side="left") linear_loc = linear_loc[:end_idx] idx_dtype = self.coords.dtype if shape != () and not can_store(idx_dtype, max(shape)): idx_dtype = np.min_scalar_type(max(shape)) coords = np.empty((len(shape), len(linear_loc)), dtype=idx_dtype) strides = 1 for i, d in enumerate(shape[::-1]): coords[-(i + 1), :] = (linear_loc // strides) % d strides *= d self.shape = shape self.coords = coords if len(self.data) != len(linear_loc): self.data = self.data[:end_idx].copy() def to_scipy_sparse(self, /, *, accept_fv=None): """ Converts this :obj:`COO` object into a :obj:`scipy.sparse.coo_matrix`. Parameters ---------- accept_fv : scalar or list of scalar, optional The list of accepted fill-values. The default accepts only zero. Returns ------- :obj:`scipy.sparse.coo_matrix` The converted Scipy sparse matrix. Raises ------ ValueError If the array is not two-dimensional. ValueError If all the array doesn't zero fill-values. See Also -------- COO.tocsr : Convert to a :obj:`scipy.sparse.csr_matrix`. COO.tocsc : Convert to a :obj:`scipy.sparse.csc_matrix`. """ import scipy.sparse check_fill_value(self, accept_fv=accept_fv) if self.ndim != 2: raise ValueError("Can only convert a 2-dimensional array to a Scipy sparse matrix.") result = scipy.sparse.coo_matrix((self.data, (self.coords[0], self.coords[1])), shape=self.shape) result.has_canonical_format = True return result def _tocsr(self): import scipy.sparse if self.ndim != 2: raise ValueError("This array must be two-dimensional for this conversion to work.") row, col = self.coords # Pass 3: count nonzeros in each row indptr = np.zeros(self.shape[0] + 1, dtype=np.int64) np.cumsum(np.bincount(row, minlength=self.shape[0]), out=indptr[1:]) return scipy.sparse.csr_matrix((self.data, col, indptr), shape=self.shape) def tocsr(self): """ Converts this array to a :obj:`scipy.sparse.csr_matrix`. Returns ------- scipy.sparse.csr_matrix The result of the conversion. Raises ------ ValueError If the array is not two-dimensional. ValueError If all the array doesn't have zero fill-values. See Also -------- COO.tocsc : Convert to a :obj:`scipy.sparse.csc_matrix`. COO.to_scipy_sparse : Convert to a :obj:`scipy.sparse.coo_matrix`. scipy.sparse.coo_matrix.tocsr : Equivalent Scipy function. """ check_zero_fill_value(self) if self._cache is not None: try: return self._csr except AttributeError: pass try: self._csr = self._csc.tocsr() return self._csr except AttributeError: pass self._csr = csr = self._tocsr() else: csr = self._tocsr() return csr def tocsc(self): """ Converts this array to a :obj:`scipy.sparse.csc_matrix`. Returns ------- scipy.sparse.csc_matrix The result of the conversion. Raises ------ ValueError If the array is not two-dimensional. ValueError If the array doesn't have zero fill-values. See Also -------- COO.tocsr : Convert to a :obj:`scipy.sparse.csr_matrix`. COO.to_scipy_sparse : Convert to a :obj:`scipy.sparse.coo_matrix`. scipy.sparse.coo_matrix.tocsc : Equivalent Scipy function. 
""" check_zero_fill_value(self) if self._cache is not None: try: return self._csc except AttributeError: pass try: self._csc = self._csr.tocsc() return self._csc except AttributeError: pass self._csc = csc = self.tocsr().tocsc() else: csc = self.tocsr().tocsc() return csc def _sort_indices(self): """ Sorts the :obj:`COO.coords` attribute. Also sorts the data in :obj:`COO.data` to match. Examples -------- >>> coords = np.array([[1, 2, 0]], dtype=np.uint8) >>> data = np.array([4, 1, 3], dtype=np.uint8) >>> s = COO(coords, data) >>> s._sort_indices() >>> s.coords # doctest: +NORMALIZE_WHITESPACE array([[0, 1, 2]], dtype=uint8) >>> s.data # doctest: +NORMALIZE_WHITESPACE array([3, 4, 1], dtype=uint8) """ linear = self.linear_loc() if (np.diff(linear) >= 0).all(): # already sorted return order = np.argsort(linear, kind="mergesort") self.coords = self.coords[:, order] self.data = self.data[order] def _sum_duplicates(self): """ Sums data corresponding to duplicates in :obj:`COO.coords`. See Also -------- scipy.sparse.coo_matrix.sum_duplicates : Equivalent Scipy function. Examples -------- >>> coords = np.array([[0, 1, 1, 2]], dtype=np.uint8) >>> data = np.array([6, 5, 2, 2], dtype=np.uint8) >>> s = COO(coords, data) >>> s._sum_duplicates() >>> s.coords # doctest: +NORMALIZE_WHITESPACE array([[0, 1, 2]], dtype=uint8) >>> s.data # doctest: +NORMALIZE_WHITESPACE array([6, 7, 2], dtype=uint8) """ # Inspired by scipy/sparse/coo.py::sum_duplicates # See https://github.com/scipy/scipy/blob/main/LICENSE.txt linear = self.linear_loc() unique_mask = np.diff(linear) != 0 if unique_mask.sum() == len(unique_mask): # already unique return unique_mask = np.append(True, unique_mask) coords = self.coords[:, unique_mask] (unique_inds,) = np.nonzero(unique_mask) data = np.add.reduceat(self.data, unique_inds, dtype=self.data.dtype) self.data = data self.coords = coords def _prune(self): """ Prunes data so that if any fill-values are present, they are removed from both coordinates and data. Examples -------- >>> coords = np.array([[0, 1, 2, 3]]) >>> data = np.array([1, 0, 1, 2]) >>> s = COO(coords, data) >>> s._prune() >>> s.nnz 3 """ mask = ~equivalent(self.data, self.fill_value) self.coords = self.coords[:, mask] self.data = self.data[mask] def broadcast_to(self, shape): """ Performs the equivalent of :obj:`numpy.broadcast_to` for :obj:`COO`. Note that this function returns a new array instead of a view. Parameters ---------- shape : tuple[int] The shape to broadcast the data to. Returns ------- COO The broadcasted sparse array. Raises ------ ValueError If the operand cannot be broadcast to the given shape. See Also -------- :obj:`numpy.broadcast_to` : NumPy equivalent function """ return broadcast_to(self, shape) def maybe_densify(self, max_size=1000, min_density=0.25): """ Converts this :obj:`COO` array to a :obj:`numpy.ndarray` if not too costly. Parameters ---------- max_size : int Maximum number of elements in output min_density : float Minimum density of output Returns ------- numpy.ndarray The dense array. Raises ------ ValueError If the returned array would be too large. Examples -------- Convert a small sparse array to a dense array. >>> s = COO.from_numpy(np.random.rand(2, 3, 4)) >>> x = s.maybe_densify() >>> np.allclose(x, s.todense()) True You can also specify the minimum allowed density or the maximum number of output elements. If both conditions are unmet, this method will throw an error. 
>>> x = np.zeros((5, 5), dtype=np.uint8) >>> x[2, 2] = 1 >>> s = COO.from_numpy(x) >>> s.maybe_densify(max_size=5, min_density=0.25) Traceback (most recent call last): ... ValueError: Operation would require converting large sparse array to dense """ if self.size > max_size and self.density < min_density: raise ValueError("Operation would require converting large sparse array to dense") return self.todense() def nonzero(self): """ Get the indices where this array is nonzero. Returns ------- idx : tuple[numpy.ndarray] The indices where this array is nonzero. See Also -------- :obj:`numpy.ndarray.nonzero` : NumPy equivalent function Raises ------ ValueError If the array doesn't have zero fill-values. Examples -------- >>> s = COO.from_numpy(np.eye(5)) >>> s.nonzero() (array([0, 1, 2, 3, 4]), array([0, 1, 2, 3, 4])) """ check_zero_fill_value(self) if self.ndim == 0: raise ValueError("`nonzero` is undefined for `self.ndim == 0`.") return tuple(self.coords) def asformat(self, format, **kwargs): """ Convert this sparse array to a given format. Parameters ---------- format : str A format string. Returns ------- out : SparseArray The converted array. Raises ------ NotImplementedError If the format isn't supported. """ from .._utils import convert_format format = convert_format(format) if format == "gcxs": from .._compressed import GCXS return GCXS.from_coo(self, **kwargs) if len(kwargs) != 0: raise TypeError(f"Invalid keyword arguments provided: {kwargs}") if format == "coo": return self if format == "dok": from .._dok import DOK return DOK.from_coo(self, **kwargs) return self.asformat("gcxs", **kwargs).asformat(format, **kwargs) def isinf(self): """ Tests each element ``x_i`` of the array to determine if equal to positive or negative infinity. """ new_fill_value = bool(np.isinf(self.fill_value)) new_data = np.isinf(self.data) return COO( self.coords, new_data, shape=self.shape, fill_value=new_fill_value, prune=True, ) def isnan(self): """ Tests each element ``x_i`` of the array to determine whether the element is ``NaN``. """ new_fill_value = bool(np.isnan(self.fill_value)) new_data = np.isnan(self.data) return COO( self.coords, new_data, shape=self.shape, fill_value=new_fill_value, prune=True, ) def as_coo(x, shape=None, fill_value=None, idx_dtype=None): """ Converts any given format to :obj:`COO`. See the "See Also" section for details. Parameters ---------- x : SparseArray or numpy.ndarray or scipy.sparse.spmatrix or Iterable. The item to convert. shape : tuple[int], optional The shape of the output array. Can only be used in case of Iterable. Returns ------- out : COO The converted :obj:`COO` array. See Also -------- SparseArray.asformat : A utility function to convert between formats in this library. COO.from_numpy : Convert a Numpy array to :obj:`COO`. COO.from_scipy_sparse : Convert a SciPy sparse matrix to :obj:`COO`. COO.from_iter : Convert an iterable to :obj:`COO`. 
""" from .._common import _is_scipy_sparse_obj if hasattr(x, "shape") and shape is not None: raise ValueError("Cannot provide a shape in combination with something that already has a shape.") if hasattr(x, "fill_value") and fill_value is not None: raise ValueError("Cannot provide a fill-value in combination with something that already has a fill-value.") if isinstance(x, SparseArray): return x.asformat("coo") if isinstance(x, np.ndarray) or np.isscalar(x): return COO.from_numpy(x, fill_value=fill_value, idx_dtype=idx_dtype) if _is_scipy_sparse_obj(x): return COO.from_scipy_sparse(x) if isinstance(x, Iterable | Iterator): return COO.from_iter(x, shape=shape, fill_value=fill_value) raise NotImplementedError( f"Format not supported for conversion. Supplied type is " f"{type(x)}, see help(sparse.as_coo) for supported formats." ) @numba.jit(nopython=True, nogil=True) # pragma: no cover def _calc_counts_invidx(groups): inv_idx = [] counts = [] if len(groups) == 0: return ( np.array(inv_idx, dtype=groups.dtype), np.array(counts, dtype=groups.dtype), ) inv_idx.append(0) last_group = groups[0] for i in range(1, len(groups)): if groups[i] != last_group: counts.append(i - inv_idx[-1]) inv_idx.append(i) last_group = groups[i] counts.append(len(groups) - inv_idx[-1]) return (np.array(inv_idx, dtype=groups.dtype), np.array(counts, dtype=groups.dtype)) def _grouped_reduce(x, groups, method, **kwargs): """ Performs a :code:`ufunc` grouped reduce. Parameters ---------- x : np.ndarray The data to reduce. groups : np.ndarray The groups the data belongs to. The groups must be contiguous. method : np.ufunc The :code:`ufunc` to use to perform the reduction. **kwargs : dict The kwargs to pass to the :code:`ufunc`'s :code:`reduceat` function. Returns ------- result : np.ndarray The result of the grouped reduce operation. inv_idx : np.ndarray The index of the first element where each group is found. counts : np.ndarray The number of elements in each group. """ # Partial credit to @shoyer # Ref: https://gist.github.com/shoyer/f538ac78ae904c936844 inv_idx, counts = _calc_counts_invidx(groups) result = method.reduceat(x, inv_idx, **kwargs) return result, inv_idx, counts sparse-0.16.0a9/sparse/numba_backend/_coo/indexing.py000066400000000000000000000502771463475501500225160ustar00rootroot00000000000000from itertools import zip_longest from numbers import Integral import numba import numpy as np from .._slicing import normalize_index from .._utils import _zero_of_dtype, equivalent def getitem(x, index): """ This function implements the indexing functionality for COO. The overall algorithm has three steps: 1. Normalize the index to canonical form. Function: normalize_index 2. Get the mask, which is a list of integers corresponding to the indices in coords/data for the output data. Function: _mask 3. Transform the coordinates to what they will be in the output. Parameters ---------- x : COO The array to apply the indexing operation on. index : {tuple, str} The index into the array. """ from .core import COO # If string, this is an index into an np.void # Custom dtype. 
if isinstance(index, str): data = x.data[index] idx = np.where(data) data = data[idx].flatten() coords = list(x.coords[:, idx[0]]) coords.extend(idx[1:]) fill_value_idx = np.asarray(x.fill_value[index]).flatten() fill_value = fill_value_idx[0] if fill_value_idx.size else _zero_of_dtype(data.dtype)[()] if not equivalent(fill_value, fill_value_idx).all(): raise ValueError("Fill-values in the array are inconsistent.") return COO( coords, data, shape=x.shape + x.data.dtype[index].shape, has_duplicates=False, sorted=True, fill_value=fill_value, ) # Otherwise, convert into a tuple. if not isinstance(index, tuple): index = (index,) # Check if the last index is an ellipsis. last_ellipsis = len(index) > 0 and index[-1] is Ellipsis # Normalize the index into canonical form. index = normalize_index(index, x.shape) # zip_longest so things like x[..., None] are picked up. if len(index) != 0 and all( isinstance(ind, slice) and ind == slice(0, dim, 1) for ind, dim in zip_longest(index, x.shape) ): return x # Get the mask mask, adv_idx = _mask(x.coords, index, x.shape) # Get the length of the mask n = len(range(mask.start, mask.stop, mask.step)) if isinstance(mask, slice) else len(mask) coords = [] shape = [] i = 0 sorted = adv_idx is None or adv_idx.pos == 0 adv_idx_added = False for ind in index: # Nothing is added to shape or coords if the index is an integer. if isinstance(ind, Integral): i += 1 continue # Add to the shape and transform the coords in the case of a slice. if isinstance(ind, slice): shape.append(len(range(ind.start, ind.stop, ind.step))) coords.append((x.coords[i, mask] - ind.start) // ind.step) i += 1 if ind.step < 0: sorted = False # Add the index and shape for the advanced index. if isinstance(ind, np.ndarray): if not adv_idx_added: shape.append(adv_idx.length) coords.append(adv_idx.idx) adv_idx_added = True i += 1 # Add a dimension for None. if ind is None: coords.append(np.zeros(n, dtype=np.intp)) shape.append(1) # Join all the transformed coords. if coords: coords = np.stack(coords, axis=0) else: # If index result is a scalar, return a 0-d COO or # a scalar depending on whether the last index is an ellipsis. if last_ellipsis: coords = np.empty((0, n), dtype=np.uint8) else: if n != 0: return x.data[mask][0] return x.fill_value shape = tuple(shape) data = x.data[mask] return COO( coords, data, shape=shape, has_duplicates=False, sorted=sorted, fill_value=x.fill_value, ) def _mask(coords, indices, shape): indices = _prune_indices(indices, shape) indices, adv_idx, adv_idx_pos = _separate_adv_indices(indices) if len(adv_idx) != 0: if len(adv_idx) != 1: # Ensure if multiple advanced indices are passed, all are of the same length # Also check each advanced index to ensure each is only a one-dimensional iterable adv_ix_len = len(adv_idx[0]) for ai in adv_idx: if len(ai) != adv_ix_len: raise IndexError( "shape mismatch: indexing arrays could not be broadcast together. Ensure all indexing arrays " "are of the same length." 
) if ai.ndim != 1: raise IndexError("Only one-dimensional iterable indices supported.") mask, aidxs = _compute_multi_axis_multi_mask( coords, _ind_ar_from_indices(indices), np.array(adv_idx, dtype=np.intp), np.array(adv_idx_pos, dtype=np.intp), ) return mask, _AdvIdxInfo(aidxs, adv_idx_pos, adv_ix_len) adv_idx = adv_idx[0] adv_idx_pos = adv_idx_pos[0] if adv_idx.ndim != 1: raise IndexError("Only one-dimensional iterable indices supported.") mask, aidxs = _compute_multi_mask(coords, _ind_ar_from_indices(indices), adv_idx, adv_idx_pos) return mask, _AdvIdxInfo(aidxs, adv_idx_pos, len(adv_idx)) mask, is_slice = _compute_mask(coords, _ind_ar_from_indices(indices)) if is_slice: return slice(mask[0], mask[1], 1), None return mask, None def _ind_ar_from_indices(indices): """ Computes an index "array" from indices, such that ``indices[i]`` is transformed to ``ind_ar[i]`` and ``ind_ar[i].shape == (3,)``. It has the format ``[start, stop, step]``. Integers are converted into steps as well. Parameters ---------- indices : Iterable Input indices (slices and integers) Returns ------- ind_ar : np.ndarray The output array. Examples -------- >>> _ind_ar_from_indices([1]) array([[1, 2, 1]]) >>> _ind_ar_from_indices([slice(5, 7, 2)]) array([[5, 7, 2]]) """ ind_ar = np.empty((len(indices), 3), dtype=np.intp) for i, idx in enumerate(indices): if isinstance(idx, slice): ind_ar[i] = [idx.start, idx.stop, idx.step] elif isinstance(idx, Integral): ind_ar[i] = [idx, idx + 1, 1] return ind_ar def _prune_indices(indices, shape, prune_none=True): """ Gets rid of the indices that do not contribute to the overall mask, e.g. None and full slices. Parameters ---------- indices : tuple The indices to the array. shape : tuple[int] The shape of the array. Returns ------- indices : tuple The filtered indices. Examples -------- >>> _prune_indices((None, 5), (10,)) # None won't affect the mask [5] >>> _prune_indices((slice(0, 10, 1),), (10,)) # Full slices don't affect the mask [] """ if prune_none: indices = [idx for idx in indices if idx is not None] i = 0 for idx, sh in zip(indices[::-1], shape[::-1], strict=True): if not isinstance(idx, slice): break if idx.start == 0 and idx.stop == sh and idx.step == 1: i += 1 continue if idx.start == sh - 1 and idx.stop == -1 and idx.step == -1: i += 1 continue break if i != 0: indices = indices[:-i] return indices def _separate_adv_indices(indices): """ Separates advanced from normal indices. Parameters ---------- indices : list The input indices Returns ------- new_idx : list The normal indices. adv_idx : list The advanced indices. adv_idx_pos : list The positions of the advanced indices. """ adv_idx_pos = [] new_idx = [] adv_idx = [] for i, idx in enumerate(indices): if isinstance(idx, np.ndarray): adv_idx.append(idx) adv_idx_pos.append(i) else: new_idx.append(idx) return new_idx, adv_idx, adv_idx_pos @numba.jit(nopython=True, nogil=True) def _compute_multi_axis_multi_mask(coords, indices, adv_idx, adv_idx_pos): # pragma: no cover """ Computes a mask with the advanced index, and also returns the advanced index dimension. Parameters ---------- coords : np.ndarray Coordinates of the input array. indices : np.ndarray The indices in slice format. adv_idx : np.ndarray List of advanced indices. adv_idx_pos : np.ndarray The position of the advanced indices. Returns ------- mask : np.ndarray The mask. aidxs : np.ndarray The advanced array index. 
""" n_adv_idx = len(adv_idx_pos) mask = numba.typed.List.empty_list(numba.types.intp) a_indices = numba.typed.List.empty_list(numba.types.intp) full_idx = np.empty((len(indices) + len(adv_idx_pos), 3), dtype=np.intp) # Get location of non-advanced indices if len(indices) != 0: ixx = 0 for ix in range(coords.shape[0]): isin = False for ax in adv_idx_pos: if ix == ax: isin = True break if not isin: full_idx[ix] = indices[ixx] ixx += 1 for i in range(len(adv_idx[0])): for ii in range(n_adv_idx): full_idx[adv_idx_pos[ii]] = [adv_idx[ii][i], adv_idx[ii][i] + 1, 1] partial_mask, is_slice = _compute_mask(coords, full_idx) if is_slice: slice_mask = numba.typed.List.empty_list(numba.types.intp) for j in range(partial_mask[0], partial_mask[1]): slice_mask.append(j) partial_mask = array_from_list_intp(slice_mask) for j in range(len(partial_mask)): mask.append(partial_mask[j]) a_indices.append(i) return array_from_list_intp(mask), array_from_list_intp(a_indices) @numba.jit(nopython=True, nogil=True) def _compute_multi_mask(coords, indices, adv_idx, adv_idx_pos): # pragma: no cover """ Computes a mask with the advanced index, and also returns the advanced index dimension. Parameters ---------- coords : np.ndarray Coordinates of the input array. indices : np.ndarray The indices in slice format. adv_idx : list(int) The advanced index. adv_idx_pos : list(int) The position of the advanced index. Returns ------- mask : np.ndarray The mask. aidxs : np.ndarray The advanced array index. """ mask = numba.typed.List.empty_list(numba.types.intp) a_indices = numba.typed.List.empty_list(numba.types.intp) full_idx = np.empty((len(indices) + 1, 3), dtype=np.intp) full_idx[:adv_idx_pos] = indices[:adv_idx_pos] full_idx[adv_idx_pos + 1 :] = indices[adv_idx_pos:] for i, aidx in enumerate(adv_idx): full_idx[adv_idx_pos] = [aidx, aidx + 1, 1] partial_mask, is_slice = _compute_mask(coords, full_idx) if is_slice: slice_mask = numba.typed.List.empty_list(numba.types.intp) for j in range(partial_mask[0], partial_mask[1]): slice_mask.append(j) partial_mask = array_from_list_intp(slice_mask) for j in range(len(partial_mask)): mask.append(partial_mask[j]) a_indices.append(i) return array_from_list_intp(mask), array_from_list_intp(a_indices) @numba.jit(nopython=True, nogil=True) def _compute_mask(coords, indices): # pragma: no cover """ Gets the mask for the coords given the indices in slice format. Works with either start-stop ranges of matching indices into coords called "pairs" (start-stop pairs) or filters the mask directly, based on which is faster. Exploits the structure in sorted coords, which is that for a constant value of coords[i - 1], coords[i - 2] and so on, coords[i] is sorted. Concretely, ``coords[i, coords[i - 1] == v1 & coords[i - 2] = v2, ...]`` is always sorted. It uses this sortedness to find sub-pairs for each dimension given the previous, and so on. This is efficient for small slices or ints, but not for large ones. After it detects that working with pairs is rather inefficient (or after going through each possible index), it constructs a filtered mask from the start-stop pairs. Parameters ---------- coords : np.ndarray The coordinates of the array. indices : np.ndarray The indices in the form of slices such that indices[:, 0] are starts, indices[:, 1] are stops and indices[:, 2] are steps. Returns ------- mask : np.ndarray The starts and stops in the mask. is_slice : bool Whether or not the array represents a continuous slice. 
Examples -------- Let's create some mock coords and indices >>> import numpy as np >>> coords = np.array([[0, 0, 1, 1, 2, 2]]) >>> indices = np.array([[0, 3, 2]]) # Equivalent to slice(0, 3, 2) Now let's get the mask. Notice that the indices of ``0`` and ``2`` are matched. >>> _compute_mask(coords, indices) (array([0, 1, 4, 5]), False) Now, let's try with a more "continuous" slice. Matches ``0`` and ``1``. >>> indices = np.array([[0, 2, 1]]) >>> _compute_mask(coords, indices) (array([0, 4]), True) This is equivalent to mask being ``slice(0, 4, 1)``. """ # Set the initial mask to be the entire range of coordinates. starts = numba.typed.List.empty_list(numba.types.intp) starts.append(0) stops = numba.typed.List.empty_list(numba.types.intp) stops.append(coords.shape[1]) n_matches = np.intp(coords.shape[1]) i = 0 while i < len(indices): # Guesstimate whether working with pairs is more efficient or # working with the mask directly. # One side is the estimate of time taken for binary searches # (n_searches * log(avg_length)) # The other is an estimated time of a linear filter for the mask. n_pairs = len(starts) n_current_slices = len(range(indices[i, 0], indices[i, 1], indices[i, 2])) * n_pairs + 2 if n_current_slices * np.log(n_current_slices / max(n_pairs, 1)) > n_matches + n_pairs: break # For each of the pairs, search inside the coordinates for other # matching sub-pairs. # This gets the start-end coordinates in coords for each 'sub-array' # Which would come out of indexing a single integer. starts, stops, n_matches = _get_mask_pairs(starts, stops, coords[i], indices[i]) i += 1 # Combine adjacent pairs starts, stops = _join_adjacent_pairs(starts, stops) # If just one pair is left over, treat it as a slice. if i == len(indices) and len(starts) == 1: return np.array([starts[0], stops[0]]), True # Convert start-stop pairs into mask, filtering by remaining # coordinates. mask = _filter_pairs(starts, stops, coords[i:], indices[i:]) return array_from_list_intp(mask), False @numba.jit(nopython=True, nogil=True) def _get_mask_pairs(starts_old, stops_old, c, idx): # pragma: no cover """ Gets the pairs for a following dimension given the pairs for a dimension. For each pair, it searches in the following dimension for matching coords and returns those. The total combined length of all pairs is returned to help with the performance guesstimate. Parameters ---------- starts_old, stops_old : list[int] The starts and stops from the previous index. c : np.ndarray The coords for this index's dimension. idx : np.ndarray The index in the form of a slice. idx[0], idx[1], idx[2] = start, stop, step Returns ------- starts, stops: list The starts and stops after applying the current index. n_matches : int The sum of elements in all ranges. Examples -------- >>> c = np.array([1, 2, 1, 2, 1, 1, 2, 2]) >>> starts_old = numba.typed.List() ... starts_old.append(4) >>> stops_old = numba.typed.List() ... stops_old.append(8) >>> idx = np.array([1, 2, 1]) >>> _get_mask_pairs(starts_old, stops_old, c, idx) (ListType[int64]([4]), ListType[int64]([6]), 2) """ starts = numba.typed.List.empty_list(numba.types.intp) stops = numba.typed.List.empty_list(numba.types.intp) n_matches = np.intp(0) for j in range(len(starts_old)): # For each matching "integer" in the slice, search within the "sub-coords" # Using binary search. 
for p_match in range(idx[0], idx[1], idx[2]): start = np.searchsorted(c[starts_old[j] : stops_old[j]], p_match, side="left") + starts_old[j] stop = np.searchsorted(c[starts_old[j] : stops_old[j]], p_match, side="right") + starts_old[j] if start != stop: starts.append(start) stops.append(stop) n_matches += stop - start return starts, stops, n_matches @numba.jit(nopython=True, nogil=True) def _filter_pairs(starts, stops, coords, indices): # pragma: no cover """ Converts all the pairs into a single integer mask, additionally filtering by the indices. Parameters ---------- starts, stops : list[int] The starts and stops to convert into an array. coords : np.ndarray The coordinates to filter by. indices : np.ndarray The indices in the form of slices such that indices[:, 0] are starts, indices[:, 1] are stops and indices[:, 2] are steps. Returns ------- mask : list The output integer mask. Examples -------- >>> import numpy as np >>> starts = numba.typed.List() ... starts.append(2) >>> stops = numba.typed.List() ... stops.append(7) >>> coords = np.array([[0, 1, 2, 3, 4, 5, 6, 7]]) >>> indices = np.array([[2, 8, 2]]) # Start, stop, step pairs >>> _filter_pairs(starts, stops, coords, indices) ListType[int64]([2, 4, 6]) """ mask = numba.typed.List.empty_list(numba.types.intp) # For each pair, for i in range(len(starts)): # For each element match within the pair range for j in range(starts[i], stops[i]): match = True # Check if it matches all indices for k in range(len(indices)): idx = indices[k] elem = coords[k, j] match &= (elem - idx[0]) % idx[2] == 0 and ( (idx[2] > 0 and idx[0] <= elem < idx[1]) or (idx[2] < 0 and idx[0] >= elem > idx[1]) ) # and append to the mask if so. if match: mask.append(j) return mask @numba.jit(nopython=True, nogil=True) def _join_adjacent_pairs(starts_old, stops_old): # pragma: no cover """ Joins adjacent pairs into one. For example, 2-5 and 5-7 will reduce to 2-7 (a single pair). This may help in returning a slice in the end which could be faster. Parameters ---------- starts_old, stops_old : list[int] The input starts and stops Returns ------- starts, stops : list[int] The reduced starts and stops. Examples -------- >>> starts = numba.typed.List() ... starts.append(2) ... starts.append(5) >>> stops = numba.typed.List() ... stops.append(5) ... stops.append(7) >>> _join_adjacent_pairs(starts, stops) (ListType[int64]([2]), ListType[int64]([7])) """ if len(starts_old) <= 1: return starts_old, stops_old starts = numba.typed.List.empty_list(numba.types.intp) starts.append(starts_old[0]) stops = numba.typed.List.empty_list(numba.types.intp) for i in range(1, len(starts_old)): if starts_old[i] != stops_old[i - 1]: starts.append(starts_old[i]) stops.append(stops_old[i - 1]) stops.append(stops_old[-1]) return starts, stops @numba.jit(nopython=True, nogil=True) def array_from_list_intp(x): # pragma: no cover n = len(x) a = np.empty(n, dtype=np.intp) for i in range(n): a[i] = x[i] return a class _AdvIdxInfo: def __init__(self, idx, pos, length): self.idx = idx self.pos = pos self.length = length sparse-0.16.0a9/sparse/numba_backend/_coo/numba_extension.py000066400000000000000000000226351463475501500241040ustar00rootroot00000000000000""" Numba support for COO objects. 
For now, this just supports attribute access """ import contextlib import numba from numba.core import cgutils, types from numba.core.imputils import impl_ret_borrowed, lower_builtin, lower_constant from numba.core.typing.typeof import typeof_impl from numba.extending import ( NativeValue, box, make_attribute_wrapper, models, register_model, type_callable, unbox, ) import numpy as np from .._utils import _zero_of_dtype from . import COO __all__ = ["COOType"] class COOType(types.Type): def __init__(self, data_dtype: np.dtype, coords_dtype: np.dtype, ndim: int): assert isinstance(data_dtype, np.dtype) assert isinstance(coords_dtype, np.dtype) self.data_dtype = data_dtype self.coords_dtype = coords_dtype self.ndim = ndim super().__init__( name=f"COOType[{numba.from_dtype(data_dtype)!r}, {numba.from_dtype(coords_dtype)!r}, {ndim!r}]" ) @property def key(self): return self.data_dtype, self.coords_dtype, self.ndim @property def data_type(self): return numba.from_dtype(self.data_dtype)[:] @property def coords_type(self): return numba.from_dtype(self.coords_dtype)[:, :] @property def shape_type(self): dt = numba.np.numpy_support.from_dtype(self.coords_dtype) return types.UniTuple(dt, self.ndim) @property def fill_value_type(self): return numba.from_dtype(self.data_dtype) @typeof_impl.register(COO) def _typeof_COO(val: COO, c) -> COOType: return COOType(data_dtype=val.data.dtype, coords_dtype=val.coords.dtype, ndim=val.ndim) @register_model(COOType) class COOModel(models.StructModel): def __init__(self, dmm, fe_type): members = [ ("data", fe_type.data_type), ("coords", fe_type.coords_type), ("shape", fe_type.shape_type), ("fill_value", fe_type.fill_value_type), ] models.StructModel.__init__(self, dmm, fe_type, members) @type_callable(COO) def type_COO(context): # TODO: accept a fill_value kwarg def typer(coords, data, shape): return COOType( coords_dtype=numba.np.numpy_support.as_dtype(coords.dtype), data_dtype=numba.np.numpy_support.as_dtype(data.dtype), ndim=len(shape), ) return typer @lower_builtin(COO, types.Any, types.Any, types.Any) def impl_COO(context, builder, sig, args): typ = sig.return_type coords, data, shape = args coo = cgutils.create_struct_proxy(typ)(context, builder) coo.coords = coords coo.data = data coo.shape = shape coo.fill_value = context.get_constant_generic(builder, typ.fill_value_type, _zero_of_dtype(typ.data_dtype)) return impl_ret_borrowed(context, builder, sig.return_type, coo._getvalue()) @lower_constant(COOType) def lower_constant_COO(context, builder, typ, pyval): coords = context.get_constant_generic(builder, typ.coords_type, pyval.coords) data = context.get_constant_generic(builder, typ.data_type, pyval.data) shape = context.get_constant_generic(builder, typ.shape_type, pyval.shape) fill_value = context.get_constant_generic(builder, typ.fill_value_type, pyval.fill_value) return impl_ret_borrowed( context, builder, typ, cgutils.pack_struct(builder, (data, coords, shape, fill_value)), ) @contextlib.contextmanager def local_return(builder): """ Create a scope which can be broken from locally. 
Used as:: with local_return(c.builder) as ret: with c.builder.if(abort_cond): ret() do_some_other_stuff # no ret needed at the end, it's implied stuff_that_runs_unconditionally """ end_blk = builder.append_basic_block("end") def return_(): builder.branch(end_blk) yield return_ builder.branch(end_blk) # make sure all remaining code goes to the next block builder.position_at_end(end_blk) def _unbox_native_field(typ, obj, field_name: str, c): ret_ptr = cgutils.alloca_once(c.builder, c.context.get_value_type(typ)) is_error_ptr = cgutils.alloca_once_value(c.builder, cgutils.false_bit) fail_obj = c.context.get_constant_null(typ) with local_return(c.builder) as ret: fail_blk = c.builder.append_basic_block("fail") with c.builder.goto_block(fail_blk): c.builder.store(cgutils.true_bit, is_error_ptr) c.builder.store(fail_obj, ret_ptr) ret() field_obj = c.pyapi.object_getattr_string(obj, field_name) with cgutils.if_unlikely(c.builder, cgutils.is_null(c.builder, field_obj)): c.builder.branch(fail_blk) field_native = c.unbox(typ, field_obj) c.pyapi.decref(field_obj) with cgutils.if_unlikely(c.builder, field_native.is_error): c.builder.branch(fail_blk) c.builder.store(cgutils.false_bit, is_error_ptr) c.builder.store(field_native.value, ret_ptr) return NativeValue(c.builder.load(ret_ptr), is_error=c.builder.load(is_error_ptr)) @unbox(COOType) def unbox_COO(typ: COOType, obj: COO, c) -> NativeValue: ret_ptr = cgutils.alloca_once(c.builder, c.context.get_value_type(typ)) is_error_ptr = cgutils.alloca_once_value(c.builder, cgutils.false_bit) fail_obj = c.context.get_constant_null(typ) with local_return(c.builder) as ret: fail_blk = c.builder.append_basic_block("fail") with c.builder.goto_block(fail_blk): c.builder.store(cgutils.true_bit, is_error_ptr) c.builder.store(fail_obj, ret_ptr) ret() data = _unbox_native_field(typ.data_type, obj, "data", c) with cgutils.if_unlikely(c.builder, data.is_error): c.builder.branch(fail_blk) coords = _unbox_native_field(typ.coords_type, obj, "coords", c) with cgutils.if_unlikely(c.builder, coords.is_error): c.builder.branch(fail_blk) shape = _unbox_native_field(typ.shape_type, obj, "shape", c) with cgutils.if_unlikely(c.builder, shape.is_error): c.builder.branch(fail_blk) fill_value = _unbox_native_field(typ.fill_value_type, obj, "fill_value", c) with cgutils.if_unlikely(c.builder, fill_value.is_error): c.builder.branch(fail_blk) coo = cgutils.create_struct_proxy(typ)(c.context, c.builder) coo.coords = coords.value coo.data = data.value coo.shape = shape.value coo.fill_value = fill_value.value c.builder.store(cgutils.false_bit, is_error_ptr) c.builder.store(coo._getvalue(), ret_ptr) return NativeValue(c.builder.load(ret_ptr), is_error=c.builder.load(is_error_ptr)) @box(COOType) def box_COO(typ: COOType, val, c) -> COO: ret_ptr = cgutils.alloca_once(c.builder, c.pyapi.pyobj) fail_obj = c.pyapi.get_null_object() coo = cgutils.create_struct_proxy(typ)(c.context, c.builder, value=val) with local_return(c.builder) as ret: data_obj = c.box(typ.data_type, coo.data) with cgutils.if_unlikely(c.builder, cgutils.is_null(c.builder, data_obj)): c.builder.store(fail_obj, ret_ptr) ret() coords_obj = c.box(typ.coords_type, coo.coords) with cgutils.if_unlikely(c.builder, cgutils.is_null(c.builder, coords_obj)): c.pyapi.decref(data_obj) c.builder.store(fail_obj, ret_ptr) ret() shape_obj = c.box(typ.shape_type, coo.shape) with cgutils.if_unlikely(c.builder, cgutils.is_null(c.builder, shape_obj)): c.pyapi.decref(coords_obj) c.pyapi.decref(data_obj) c.builder.store(fail_obj, ret_ptr) ret() 
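        # Added explanatory comment: the remaining fields are boxed following
        # the same pattern as above; if any boxing call returns NULL, every
        # Python object created so far is decref'd, the NULL failure object is
        # stored in ``ret_ptr``, and ``ret()`` branches to the end block.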
fill_value_obj = c.box(typ.fill_value_type, coo.fill_value) with cgutils.if_unlikely(c.builder, cgutils.is_null(c.builder, fill_value_obj)): c.pyapi.decref(shape_obj) c.pyapi.decref(coords_obj) c.pyapi.decref(data_obj) c.builder.store(fail_obj, ret_ptr) ret() class_obj = c.pyapi.unserialize(c.pyapi.serialize_object(COO)) with cgutils.if_unlikely(c.builder, cgutils.is_null(c.builder, class_obj)): c.pyapi.decref(shape_obj) c.pyapi.decref(coords_obj) c.pyapi.decref(data_obj) c.pyapi.decref(fill_value_obj) c.builder.store(fail_obj, ret_ptr) ret() args = c.pyapi.tuple_pack([coords_obj, data_obj, shape_obj]) c.pyapi.decref(shape_obj) c.pyapi.decref(coords_obj) c.pyapi.decref(data_obj) with cgutils.if_unlikely(c.builder, cgutils.is_null(c.builder, args)): c.pyapi.decref(fill_value_obj) c.pyapi.decref(class_obj) c.builder.store(fail_obj, ret_ptr) ret() kwargs = c.pyapi.dict_pack([("fill_value", fill_value_obj)]) c.pyapi.decref(fill_value_obj) with cgutils.if_unlikely(c.builder, cgutils.is_null(c.builder, kwargs)): c.pyapi.decref(class_obj) c.builder.store(fail_obj, ret_ptr) ret() c.builder.store(c.pyapi.call(class_obj, args, kwargs), ret_ptr) c.pyapi.decref(class_obj) c.pyapi.decref(args) c.pyapi.decref(kwargs) return c.builder.load(ret_ptr) make_attribute_wrapper(COOType, "data", "data") make_attribute_wrapper(COOType, "coords", "coords") make_attribute_wrapper(COOType, "shape", "shape") make_attribute_wrapper(COOType, "fill_value", "fill_value") sparse-0.16.0a9/sparse/numba_backend/_dok.py000066400000000000000000000375321463475501500207050ustar00rootroot00000000000000from collections.abc import Iterable from numbers import Integral import numpy as np from numpy.lib.mixins import NDArrayOperatorsMixin from ._slicing import normalize_index from ._sparse_array import SparseArray from ._utils import equivalent class DOK(SparseArray, NDArrayOperatorsMixin): """ A class for building sparse multidimensional arrays. Parameters ---------- shape : tuple[int] (DOK.ndim,) The shape of the array. data : dict, optional The key-value pairs for the data in this array. dtype : np.dtype, optional The data type of this array. If left empty, it is inferred from the first element. fill_value : scalar, optional The fill value of this array. Attributes ---------- dtype : numpy.dtype The datatype of this array. Can be :code:`None` if no elements have been set yet. shape : tuple[int] The shape of this array. data : dict The keys of this dictionary contain all the indices and the values contain the nonzero entries. See Also -------- COO : A read-only sparse array. Examples -------- You can create :obj:`DOK` objects from Numpy arrays. >>> x = np.eye(5, dtype=np.uint8) >>> x[2, 3] = 5 >>> s = DOK.from_numpy(x) >>> s You can also create them from just shapes, and use slicing assignment. >>> s2 = DOK((5, 5), dtype=np.int64) >>> s2[1:3, 1:3] = [[4, 5], [6, 7]] >>> s2 You can convert :obj:`DOK` arrays to :obj:`COO` arrays, or :obj:`numpy.ndarray` objects. >>> from sparse import COO >>> s3 = COO(s2) >>> s3 >>> s2.todense() # doctest: +NORMALIZE_WHITESPACE array([[0, 0, 0, 0, 0], [0, 4, 5, 0, 0], [0, 6, 7, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]) >>> s4 = COO.from_numpy(np.eye(4, dtype=np.uint8)) >>> s4 >>> s5 = DOK.from_coo(s4) >>> s5 You can also create :obj:`DOK` arrays from a shape and a dict of values. Zeros are automatically ignored. >>> values = { ... (1, 2, 3): 4, ... (3, 2, 1): 0, ... 
} >>> s6 = DOK((5, 5, 5), values) >>> s6 """ def __init__(self, shape, data=None, dtype=None, fill_value=None): from ._common import _is_scipy_sparse_obj from ._coo import COO self.data = {} if isinstance(shape, COO): ar = DOK.from_coo(shape) self._make_shallow_copy_of(ar) return if isinstance(shape, np.ndarray): ar = DOK.from_numpy(shape) self._make_shallow_copy_of(ar) return if _is_scipy_sparse_obj(shape): ar = DOK.from_scipy_sparse(shape) self._make_shallow_copy_of(ar) return self.dtype = np.dtype(dtype) if not data: data = {} super().__init__(shape, fill_value=fill_value) if isinstance(data, dict): if not dtype: if not len(data): self.dtype = np.dtype("float64") else: self.dtype = np.result_type(*(np.asarray(x).dtype for x in data.values())) for c, d in data.items(): self[c] = d else: raise ValueError("data must be a dict.") @classmethod def from_scipy_sparse(cls, x, /, *, fill_value=None): """ Create a :obj:`DOK` array from a :obj:`scipy.sparse.spmatrix`. Parameters ---------- x : scipy.sparse.spmatrix The matrix to convert. fill_value : scalar The fill-value to use when converting. Returns ------- DOK The equivalent :obj:`DOK` array. Examples -------- >>> x = scipy.sparse.rand(6, 3, density=0.2) >>> s = DOK.from_scipy_sparse(x) >>> np.array_equal(x.todense(), s.todense()) True """ from sparse import COO return COO.from_scipy_sparse(x, fill_value=fill_value).asformat(cls) @classmethod def from_coo(cls, x): """ Get a :obj:`DOK` array from a :obj:`COO` array. Parameters ---------- x : COO The array to convert. Returns ------- DOK The equivalent :obj:`DOK` array. Examples -------- >>> from sparse import COO >>> s = COO.from_numpy(np.eye(4)) >>> s2 = DOK.from_coo(s) >>> s2 """ ar = cls(x.shape, dtype=x.dtype, fill_value=x.fill_value) for c, d in zip(x.coords.T, x.data, strict=True): ar.data[tuple(c)] = d return ar def to_coo(self): """ Convert this :obj:`DOK` array to a :obj:`COO` array. Returns ------- COO The equivalent :obj:`COO` array. Examples -------- >>> s = DOK((5, 5)) >>> s[1:3, 1:3] = [[4, 5], [6, 7]] >>> s >>> s2 = s.to_coo() >>> s2 """ from ._coo import COO return COO(self) @classmethod def from_numpy(cls, x): """ Get a :obj:`DOK` array from a Numpy array. Parameters ---------- x : np.ndarray The array to convert. Returns ------- DOK The equivalent :obj:`DOK` array. Examples -------- >>> s = DOK.from_numpy(np.eye(4)) >>> s """ ar = cls(x.shape, dtype=x.dtype) coords = np.nonzero(x) data = x[coords] for c in zip(data, *coords, strict=True): d, c = c[0], c[1:] ar.data[c] = d return ar @property def nnz(self): """ The number of nonzero elements in this array. Returns ------- int The number of nonzero elements. See Also -------- COO.nnz : Equivalent :obj:`COO` array property. numpy.count_nonzero : A similar Numpy function. scipy.sparse.dok_matrix.nnz : The Scipy equivalent property. Examples -------- >>> values = { ... (1, 2, 3): 4, ... (3, 2, 1): 0, ... } >>> s = DOK((5, 5, 5), values) >>> s.nnz 1 """ return len(self.data) @property def format(self): """ The storage format of this array. Returns ------- str The storage format of this array. See Also ------- scipy.sparse.dok_matrix.format : The Scipy equivalent property. Examples ------- >>> import sparse >>> s = sparse.random((5, 5), density=0.2, format="dok") >>> s.format 'dok' >>> t = sparse.random((5, 5), density=0.2, format="coo") >>> t.format 'coo' """ return "dok" @property def nbytes(self): """ The number of bytes taken up by this object. 
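# Usage sketch for the DOK constructors defined above (not part of this file).
# It only exercises behaviour documented in the docstrings: incremental and
# fancy assignment, plus conversion between DOK, COO and dense NumPy arrays.
import numpy as np
import sparse

s = sparse.DOK((4, 4), dtype=np.float64)
s[0, 1] = 2.5                     # scalar assignment stores a single entry
s[[2, 3], [0, 1]] = [7.0, 8.0]    # fancy assignment: one index sequence per axis
assert s.nnz == 3

c = s.to_coo()                    # DOK -> COO
assert isinstance(c, sparse.COO)
assert np.array_equal(c.todense(), s.todense())

s2 = sparse.DOK.from_coo(c)       # COO -> DOK round trip
assert s2.nnz == s.nnz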
Note that for small arrays, this may undercount the number of bytes due to the large constant overhead. Returns ------- int The approximate bytes of memory taken by this object. See Also -------- numpy.ndarray.nbytes : The equivalent Numpy property. Examples -------- >>> import sparse >>> x = sparse.random((100, 100), density=0.1, format="dok") >>> x.nbytes 8000 """ return self.nnz * self.dtype.itemsize def __getitem__(self, key): if not isinstance(key, tuple): key = (key,) if all(isinstance(k, Iterable) for k in key): if len(key) != self.ndim: raise NotImplementedError(f"Index sequences for all {self.ndim} array dimensions needed!") if not all(len(key[0]) == len(k) for k in key): raise IndexError("Unequal length of index sequences!") return self._fancy_getitem(key) key = normalize_index(key, self.shape) ret = self.asformat("coo")[key] if isinstance(ret, SparseArray): ret = ret.asformat("dok") return ret def _fancy_getitem(self, key): """Subset of fancy indexing, when all dimensions are accessed""" new_data = {} for i, k in enumerate(zip(*key, strict=True)): if k in self.data: new_data[i] = self.data[k] return DOK( shape=(len(key[0])), data=new_data, dtype=self.dtype, fill_value=self.fill_value, ) def __setitem__(self, key, value): value = np.asarray(value, dtype=self.dtype) # 1D fancy indexing if self.ndim == 1 and isinstance(key, Iterable) and all(isinstance(i, int | np.integer) for i in key): key = (key,) if isinstance(key, tuple) and all(isinstance(k, Iterable) for k in key): if len(key) != self.ndim: raise NotImplementedError(f"Index sequences for all {self.ndim} array dimensions needed!") if not all(len(key[0]) == len(k) for k in key): raise IndexError("Unequal length of index sequences!") self._fancy_setitem(key, value) return key = normalize_index(key, self.shape) key_list = [int(k) if isinstance(k, Integral) else k for k in key] self._setitem(key_list, value) def _fancy_setitem(self, idxs, values): idxs = tuple(np.asanyarray(idxs) for idxs in idxs) if not all(np.issubdtype(k.dtype, np.integer) for k in idxs): raise IndexError("Indices must be sequences of integer types!") if idxs[0].ndim != 1: raise IndexError("Indices are not 1d sequences!") if values.ndim == 0: values = np.full(idxs[0].size, values, self.dtype) elif values.ndim > 1: raise ValueError(f"Dimension of values ({values.ndim}) must be 0 or 1!") if not idxs[0].shape == values.shape: raise ValueError(f"Shape mismatch of indices ({idxs[0].shape}) and values ({values.shape})!") fill_value = self.fill_value data = self.data for idx, value in zip(zip(*idxs, strict=True), values, strict=True): if value != fill_value: data[idx] = value elif idx in data: del data[idx] def _setitem(self, key_list, value): value_missing_dims = len([ind for ind in key_list if isinstance(ind, slice)]) - value.ndim if value_missing_dims < 0: raise ValueError("setting an array element with a sequence.") for i, ind in enumerate(key_list): if isinstance(ind, slice): step = ind.step if ind.step is not None else 1 if step > 0: start = ind.start if ind.start is not None else 0 start = max(start, 0) stop = ind.stop if ind.stop is not None else self.shape[i] stop = min(stop, self.shape[i]) if start > stop: start = stop else: start = ind.start or self.shape[i] - 1 stop = ind.stop if ind.stop is not None else -1 start = min(start, self.shape[i] - 1) stop = max(stop, -1) if start < stop: start = stop key_list_temp = key_list[:] for v_idx, ki in enumerate(range(start, stop, step)): key_list_temp[i] = ki vi = value if value_missing_dims > 0 else (value[0] if 
value.shape[0] == 1 else value[v_idx]) self._setitem(key_list_temp, vi) return if not isinstance(ind, Integral): raise IndexError("All indices must be slices or integers when setting an item.") key = tuple(key_list) if not equivalent(value, self.fill_value): self.data[key] = value[()] elif key in self.data: del self.data[key] def __str__(self): summary = f"" return self._str_impl(summary) __repr__ = __str__ def todense(self): """ Convert this :obj:`DOK` array into a Numpy array. Returns ------- numpy.ndarray The equivalent dense array. See Also -------- COO.todense : Equivalent :obj:`COO` array method. scipy.sparse.dok_matrix.todense : Equivalent Scipy method. Examples -------- >>> s = DOK((5, 5)) >>> s[1:3, 1:3] = [[4, 5], [6, 7]] >>> s.todense() # doctest: +SKIP array([[0., 0., 0., 0., 0.], [0., 4., 5., 0., 0.], [0., 6., 7., 0., 0.], [0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.]]) """ result = np.full(self.shape, self.fill_value, self.dtype) for c, d in self.data.items(): result[c] = d return result def asformat(self, format, **kwargs): """ Convert this sparse array to a given format. Parameters ---------- format : str A format string. Returns ------- out : SparseArray The converted array. Raises ------ NotImplementedError If the format isn't supported. """ from ._utils import convert_format format = convert_format(format) if format == "dok": return self if format == "coo": from ._coo import COO if len(kwargs) != 0: raise ValueError(f"Extra kwargs found: {kwargs}") return COO.from_iter( self.data, shape=self.shape, fill_value=self.fill_value, dtype=self.dtype, ) return self.asformat("coo").asformat(format, **kwargs) def reshape(self, shape, order="C"): """ Returns a new :obj:`DOK` array that is a reshaped version of this array. Parameters ---------- shape : tuple[int] The desired shape of the output array. Returns ------- DOK The reshaped output array. See Also -------- numpy.ndarray.reshape : The equivalent Numpy function. Notes ----- The :code:`order` parameter is provided just for compatibility with Numpy and isn't actually supported. Examples -------- >>> s = DOK.from_numpy(np.arange(25)) >>> s2 = s.reshape((5, 5)) >>> s2.todense() # doctest: +NORMALIZE_WHITESPACE array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) """ if order not in {"C", None}: raise NotImplementedError("The 'order' parameter is not supported") return DOK.from_coo(self.to_coo().reshape(shape)) def to_slice(k): """Convert integer indices to one-element slices for consistency""" if isinstance(k, Integral): return slice(k, k + 1, 1) return k sparse-0.16.0a9/sparse/numba_backend/_io.py000066400000000000000000000073101463475501500205260ustar00rootroot00000000000000import numpy as np from ._compressed import GCXS from ._coo.core import COO def save_npz(filename, matrix, compressed=True): """Save a sparse matrix to disk in numpy's ``.npz`` format. Note: This is not binary compatible with scipy's ``save_npz()``. This binary format is not currently stable. Will save a file that can only be opend with this package's ``load_npz()``. Parameters ---------- filename : string or file Either the file name (string) or an open file (file-like object) where the data will be saved. 
If file is a string or a Path, the ``.npz`` extension will be appended to the file name if it is not already there matrix : SparseArray The matrix to save to disk compressed : bool Whether to save in compressed or uncompressed mode Examples -------- Store sparse matrix to disk, and load it again: >>> import os >>> import sparse >>> import numpy as np >>> dense_mat = np.array([[[0.0, 0.0], [0.0, 0.70677779]], [[0.0, 0.0], [0.0, 0.86522495]]]) >>> mat = sparse.COO(dense_mat) >>> mat >>> sparse.save_npz("mat.npz", mat) >>> loaded_mat = sparse.load_npz("mat.npz") >>> loaded_mat >>> os.remove("mat.npz") See Also -------- load_npz scipy.sparse.save_npz scipy.sparse.load_npz numpy.savez numpy.load """ nodes = { "data": matrix.data, "shape": matrix.shape, "fill_value": matrix.fill_value, } if type(matrix) == COO: nodes["coords"] = matrix.coords elif type(matrix) == GCXS: nodes["indices"] = matrix.indices nodes["indptr"] = matrix.indptr nodes["compressed_axes"] = matrix.compressed_axes if compressed: np.savez_compressed(filename, **nodes) else: np.savez(filename, **nodes) def load_npz(filename): """Load a sparse matrix in numpy's ``.npz`` format from disk. Note: This is not binary compatible with scipy's ``save_npz()`` output. This binary format is not currently stable. Will only load files saved by this package. Parameters ---------- filename : file-like object, string, or pathlib.Path The file to read. File-like objects must support the ``seek()`` and ``read()`` methods. Returns ------- SparseArray The sparse matrix at path ``filename``. Examples -------- See :obj:`save_npz` for usage examples. See Also -------- save_npz scipy.sparse.save_npz scipy.sparse.load_npz numpy.savez numpy.load """ with np.load(filename) as fp: try: coords = fp["coords"] data = fp["data"] shape = tuple(fp["shape"]) fill_value = fp["fill_value"][()] return COO( coords=coords, data=data, shape=shape, sorted=True, has_duplicates=False, fill_value=fill_value, ) except KeyError: pass try: data = fp["data"] indices = fp["indices"] indptr = fp["indptr"] comp_axes = fp["compressed_axes"] shape = tuple(fp["shape"]) fill_value = fp["fill_value"][()] return GCXS( (data, indices, indptr), shape=shape, fill_value=fill_value, compressed_axes=comp_axes, ) except KeyError as e: raise RuntimeError(f"The file {filename!s} does not contain a valid sparse matrix") from e sparse-0.16.0a9/sparse/numba_backend/_numba_extension.py000066400000000000000000000002701463475501500233130ustar00rootroot00000000000000def _init_extension(): """ Load extensions when numba is loaded. 
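# Round-trip sketch for ``save_npz``/``load_npz`` from ``_io.py`` above (not part
# of this file).  The loader first looks for COO-style fields (``coords``) and
# falls back to GCXS-style fields (``indices``/``indptr``/``compressed_axes``),
# so both formats survive a save/load cycle.
import os
import numpy as np
import sparse

g = sparse.GCXS.from_numpy(np.eye(4))
sparse.save_npz("eye.npz", g)
loaded = sparse.load_npz("eye.npz")
assert isinstance(loaded, sparse.GCXS)
assert np.array_equal(loaded.todense(), np.eye(4))
os.remove("eye.npz")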
This name must match the one in pyproject.toml """ from ._coo import numba_extension # noqa: F401 sparse-0.16.0a9/sparse/numba_backend/_settings.py000066400000000000000000000006531463475501500217620ustar00rootroot00000000000000import os import numpy as np AUTO_DENSIFY = bool(int(os.environ.get("SPARSE_AUTO_DENSIFY", "0"))) WARN_ON_TOO_DENSE = bool(int(os.environ.get("SPARSE_WARN_ON_TOO_DENSE", "0"))) def _is_nep18_enabled(): class A: def __array_function__(self, *args, **kwargs): return True try: return np.concatenate([A()]) except ValueError: return False NEP18_ENABLED = _is_nep18_enabled() sparse-0.16.0a9/sparse/numba_backend/_slicing.py000066400000000000000000000210431463475501500215460ustar00rootroot00000000000000# Most of this file is taken from https://github.com/dask/dask/blob/main/dask/array/slicing.py # See license at https://github.com/dask/dask/blob/main/LICENSE.txt import math from collections.abc import Iterable from numbers import Integral, Number import numpy as np def normalize_index(idx, shape): """Normalize slicing indexes 1. Replaces ellipses with many full slices 2. Adds full slices to end of index 3. Checks bounding conditions 4. Replaces numpy arrays with lists 5. Posify's slices integers and lists 6. Normalizes slices to canonical form Examples -------- >>> normalize_index(1, (10,)) (1,) >>> normalize_index(-1, (10,)) (9,) >>> normalize_index([-1], (10,)) (array([9]),) >>> normalize_index(slice(-3, 10, 1), (10,)) (slice(7, 10, 1),) >>> normalize_index((Ellipsis, None), (10,)) (slice(0, 10, 1), None) """ if not isinstance(idx, tuple): idx = (idx,) idx = replace_ellipsis(len(shape), idx) n_sliced_dims = 0 for i in idx: if hasattr(i, "ndim") and i.ndim >= 1: n_sliced_dims += i.ndim elif i is None: continue else: n_sliced_dims += 1 idx += (slice(None),) * (len(shape) - n_sliced_dims) if len([i for i in idx if i is not None]) > len(shape): raise IndexError("Too many indices for array") none_shape = [] i = 0 for ind in idx: if ind is not None: none_shape.append(shape[i]) i += 1 else: none_shape.append(None) for i, d in zip(idx, none_shape, strict=True): if d is not None: check_index(i, d) idx = tuple(map(sanitize_index, idx)) idx = tuple(map(replace_none, idx, none_shape)) idx = posify_index(none_shape, idx) return tuple(map(clip_slice, idx, none_shape)) def replace_ellipsis(n, index): """Replace ... with slices, :, : ,: >>> replace_ellipsis(4, (3, Ellipsis, 2)) (3, slice(None, None, None), slice(None, None, None), 2) >>> replace_ellipsis(2, (Ellipsis, None)) (slice(None, None, None), slice(None, None, None), None) """ # Careful about using in or index because index may contain arrays isellipsis = [i for i, ind in enumerate(index) if ind is Ellipsis] if not isellipsis: return index if len(isellipsis) > 1: raise IndexError("an index can only have a single ellipsis ('...')") loc = isellipsis[0] extra_dimensions = n - (len(index) - sum(i is None for i in index) - 1) return index[:loc] + (slice(None, None, None),) * extra_dimensions + index[loc + 1 :] def check_index(ind, dimension): """Check validity of index for a given dimension Examples -------- >>> check_index(3, 5) >>> check_index(5, 5) Traceback (most recent call last): ... IndexError: Index is not smaller than dimension 5 >= 5 >>> check_index(6, 5) Traceback (most recent call last): ... IndexError: Index is not smaller than dimension 6 >= 5 >>> check_index(-1, 5) >>> check_index(-6, 5) Traceback (most recent call last): ... 
IndexError: Negative index is not greater than negative dimension -6 <= -5 >>> check_index([1, 2], 5) >>> check_index([6, 3], 5) Traceback (most recent call last): ... IndexError: Index out of bounds for dimension 5 >>> check_index(slice(0, 3), 5) """ # unknown dimension, assumed to be in bounds if isinstance(ind, Iterable): x = np.asanyarray(ind) if np.issubdtype(x.dtype, np.integer) and ((x >= dimension) | (x < -dimension)).any(): raise IndexError(f"Index out of bounds for dimension {dimension:d}") if x.dtype == np.bool_ and len(x) != dimension: raise IndexError( f"boolean index did not match indexed array; dimension is {dimension:d} " f"but corresponding boolean dimension is {len(x):d}" ) elif isinstance(ind, slice): return elif not isinstance(ind, Integral): raise IndexError( "only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) and " "integer or boolean arrays are valid indices" ) elif ind >= dimension: raise IndexError(f"Index is not smaller than dimension {ind:d} >= {dimension:d}") elif ind < -dimension: msg = "Negative index is not greater than negative dimension {:d} <= -{:d}" raise IndexError(msg.format(ind, dimension)) def sanitize_index(ind): """Sanitize the elements for indexing along one axis >>> sanitize_index([2, 3, 5]) array([2, 3, 5]) >>> sanitize_index([True, False, True, False]) array([0, 2]) >>> sanitize_index(np.array([1, 2, 3])) array([1, 2, 3]) >>> sanitize_index(np.array([False, True, True])) array([1, 2]) >>> type(sanitize_index(np.int32(0))) # doctest: +SKIP >>> sanitize_index(0.5) # doctest: +SKIP Traceback (most recent call last): ... IndexError: only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) and integer or boolean arrays are valid indices """ if ind is None: return None if isinstance(ind, slice): return slice( _sanitize_index_element(ind.start), _sanitize_index_element(ind.stop), _sanitize_index_element(ind.step), ) if isinstance(ind, Number): return _sanitize_index_element(ind) if not hasattr(ind, "dtype") and len(ind) == 0: ind = np.array([], dtype=np.intp) ind = np.asarray(ind) if ind.dtype == np.bool_: nonzero = np.nonzero(ind) if len(nonzero) == 1: # If a 1-element tuple, unwrap the element nonzero = nonzero[0] return np.asanyarray(nonzero) if np.issubdtype(ind.dtype, np.integer): return ind raise IndexError( "only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) and " "integer or boolean arrays are valid indices" ) def _sanitize_index_element(ind): """Sanitize a one-element index.""" if ind is None: return None return int(ind) def posify_index(shape, ind): """Flip negative indices around to positive ones >>> posify_index(10, 3) 3 >>> posify_index(10, -3) 7 >>> posify_index(10, [3, -3]) array([3, 7]) >>> posify_index((10, 20), (3, -3)) (3, 17) >>> posify_index((10, 20), (3, [3, 4, -3])) # doctest: +NORMALIZE_WHITESPACE (3, array([ 3, 4, 17])) """ if isinstance(ind, tuple): return tuple(map(posify_index, shape, ind)) if isinstance(ind, Integral): if ind < 0 and not math.isnan(shape): return ind + shape return ind if isinstance(ind, np.ndarray | list) and not math.isnan(shape): ind = np.asanyarray(ind) return np.where(ind < 0, ind + shape, ind) if isinstance(ind, slice): start, stop, step = ind.start, ind.stop, ind.step if start < 0: start += shape if not (0 > stop >= step) and stop < 0: stop += shape return slice(start, stop, ind.step) return ind def clip_slice(idx, dim): """ Clip slice to its effective size given the shape. Parameters ---------- idx : The index. 
dim : The size along the corresponding dimension. Returns ------- idx : slice Examples -------- >>> clip_slice(slice(0, 20, 1), 10) slice(0, 10, 1) """ if not isinstance(idx, slice): return idx start, stop, step = idx.start, idx.stop, idx.step if step > 0: start = max(start, 0) stop = min(stop, dim) if start > stop: start = stop else: start = min(start, dim - 1) stop = max(stop, -1) if start < stop: start = stop return slice(start, stop, step) def replace_none(idx, dim): """ Normalize slices to canonical form, i.e. replace ``None`` with the appropriate integers. Parameters ---------- idx : slice or other index dim : dimension length Examples -------- >>> replace_none(slice(None, None, None), 10) slice(0, 10, 1) """ if not isinstance(idx, slice): return idx start, stop, step = idx.start, idx.stop, idx.step if step is None: step = 1 if step > 0: if start is None: start = 0 if stop is None: stop = dim else: if start is None: start = dim - 1 if stop is None: stop = -1 return slice(start, stop, step) sparse-0.16.0a9/sparse/numba_backend/_sparse_array.py000066400000000000000000000741041463475501500226170ustar00rootroot00000000000000import contextlib import operator import warnings from abc import ABCMeta, abstractmethod from collections.abc import Callable, Iterable from functools import reduce from numbers import Integral import numpy as np from ._umath import elemwise from ._utils import _zero_of_dtype, equivalent, html_table, normalize_axis _reduce_super_ufunc = {np.add: np.multiply, np.multiply: np.power} class SparseArray: """ An abstract base class for all the sparse array classes. Attributes ---------- dtype : numpy.dtype The data type of this array. fill_value : scalar The fill value of this array. """ __metaclass__ = ABCMeta def __init__(self, shape, fill_value=None): if not isinstance(shape, Iterable): shape = (shape,) if not all(isinstance(sh, Integral) and int(sh) >= 0 for sh in shape): raise ValueError("shape must be an non-negative integer or a tuple of non-negative integers.") self.shape = tuple(int(sh) for sh in shape) if fill_value is not None: if not hasattr(fill_value, "dtype") or fill_value.dtype != self.dtype: self.fill_value = self.dtype.type(fill_value) else: self.fill_value = fill_value else: self.fill_value = _zero_of_dtype(self.dtype) dtype = None @property def device(self): data = getattr(self, "data", None) return getattr(data, "device", "cpu") def to_device(self, device, /, *, stream=None): if device != "cpu": raise ValueError("Only `device='cpu'` is supported.") return self @property @abstractmethod def nnz(self): """ The number of nonzero elements in this array. Note that any duplicates in :code:`coords` are counted multiple times. To avoid this, call :obj:`COO.sum_duplicates`. Returns ------- int The number of nonzero elements in this array. See Also -------- DOK.nnz : Equivalent :obj:`DOK` array property. numpy.count_nonzero : A similar Numpy function. scipy.sparse.coo_matrix.nnz : The Scipy equivalent property. Examples -------- >>> import numpy as np >>> from sparse import COO >>> x = np.array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3, 0, 0]) >>> np.count_nonzero(x) 6 >>> s = COO.from_numpy(x) >>> s.nnz 6 >>> np.count_nonzero(x) == s.nnz True """ @property def ndim(self): """ The number of dimensions of this array. Returns ------- int The number of dimensions of this array. See Also -------- DOK.ndim : Equivalent property for :obj:`DOK` arrays. numpy.ndarray.ndim : Numpy equivalent property. 
Examples -------- >>> from sparse import COO >>> import numpy as np >>> x = np.random.rand(1, 2, 3, 1, 2) >>> s = COO.from_numpy(x) >>> s.ndim 5 >>> s.ndim == x.ndim True """ return len(self.shape) @property def size(self): """ The number of all elements (including zeros) in this array. Returns ------- int The number of elements. See Also -------- numpy.ndarray.size : Numpy equivalent property. Examples -------- >>> from sparse import COO >>> import numpy as np >>> x = np.zeros((10, 10)) >>> s = COO.from_numpy(x) >>> s.size 100 """ # We use this instead of np.prod because np.prod # returns a float64 for an empty shape. return reduce(operator.mul, self.shape, 1) @property def density(self): """ The ratio of nonzero to all elements in this array. Returns ------- float The ratio of nonzero to all elements. See Also -------- COO.size : Number of elements. COO.nnz : Number of nonzero elements. Examples -------- >>> import numpy as np >>> from sparse import COO >>> x = np.zeros((8, 8)) >>> x[0, :] = 1 >>> s = COO.from_numpy(x) >>> s.density 0.125 """ return self.nnz / self.size def _repr_html_(self): """ Diagnostic report about this array. Renders in Jupyter. """ try: from matrepr import to_html from matrepr.adapters.sparse_driver import PyDataSparseDriver return to_html(PyDataSparseDriver.adapt(self), notebook=True) except (ImportError, ValueError): return html_table(self) def _str_impl(self, summary): """ A human-readable representation of this array, including a metadata summary and a tabular view of the array values. Values view only included if `matrepr` is available. Parameters ---------- summary A type-specific summary of this array, used as the first line of return value. Returns ------- str A human-readable representation of this array. """ try: from matrepr import to_str from matrepr.adapters.sparse_driver import PyDataSparseDriver values = to_str( PyDataSparseDriver.adapt(self), title=False, # disable matrepr description width_str=0, # autodetect terminal width max_cols=9999, ) return f"{summary}\n{values}" except (ImportError, ValueError): return summary @abstractmethod def asformat(self, format): """ Convert this sparse array to a given format. Parameters ---------- format : str A format string. Returns ------- out : SparseArray The converted array. Raises ------ NotImplementedError If the format isn't supported. """ @abstractmethod def todense(self): """ Convert this :obj:`SparseArray` array to a dense :obj:`numpy.ndarray`. Note that this may take a large amount of memory and time. Returns ------- numpy.ndarray The converted dense array. See Also -------- DOK.todense : Equivalent :obj:`DOK` array method. COO.todense : Equivalent :obj:`COO` array method. scipy.sparse.coo_matrix.todense : Equivalent Scipy method. Examples -------- >>> import sparse >>> x = np.random.randint(100, size=(7, 3)) >>> s = sparse.COO.from_numpy(x) >>> x2 = s.todense() >>> np.array_equal(x, x2) True """ def _make_shallow_copy_of(self, other): self.__dict__ = other.__dict__.copy() def __array__(self, *args, **kwargs): from ._settings import AUTO_DENSIFY if not AUTO_DENSIFY: raise RuntimeError( "Cannot convert a sparse array to dense automatically. To manually densify, use the todense method." 
) return np.asarray(self.todense(), *args, **kwargs) def __array_function__(self, func, types, args, kwargs): import sparse as module sparse_func = None try: submodules = getattr(func, "__module__", "numpy").split(".")[1:] for submodule in submodules: module = getattr(module, submodule) sparse_func = getattr(module, func.__name__) except AttributeError: pass else: return sparse_func(*args, **kwargs) with contextlib.suppress(AttributeError): sparse_func = getattr(type(self), func.__name__) if not isinstance(sparse_func, Callable) and len(args) == 1 and len(kwargs) == 0: try: return getattr(self, func.__name__) except AttributeError: pass if sparse_func is None: return NotImplemented return sparse_func(*args, **kwargs) @staticmethod def _reduce(method, *args, **kwargs): from ._common import _is_scipy_sparse_obj assert len(args) == 1 self = args[0] if _is_scipy_sparse_obj(self): self = type(self).from_scipy_sparse(self) return self.reduce(method, **kwargs) def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): out = kwargs.pop("out", None) if out is not None and not all(isinstance(x, type(self)) for x in out): return NotImplemented if getattr(ufunc, "signature", None) is not None: return self.__array_function__(ufunc, (np.ndarray, type(self)), inputs, kwargs) if out is not None: test_args = [np.empty((1,), dtype=a.dtype) if hasattr(a, "dtype") else a for a in inputs] test_kwargs = kwargs.copy() if method == "reduce": test_kwargs["axis"] = None test_out = tuple(np.empty((1,), dtype=a.dtype) for a in out) if len(test_out) == 1: test_out = test_out[0] getattr(ufunc, method)(*test_args, out=test_out, **test_kwargs) kwargs["dtype"] = out[0].dtype if method == "outer": method = "__call__" cum_ndim = 0 inputs_transformed = [] for inp in reversed(inputs): inputs_transformed.append(inp[(Ellipsis,) + (None,) * cum_ndim]) cum_ndim += inp.ndim inputs = tuple(reversed(inputs_transformed)) if method == "__call__": result = elemwise(ufunc, *inputs, **kwargs) elif method == "reduce": result = SparseArray._reduce(ufunc, *inputs, **kwargs) else: return NotImplemented if out is not None: (out,) = out if out.shape != result.shape: raise ValueError( f"non-broadcastable output operand with shape {out.shape} " f"doesn't match the broadcast shape {result.shape}" ) out._make_shallow_copy_of(result) return out return result def reduce(self, method, axis=(0,), keepdims=False, **kwargs): """ Performs a reduction operation on this array. Parameters ---------- method : numpy.ufunc The method to use for performing the reduction. axis : Union[int, Iterable[int]], optional The axes along which to perform the reduction. Uses all axes by default. keepdims : bool, optional Whether or not to keep the dimensions of the original array. **kwargs : dict Any extra arguments to pass to the reduction operation. See Also -------- numpy.ufunc.reduce : A similar Numpy method. COO.reduce : This method implemented on COO arrays. GCXS.reduce : This method implemented on GCXS arrays. 
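# Illustration of ``reduce`` (not part of this file).  ``np.add.reduce`` keeps the
# result sparse because reducing fill values with ``add`` reproduces the fill
# value; for ``np.multiply`` the ``_reduce_super_ufunc`` table is consulted so a
# nonzero fill value can still be folded in as ``fill_value ** n``.
import numpy as np
import sparse

s = sparse.COO.from_numpy(np.array([[0, 1, 2], [3, 0, 0]]))
col_sums = s.reduce(np.add, axis=0)
assert np.array_equal(col_sums.todense(), np.array([3, 1, 2]))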
""" axis = normalize_axis(axis, self.ndim) zero_reduce_result = method.reduce([self.fill_value, self.fill_value], **kwargs) reduce_super_ufunc = _reduce_super_ufunc.get(method) if not equivalent(zero_reduce_result, self.fill_value) and reduce_super_ufunc is None: raise ValueError(f"Performing this reduction operation would produce a dense result: {method!s}") if not isinstance(axis, tuple): axis = (axis,) out = self._reduce_calc(method, axis, keepdims, **kwargs) if len(out) == 1: return out[0] data, counts, axis, n_cols, arr_attrs = out result_fill_value = self.fill_value if reduce_super_ufunc is None: missing_counts = counts != n_cols data[missing_counts] = method(data[missing_counts], self.fill_value, **kwargs) else: data = method( data, reduce_super_ufunc(self.fill_value, n_cols - counts), ).astype(data.dtype) result_fill_value = reduce_super_ufunc(self.fill_value, n_cols) out = self._reduce_return(data, arr_attrs, result_fill_value) if keepdims: shape = list(self.shape) for ax in axis: shape[ax] = 1 out = out.reshape(shape) if out.ndim == 0: return out[()] return out def _reduce_calc(self, method, axis, keepdims, **kwargs): raise NotImplementedError def _reduce_return(self, data, arr_attrs, result_fill_value): raise NotImplementedError def sum(self, axis=None, keepdims=False, dtype=None, out=None): """ Performs a sum operation along the given axes. Uses all axes by default. Parameters ---------- axis : Union[int, Iterable[int]], optional The axes along which to sum. Uses all axes by default. keepdims : bool, optional Whether or not to keep the dimensions of the original array. dtype : numpy.dtype The data type of the output array. Returns ------- SparseArray The reduced output sparse array. See Also -------- :obj:`numpy.sum` : Equivalent numpy function. scipy.sparse.coo_matrix.sum : Equivalent Scipy function. """ return np.add.reduce(self, out=out, axis=axis, keepdims=keepdims, dtype=dtype) def max(self, axis=None, keepdims=False, out=None): """ Maximize along the given axes. Uses all axes by default. Parameters ---------- axis : Union[int, Iterable[int]], optional The axes along which to maximize. Uses all axes by default. keepdims : bool, optional Whether or not to keep the dimensions of the original array. out : numpy.dtype The data type of the output array. Returns ------- SparseArray The reduced output sparse array. See Also -------- :obj:`numpy.max` : Equivalent numpy function. scipy.sparse.coo_matrix.max : Equivalent Scipy function. """ return np.maximum.reduce(self, out=out, axis=axis, keepdims=keepdims) amax = max def any(self, axis=None, keepdims=False, out=None): """ See if any values along array are ``True``. Uses all axes by default. Parameters ---------- axis : Union[int, Iterable[int]], optional The axes along which to minimize. Uses all axes by default. keepdims : bool, optional Whether or not to keep the dimensions of the original array. Returns ------- SparseArray The reduced output sparse array. See Also -------- :obj:`numpy.any` : Equivalent numpy function. """ return np.logical_or.reduce(self, out=out, axis=axis, keepdims=keepdims) def all(self, axis=None, keepdims=False, out=None): """ See if all values in an array are ``True``. Uses all axes by default. Parameters ---------- axis : Union[int, Iterable[int]], optional The axes along which to minimize. Uses all axes by default. keepdims : bool, optional Whether or not to keep the dimensions of the original array. Returns ------- SparseArray The reduced output sparse array. 
See Also -------- :obj:`numpy.all` : Equivalent numpy function. """ return np.logical_and.reduce(self, out=out, axis=axis, keepdims=keepdims) def min(self, axis=None, keepdims=False, out=None): """ Minimize along the given axes. Uses all axes by default. Parameters ---------- axis : Union[int, Iterable[int]], optional The axes along which to minimize. Uses all axes by default. keepdims : bool, optional Whether or not to keep the dimensions of the original array. out : numpy.dtype The data type of the output array. Returns ------- SparseArray The reduced output sparse array. See Also -------- :obj:`numpy.min` : Equivalent numpy function. scipy.sparse.coo_matrix.min : Equivalent Scipy function. """ return np.minimum.reduce(self, out=out, axis=axis, keepdims=keepdims) amin = min def prod(self, axis=None, keepdims=False, dtype=None, out=None): """ Performs a product operation along the given axes. Uses all axes by default. Parameters ---------- axis : Union[int, Iterable[int]], optional The axes along which to multiply. Uses all axes by default. keepdims : bool, optional Whether or not to keep the dimensions of the original array. dtype : numpy.dtype The data type of the output array. Returns ------- SparseArray The reduced output sparse array. See Also -------- :obj:`numpy.prod` : Equivalent numpy function. """ return np.multiply.reduce(self, out=out, axis=axis, keepdims=keepdims, dtype=dtype) def round(self, decimals=0, out=None): """ Evenly round to the given number of decimals. See Also -------- :obj:`numpy.round` : NumPy equivalent ufunc. :obj:`COO.elemwise` : Apply an arbitrary element-wise function to one or two arguments. """ if out is not None and not isinstance(out, tuple): out = (out,) return self.__array_ufunc__(np.round, "__call__", self, decimals=decimals, out=out) round_ = round def clip(self, min=None, max=None, out=None): """ Clip (limit) the values in the array. Return an array whose values are limited to ``[min, max]``. One of min or max must be given. See Also -------- sparse.clip : For full documentation and more details. numpy.clip : Equivalent NumPy function. """ if min is None and max is None: raise ValueError("One of max or min must be given.") if out is not None and not isinstance(out, tuple): out = (out,) return self.__array_ufunc__(np.clip, "__call__", self, a_min=min, a_max=max, out=out) def astype(self, dtype, casting="unsafe", copy=True): """ Copy of the array, cast to a specified type. See Also -------- scipy.sparse.coo_matrix.astype : SciPy sparse equivalent function numpy.ndarray.astype : NumPy equivalent ufunc. :obj:`COO.elemwise` : Apply an arbitrary element-wise function to one or two arguments. """ # this matches numpy's behavior if self.dtype == dtype and not copy: return self return self.__array_ufunc__(np.ndarray.astype, "__call__", self, dtype=dtype, copy=copy, casting=casting) def mean(self, axis=None, keepdims=False, dtype=None, out=None): """ Compute the mean along the given axes. Uses all axes by default. Parameters ---------- axis : Union[int, Iterable[int]], optional The axes along which to compute the mean. Uses all axes by default. keepdims : bool, optional Whether or not to keep the dimensions of the original array. dtype : numpy.dtype The data type of the output array. Returns ------- SparseArray The reduced output sparse array. See Also -------- numpy.ndarray.mean : Equivalent numpy method. scipy.sparse.coo_matrix.mean : Equivalent Scipy method. 
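# Small sketch for ``clip`` and ``astype`` defined above (not part of this file).
# ``clip`` requires at least one of ``min``/``max`` and keeps the result sparse
# as long as the fill value maps to itself.
import numpy as np
import sparse

s = sparse.COO.from_numpy(np.array([[0, 1, 5], [0, -3, 0]]))
clipped = s.clip(min=-1, max=2)
assert np.array_equal(clipped.todense(), np.array([[0, 1, 2], [0, -1, 0]]))
assert s.astype(np.float32).dtype == np.dtype("float32")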
Notes ----- * This function internally calls :obj:`COO.sum_duplicates` to bring the array into canonical form. * The :code:`out` parameter is provided just for compatibility with Numpy and isn't actually supported. Examples -------- You can use :obj:`COO.mean` to compute the mean of an array across any dimension. >>> from sparse import COO >>> x = np.array([[1, 2, 0, 0], [0, 1, 0, 0]], dtype="i8") >>> s = COO.from_numpy(x) >>> s2 = s.mean(axis=1) >>> s2.todense() # doctest: +SKIP array([0.5, 1.5, 0., 0.]) You can also use the :code:`keepdims` argument to keep the dimensions after the mean. >>> s3 = s.mean(axis=0, keepdims=True) >>> s3.shape (1, 4) You can pass in an output datatype, if needed. >>> s4 = s.mean(axis=0, dtype=np.float16) >>> s4.dtype dtype('float16') By default, this reduces the array down to one number, computing the mean along all axes. >>> s.mean() 0.5 """ if axis is None: axis = tuple(range(self.ndim)) elif not isinstance(axis, tuple): axis = (axis,) den = reduce(operator.mul, (self.shape[i] for i in axis), 1) if dtype is None: if issubclass(self.dtype.type, np.integer | np.bool_): dtype = inter_dtype = np.dtype("f8") else: dtype = self.dtype inter_dtype = np.dtype("f4") if issubclass(dtype.type, np.float16) else dtype else: inter_dtype = dtype num = self.sum(axis=axis, keepdims=keepdims, dtype=inter_dtype) if num.ndim: out = np.true_divide(num, den, casting="unsafe") return out.astype(dtype) if out.dtype != dtype else out return np.divide(num, den, dtype=dtype, out=out) def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False): """ Compute the variance along the given axes. Uses all axes by default. Parameters ---------- axis : Union[int, Iterable[int]], optional The axes along which to compute the variance. Uses all axes by default. dtype : numpy.dtype, optional The output datatype. out : SparseArray, optional The array to write the output to. ddof : int The degrees of freedom. keepdims : bool, optional Whether or not to keep the dimensions of the original array. Returns ------- SparseArray The reduced output sparse array. See Also -------- numpy.ndarray.var : Equivalent numpy method. Notes ----- * This function internally calls :obj:`COO.sum_duplicates` to bring the array into canonical form. Examples -------- You can use :obj:`COO.var` to compute the variance of an array across any dimension. >>> from sparse import COO >>> x = np.array([[1, 2, 0, 0], [0, 1, 0, 0]], dtype="i8") >>> s = COO.from_numpy(x) >>> s2 = s.var(axis=1) >>> s2.todense() # doctest: +SKIP array([0.6875, 0.1875]) You can also use the :code:`keepdims` argument to keep the dimensions after the variance. >>> s3 = s.var(axis=0, keepdims=True) >>> s3.shape (1, 4) You can pass in an output datatype, if needed. >>> s4 = s.var(axis=0, dtype=np.float16) >>> s4.dtype dtype('float16') By default, this reduces the array down to one number, computing the variance along all axes. >>> s.var() 0.5 """ axis = normalize_axis(axis, self.ndim) if axis is None: axis = tuple(range(self.ndim)) if not isinstance(axis, tuple): axis = (axis,) rcount = reduce(operator.mul, (self.shape[a] for a in axis), 1) # Make this warning show up on top. if ddof >= rcount: warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning, stacklevel=1) # Cast bool, unsigned int, and int to float64 by default if dtype is None and issubclass(self.dtype.type, np.integer | np.bool_): dtype = np.dtype("f8") arrmean = self.sum(axis, dtype=dtype, keepdims=True)[...] 
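        # ``arrmean`` currently holds the per-axis sums; dividing by the element
        # count in place turns it into the mean, and the squared deviations
        # ``x = self - arrmean`` are then summed and scaled by ``rcount - ddof``.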
np.divide(arrmean, rcount, out=arrmean) x = self - arrmean if issubclass(self.dtype.type, np.complexfloating): x = x.real * x.real + x.imag * x.imag else: x = np.multiply(x, x, out=x) ret = x.sum(axis=axis, dtype=dtype, out=out, keepdims=keepdims) # Compute degrees of freedom and make sure it is not negative. rcount = max([rcount - ddof, 0]) ret = ret[...] np.divide(ret, rcount, out=ret, casting="unsafe") return ret[()] def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False): """ Compute the standard deviation along the given axes. Uses all axes by default. Parameters ---------- axis : Union[int, Iterable[int]], optional The axes along which to compute the standard deviation. Uses all axes by default. dtype : numpy.dtype, optional The output datatype. out : SparseArray, optional The array to write the output to. ddof : int The degrees of freedom. keepdims : bool, optional Whether or not to keep the dimensions of the original array. Returns ------- SparseArray The reduced output sparse array. See Also -------- numpy.ndarray.std : Equivalent numpy method. Notes ----- * This function internally calls :obj:`COO.sum_duplicates` to bring the array into canonical form. Examples -------- You can use :obj:`COO.std` to compute the standard deviation of an array across any dimension. >>> from sparse import COO >>> x = np.array([[1, 2, 0, 0], [0, 1, 0, 0]], dtype="i8") >>> s = COO.from_numpy(x) >>> s2 = s.std(axis=1) >>> s2.todense() # doctest: +SKIP array([0.8291562, 0.4330127]) You can also use the :code:`keepdims` argument to keep the dimensions after the standard deviation. >>> s3 = s.std(axis=0, keepdims=True) >>> s3.shape (1, 4) You can pass in an output datatype, if needed. >>> s4 = s.std(axis=0, dtype=np.float16) >>> s4.dtype dtype('float16') By default, this reduces the array down to one number, computing the standard deviation along all axes. >>> s.std() # doctest: +SKIP 0.7071067811865476 """ ret = self.var(axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims) return np.sqrt(ret) @property def real(self): """The real part of the array. Examples -------- >>> from sparse import COO >>> x = COO.from_numpy([1 + 0j, 0 + 1j]) >>> x.real.todense() # doctest: +SKIP array([1., 0.]) >>> x.real.dtype dtype('float64') Returns ------- out : SparseArray The real component of the array elements. If the array dtype is real, the dtype of the array is used for the output. If the array is complex, the output dtype is float. See Also -------- numpy.ndarray.real : NumPy equivalent attribute. numpy.real : NumPy equivalent function. """ return self.__array_ufunc__(np.real, "__call__", self) @property def imag(self): """The imaginary part of the array. Examples -------- >>> from sparse import COO >>> x = COO.from_numpy([1 + 0j, 0 + 1j]) >>> x.imag.todense() # doctest: +SKIP array([0., 1.]) >>> x.imag.dtype dtype('float64') Returns ------- out : SparseArray The imaginary component of the array elements. If the array dtype is real, the dtype of the array is used for the output. If the array is complex, the output dtype is float. See Also -------- numpy.ndarray.imag : NumPy equivalent attribute. numpy.imag : NumPy equivalent function. """ return self.__array_ufunc__(np.imag, "__call__", self) def conj(self): """Return the complex conjugate, element-wise. The complex conjugate of a complex number is obtained by changing the sign of its imaginary part. 
Examples -------- >>> from sparse import COO >>> x = COO.from_numpy([1 + 2j, 2 - 1j]) >>> res = x.conj() >>> res.todense() # doctest: +SKIP array([1.-2.j, 2.+1.j]) >>> res.dtype dtype('complex128') Returns ------- out : SparseArray The complex conjugate, with same dtype as the input. See Also -------- numpy.ndarray.conj : NumPy equivalent method. numpy.conj : NumPy equivalent function. """ return np.conj(self) def __array_namespace__(self, *, api_version=None): if api_version is None: api_version = "2022.12" if api_version not in {"2021.12", "2022.12"}: raise ValueError(f'"{api_version}" Array API version not supported.') import sparse return sparse def __bool__(self): """ """ return self._to_scalar(bool) def __float__(self): """ """ return self._to_scalar(float) def __int__(self): """ """ return self._to_scalar(int) def __index__(self): """ """ return self._to_scalar(int) def __complex__(self): """ """ return self._to_scalar(complex) def _to_scalar(self, builtin): if self.size != 1 or self.shape != (): raise ValueError(f"{builtin} can be computed for one-element arrays only.") return builtin(self.todense().flatten()[0]) @abstractmethod def isinf(self): """ """ @abstractmethod def isnan(self): """ """ sparse-0.16.0a9/sparse/numba_backend/_umath.py000066400000000000000000000570641463475501500212500ustar00rootroot00000000000000import itertools import operator from functools import reduce from itertools import zip_longest import numba import numpy as np from ._utils import _zero_of_dtype, equivalent, isscalar def elemwise(func, *args, **kwargs): """ Apply a function to any number of arguments. Parameters ---------- func : Callable The function to apply. Must support broadcasting. *args : tuple, optional The arguments to the function. Can be :obj:`SparseArray` objects or :obj:`scipy.sparse.spmatrix` objects. **kwargs : dict, optional Any additional arguments to pass to the function. Returns ------- SparseArray The result of applying the function. Raises ------ ValueError If the operation would result in a dense matrix, or if the operands don't have broadcastable shapes. See Also -------- :obj:`numpy.ufunc` : A similar Numpy construct. Note that any :code:`ufunc` can be used as the :code:`func` input to this function. Notes ----- Previously, operations with Numpy arrays were sometimes supported. Now, it is necessary to convert Numpy arrays to :obj:`COO` objects. """ return _Elemwise(func, *args, **kwargs).get_result() @numba.jit(nopython=True, nogil=True) def _match_arrays(a, b): # pragma: no cover """ Finds all indexes into a and b such that a[i] = b[j]. The outputs are sorted in lexographical order. Parameters ---------- a, b : np.ndarray The input 1-D arrays to match. If matching of multiple fields is needed, use np.recarrays. These two arrays must be sorted. Returns ------- a_idx, b_idx : np.ndarray The output indices of every possible pair of matching elements. """ if len(a) == 0 or len(b) == 0: return np.empty(0, dtype=np.uintp), np.empty(0, dtype=np.uintp) a_ind, b_ind = [], [] nb = len(b) ib = 0 match = 0 for ia, j in enumerate(a): if j == b[match]: ib = match while ib < nb and j >= b[ib]: if j == b[ib]: a_ind.append(ia) b_ind.append(ib) if b[match] < b[ib]: match = ib ib += 1 return np.array(a_ind, dtype=np.uintp), np.array(b_ind, dtype=np.uintp) def _get_nary_broadcast_shape(*shapes): """ Broadcast any number of shapes to a result shape. Parameters ---------- *shapes : tuple[tuple[int]] The shapes to broadcast. Returns ------- tuple[int] The output shape. 
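# Usage sketch for the public ``elemwise`` helper defined at the top of this
# module (not part of this file).  Any broadcasting callable can be applied, as
# long as at least one argument is a SparseArray.
import numpy as np
import sparse

x = sparse.COO.from_numpy(np.array([[0, 1], [2, 0]]))
y = sparse.COO.from_numpy(np.array([[0, 3], [0, 4]]))
z = sparse.elemwise(np.multiply, x, y)
assert np.array_equal(z.todense(), np.array([[0, 3], [0, 0]]))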
Raises ------ ValueError If the input shapes cannot be broadcast to a single shape. """ result_shape = () for shape in shapes: try: result_shape = _get_broadcast_shape(shape, result_shape) except ValueError as e: # noqa: PERF203 shapes_str = ", ".join(str(shape) for shape in shapes) raise ValueError(f"operands could not be broadcast together with shapes {shapes_str}") from e return result_shape def _get_broadcast_shape(shape1, shape2, is_result=False): """ Get the overall broadcasted shape. Parameters ---------- shape1, shape2 : tuple[int] The input shapes to broadcast together. is_result : bool Whether or not shape2 is also the result shape. Returns ------- result_shape : tuple[int] The overall shape of the result. Raises ------ ValueError If the two shapes cannot be broadcast together. """ # https://stackoverflow.com/a/47244284/774273 if not all( (l1 == l2) or (l1 == 1) or ((l2 == 1) and not is_result) for l1, l2 in zip(shape1[::-1], shape2[::-1], strict=False) ): raise ValueError(f"operands could not be broadcast together with shapes {shape1}, {shape2}") return tuple(l1 if l1 != 1 else l2 for l1, l2 in zip_longest(shape1[::-1], shape2[::-1], fillvalue=1))[::-1] def _get_broadcast_parameters(shape, broadcast_shape): """ Get the broadcast parameters. Parameters ---------- shape : tuple[int] The input shape. broadcast_shape The shape to broadcast to. Returns ------- params : list A list containing None if the dimension isn't in the original array, False if it needs to be broadcast, and True if it doesn't. """ return [ None if l1 is None else l1 == l2 for l1, l2 in zip_longest(shape[::-1], broadcast_shape[::-1], fillvalue=None) ][::-1] def _get_reduced_coords(coords, params): """ Gets only those dimensions of the coordinates that don't need to be broadcast. Parameters ---------- coords : np.ndarray The coordinates to reduce. params : list The params from which to check which dimensions to get. Returns ------- reduced_coords : np.ndarray The reduced coordinates. """ reduced_params = [bool(param) for param in params] return coords[reduced_params] def _get_reduced_shape(shape, params): """ Gets only those dimensions of the coordinates that don't need to be broadcast. Parameters ---------- shape : np.ndarray The coordinates to reduce. params : list The params from which to check which dimensions to get. Returns ------- reduced_coords : np.ndarray The reduced coordinates. """ return tuple(sh for sh, p in zip(shape, params, strict=True) if p) def _get_expanded_coords_data(coords, data, params, broadcast_shape): """ Expand coordinates/data to broadcast_shape. Does most of the heavy lifting for broadcast_to. Produces sorted output for sorted inputs. Parameters ---------- coords : np.ndarray The coordinates to expand. data : np.ndarray The data corresponding to the coordinates. params : list The broadcast parameters. broadcast_shape : tuple[int] The shape to broadcast to. Returns ------- expanded_coords : np.ndarray List of 1-D arrays. Each item in the list has one dimension of coordinates. expanded_data : np.ndarray The data corresponding to expanded_coords. 
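# Behaviour sketch for the private broadcast helpers above (values worked out
# from the definitions in this module, not an official doctest):
#
#   _get_broadcast_shape((3, 1), (4,))        -> (3, 4)
#   _get_broadcast_parameters((3, 1), (3, 4)) -> [True, False]
#   _get_broadcast_parameters((4,), (3, 4))   -> [None, True]
#
# ``True`` marks a dimension that already matches, ``False`` one that must be
# broadcast, and ``None`` a dimension missing from the original shape.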
""" first_dim = -1 expand_shapes = [] for d, p, sh in zip(range(len(broadcast_shape)), params, broadcast_shape, strict=True): if p and first_dim == -1: expand_shapes.append(coords.shape[1]) first_dim = d if not p: expand_shapes.append(sh) all_idx = _cartesian_product(*(np.arange(d, dtype=np.intp) for d in expand_shapes)) false_dim = 0 dim = 0 expanded_coords = np.empty((len(broadcast_shape), all_idx.shape[1]), dtype=np.intp) if first_dim != -1: expanded_data = data[all_idx[first_dim]] else: expanded_coords = all_idx if len(data) else np.empty((0, all_idx.shape[1]), dtype=np.intp) expanded_data = np.repeat(data, reduce(operator.mul, broadcast_shape, 1)) return np.asarray(expanded_coords), np.asarray(expanded_data) for d, p in zip(range(len(broadcast_shape)), params, strict=True): if p: expanded_coords[d] = coords[dim, all_idx[first_dim]] else: expanded_coords[d] = all_idx[false_dim + (d > first_dim)] false_dim += 1 if p is not None: dim += 1 return np.asarray(expanded_coords), np.asarray(expanded_data) # (c) senderle # Taken from https://stackoverflow.com/a/11146645/774273 # License: https://creativecommons.org/licenses/by-sa/3.0/ def _cartesian_product(*arrays): """ Get the cartesian product of a number of arrays. Parameters ---------- *arrays : Tuple[np.ndarray] The arrays to get a cartesian product of. Always sorted with respect to the original array. Returns ------- out : np.ndarray The overall cartesian product of all the input arrays. """ broadcastable = np.ix_(*arrays) broadcasted = np.broadcast_arrays(*broadcastable) rows, cols = np.prod(broadcasted[0].shape), len(broadcasted) dtype = np.result_type(*arrays) out = np.empty(rows * cols, dtype=dtype) start, end = 0, rows for a in broadcasted: out[start:end] = a.reshape(-1) start, end = end, end + rows return out.reshape(cols, rows) def _get_matching_coords(coords, params): """ Get the matching coords across a number of broadcast operands. Parameters ---------- coords : list[numpy.ndarray] The input coordinates. params : list[Union[bool, none]] The broadcast parameters. Returns ------- numpy.ndarray The broacasted coordinates """ matching_coords = [] dims = np.zeros(len(coords), dtype=np.uint8) for p_all in zip(*params, strict=True): for i, p in enumerate(p_all): if p: matching_coords.append(coords[i][dims[i]]) break else: matching_coords.append(coords[dims[0]]) for i, p in enumerate(p_all): if p is not None: dims[i] += 1 return np.asarray(matching_coords, dtype=np.intp) def broadcast_to(x, shape): """ Performs the equivalent of :obj:`numpy.broadcast_to` for :obj:`COO`. Note that this function returns a new array instead of a view. Parameters ---------- shape : tuple[int] The shape to broadcast the data to. Returns ------- COO The broadcasted sparse array. Raises ------ ValueError If the operand cannot be broadcast to the given shape. 
See Also -------- :obj:`numpy.broadcast_to` : NumPy equivalent function """ from ._coo import COO if shape == x.shape: return x result_shape = _get_broadcast_shape(x.shape, shape, is_result=True) params = _get_broadcast_parameters(x.shape, result_shape) coords, data = _get_expanded_coords_data(x.coords, x.data, params, result_shape) # Check if all the non-broadcast axes are next to each other nonbroadcast_idx = [idx for idx, p in enumerate(params) if p] diff_nonbroadcast_idx = [a - b for a, b in zip(nonbroadcast_idx[1:], nonbroadcast_idx[:-1], strict=True)] sorted = all(d == 1 for d in diff_nonbroadcast_idx) return COO( coords, data, shape=result_shape, has_duplicates=False, sorted=sorted, fill_value=x.fill_value, ) class _Elemwise: def __init__(self, func, *args, **kwargs): """ Initialize the element-wise function calculator. Parameters ---------- func : types.Callable The function to compute *args : tuple[Union[SparseArray, ndarray, scipy.sparse.spmatrix]] The arguments to compute the function on. **kwargs : dict Extra arguments to pass to the function. """ from ._common import _is_scipy_sparse_obj from ._compressed import GCXS from ._coo import COO from ._dok import DOK from ._sparse_array import SparseArray processed_args = [] out_type = GCXS out_kwargs = {} sparse_args = [arg for arg in args if isinstance(arg, SparseArray)] if len(sparse_args) == 0: raise ValueError(f"None of the args is sparse: {args}") if all(isinstance(arg, DOK) for arg in sparse_args): out_type = DOK elif all(isinstance(arg, GCXS) for arg in sparse_args): out_type = GCXS if len({arg.compressed_axes for arg in sparse_args}) == 1: out_kwargs["compressed_axes"] = sparse_args[0].compressed_axes else: out_type = COO for arg in args: if _is_scipy_sparse_obj(arg): processed_args.append(COO.from_scipy_sparse(arg)) elif isscalar(arg) or isinstance(arg, np.ndarray): # Faster and more reliable to pass ()-shaped ndarrays as scalars. 
processed_args.append(arg) elif isinstance(arg, SparseArray): if not isinstance(arg, COO): arg = arg.asformat(COO) if arg.ndim == 0: arg = arg.todense() processed_args.append(arg) else: self.args = None return self.out_type = out_type self.out_kwargs = out_kwargs self.args = tuple(processed_args) self.func = func self.dtype = kwargs.pop("dtype", None) self.kwargs = kwargs self.cache = {} self._dense_result = False self._check_broadcast() self._get_fill_value() def get_result(self): from ._coo import COO if self.args is None: return NotImplemented if self._dense_result: args = [a.todense() if isinstance(a, COO) else a for a in self.args] return self.func(*args, **self.kwargs) if any(s == 0 for s in self.shape): data = np.empty((0,), dtype=self.fill_value.dtype) coords = np.empty((0, len(self.shape)), dtype=np.intp) return COO( coords, data, shape=self.shape, has_duplicates=False, fill_value=self.fill_value, ) data_list = [] coords_list = [] for mask in itertools.product(*[[True, False] if isinstance(arg, COO) else [None] for arg in self.args]): if not any(mask): continue r = self._get_func_coords_data(mask) if r is not None: coords_list.append(r[0]) data_list.append(r[1]) # Concatenate matches and mismatches data = np.concatenate(data_list) if len(data_list) else np.empty((0,), dtype=self.fill_value.dtype) coords = ( np.concatenate(coords_list, axis=1) if len(coords_list) else np.empty((0, len(self.shape)), dtype=np.intp) ) return COO( coords, data, shape=self.shape, has_duplicates=False, fill_value=self.fill_value, ).asformat(self.out_type, **self.out_kwargs) def _get_fill_value(self): """ A function that finds and returns the fill-value. Raises ------ ValueError If the fill-value is inconsistent. """ from ._coo import COO def get_zero_arg(x): if isinstance(x, COO): return np.atleast_1d(x.fill_value) if isinstance(x, np.generic | np.ndarray): return np.atleast_1d(x) return x zero_args = tuple(get_zero_arg(a) for a in self.args) # Some elemwise functions require a dtype argument, some abhorr it. try: fill_value_array = self.func(*zero_args, dtype=self.dtype, **self.kwargs) except TypeError: fill_value_array = self.func(*zero_args, **self.kwargs) try: fill_value = fill_value_array[(0,) * fill_value_array.ndim] except IndexError: zero_args = tuple( arg.fill_value if isinstance(arg, COO) else _zero_of_dtype(arg.dtype) for arg in self.args ) fill_value = self.func(*zero_args, **self.kwargs)[()] equivalent_fv = equivalent(fill_value, fill_value_array, loose=True).all() if not equivalent_fv and self.shape != self.ndarray_shape: raise ValueError( "Performing a mixed sparse-dense operation that would result in a dense array. " "Please make sure that func(sparse_fill_values, ndarrays) is a constant array." ) if not equivalent_fv: self._dense_result = True # Store dtype separately if needed. if self.dtype is not None: fill_value = fill_value.astype(self.dtype) self.fill_value = fill_value self.dtype = self.fill_value.dtype def _check_broadcast(self): """ Checks if adding the ndarrays changes the broadcast shape. Raises ------ ValueError If the check fails. 
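# A hedged sketch of the fill-value propagation implemented by
# `_get_fill_value` above: an element-wise function is also applied to the
# operands' fill values, so adding a scalar to an array with fill_value=1.0
# yields a sparse result whose fill value is 2.0. Only the public
# `sparse.random` constructor (defined later in this archive) is assumed.
import sparse

s = sparse.random((4, 4), density=0.3, fill_value=1.0)
t = s + 1
assert float(t.fill_value) == 2.0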
""" from ._coo import COO full_shape = _get_nary_broadcast_shape(*tuple(np.shape(arg) for arg in self.args)) non_ndarray_shape = _get_nary_broadcast_shape(*tuple(arg.shape for arg in self.args if isinstance(arg, COO))) ndarray_shape = _get_nary_broadcast_shape(*tuple(arg.shape for arg in self.args if isinstance(arg, np.ndarray))) self.shape = full_shape self.ndarray_shape = ndarray_shape self.non_ndarray_shape = non_ndarray_shape def _get_func_coords_data(self, mask): """ Gets the coords/data for a certain mask Parameters ---------- mask : tuple[Union[bool, NoneType]] The mask determining whether to match or unmatch. Returns ------- None or tuple The coords/data tuple for the given mask. """ from ._coo import COO matched_args = [arg for arg, m in zip(self.args, mask, strict=True) if m is not None and m] unmatched_args = [arg for arg, m in zip(self.args, mask, strict=True) if m is not None and not m] ndarray_args = [arg for arg, m in zip(self.args, mask, strict=True) if m is None] matched_broadcast_shape = _get_nary_broadcast_shape( *tuple(np.shape(arg) for arg in itertools.chain(matched_args, ndarray_args)) ) matched_arrays = self._match_coo(*matched_args, cache=self.cache, broadcast_shape=matched_broadcast_shape) func_args = [] m_arg = 0 for arg, m in zip(self.args, mask, strict=True): if m is None: func_args.append(np.broadcast_to(arg, matched_broadcast_shape)[tuple(matched_arrays[0].coords)]) continue if m: func_args.append(matched_arrays[m_arg].data) m_arg += 1 else: func_args.append(arg.fill_value) # Try our best to preserve the output dtype. try: func_data = self.func(*func_args, dtype=self.dtype, **self.kwargs) except TypeError: try: func_args = np.broadcast_arrays(*func_args) out = np.empty(func_args[0].shape, dtype=self.dtype) func_data = self.func(*func_args, out=out, **self.kwargs) except TypeError: func_data = self.func(*func_args, **self.kwargs).astype(self.dtype) unmatched_mask = ~equivalent(func_data, self.fill_value) if not unmatched_mask.any(): return None func_coords = matched_arrays[0].coords[:, unmatched_mask] func_data = func_data[unmatched_mask] if matched_arrays[0].shape != self.shape: params = _get_broadcast_parameters(matched_arrays[0].shape, self.shape) func_coords, func_data = _get_expanded_coords_data(func_coords, func_data, params, self.shape) if all(m is None or m for m in mask): return func_coords, func_data # Not really sorted but we need the sortedness. func_array = COO(func_coords, func_data, self.shape, has_duplicates=False, sorted=True) unmatched_mask = np.ones(func_array.nnz, dtype=np.bool_) for arg in unmatched_args: matched_idx = self._match_coo(func_array, arg, return_midx=True)[0] unmatched_mask[matched_idx] = False coords = np.asarray(func_array.coords[:, unmatched_mask], order="C") data = np.asarray(func_array.data[unmatched_mask], order="C") return coords, data @staticmethod def _match_coo(*args, **kwargs): """ Matches the coordinates for any number of input :obj:`COO` arrays. Equivalent to "sparse" broadcasting for all arrays. Parameters ---------- *args : Tuple[COO] The input :obj:`COO` arrays. return_midx : bool Whether to return matched indices or matched arrays. Matching only supported for two arrays. ``False`` by default. cache : dict Cache of things already matched. No cache by default. Returns ------- matched_idx : List[ndarray] The indices of matched elements in the original arrays. Only returned if ``return_midx`` is ``True``. matched_arrays : List[COO] The expanded, matched :obj:`COO` objects. 
Only returned if ``return_midx`` is ``False``. """ from ._coo import COO from ._coo.common import linear_loc cache = kwargs.pop("cache", None) return_midx = kwargs.pop("return_midx", False) broadcast_shape = kwargs.pop("broadcast_shape", None) if kwargs: raise ValueError(f"Unknown kwargs: {kwargs.keys()}") if return_midx and (len(args) != 2 or cache is not None): raise NotImplementedError("Matching indices only supported for two args, and no cache.") matched_arrays = [args[0]] cache_key = [id(args[0])] for arg2 in args[1:]: cache_key.append(id(arg2)) key = tuple(cache_key) if cache is not None and key in cache: matched_arrays = cache[key] continue cargs = [matched_arrays[0], arg2] current_shape = _get_broadcast_shape(matched_arrays[0].shape, arg2.shape) params = [_get_broadcast_parameters(arg.shape, current_shape) for arg in cargs] reduced_params = [all(p) for p in zip(*params, strict=True)] reduced_shape = _get_reduced_shape(arg2.shape, _rev_idx(reduced_params, arg2.ndim)) reduced_coords = [_get_reduced_coords(arg.coords, _rev_idx(reduced_params, arg.ndim)) for arg in cargs] linear = [linear_loc(rc, reduced_shape) for rc in reduced_coords] sorted_idx = [np.argsort(idx) for idx in linear] linear = [idx[s] for idx, s in zip(linear, sorted_idx, strict=True)] matched_idx = _match_arrays(*linear) if return_midx: return [sidx[midx] for sidx, midx in zip(sorted_idx, matched_idx, strict=True)] coords = [arg.coords[:, s] for arg, s in zip(cargs, sorted_idx, strict=True)] mcoords = [c[:, idx] for c, idx in zip(coords, matched_idx, strict=True)] mcoords = _get_matching_coords(mcoords, params) mdata = [arg.data[sorted_idx[0]][matched_idx[0]] for arg in matched_arrays] mdata.append(arg2.data[sorted_idx[1]][matched_idx[1]]) # The coords aren't truly sorted, but we don't need them, so it's # best to avoid the extra cost. 
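# Two hedged sketches of the behaviour the broadcast/matching code above
# provides, using only the public COO constructor and `sparse.random` as
# exercised by the tests in this archive. Broadcasting: operands with
# different but compatible shapes combine into the broadcast shape.
import numpy as np
import sparse

a = sparse.random((3, 1, 4), density=0.4)
b = sparse.random((1, 5, 4), density=0.4)
c = a * b
assert c.shape == (3, 5, 4)
np.testing.assert_allclose(c.todense(), a.todense() * b.todense())

# Coordinate matching: for a zero-fill product only coordinates present in
# both operands can contribute, so the result below is nonzero only at
# indices 1 and 2.
u = sparse.COO(np.array([[0, 1, 2]]), np.array([1.0, 2.0, 3.0]), shape=(4,))
v = sparse.COO(np.array([[1, 2, 3]]), np.array([10.0, 20.0, 30.0]), shape=(4,))
np.testing.assert_allclose((u * v).todense(), [0.0, 20.0, 60.0, 0.0])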
matched_arrays = [COO(mcoords, md, shape=current_shape, sorted=True, has_duplicates=False) for md in mdata] if cache is not None: cache[key] = matched_arrays if broadcast_shape is not None and matched_arrays[0].shape != broadcast_shape: params = _get_broadcast_parameters(matched_arrays[0].shape, broadcast_shape) coords, idx = _get_expanded_coords_data( matched_arrays[0].coords, np.arange(matched_arrays[0].nnz), params, broadcast_shape, ) matched_arrays = [ COO( coords, arr.data[idx], shape=broadcast_shape, sorted=True, has_duplicates=False, ) for arr in matched_arrays ] return matched_arrays def _rev_idx(arg, idx): if idx == 0: return arg[len(arg) :] return arg[-idx:] sparse-0.16.0a9/sparse/numba_backend/_utils.py000066400000000000000000000462421463475501500212660ustar00rootroot00000000000000import functools import operator import warnings from collections.abc import Iterable from functools import reduce from numbers import Integral import numba import numpy as np def assert_eq(x, y, check_nnz=True, compare_dtype=True, **kwargs): from ._coo import COO assert x.shape == y.shape if compare_dtype: assert x.dtype == y.dtype check_equal = ( np.array_equal if (np.issubdtype(x.dtype, np.integer) and np.issubdtype(y.dtype, np.integer)) or (np.issubdtype(x.dtype, np.flexible) and np.issubdtype(y.dtype, np.flexible)) else functools.partial(np.allclose, equal_nan=True) ) if isinstance(x, COO): assert is_canonical(x) if isinstance(y, COO): assert is_canonical(y) if isinstance(x, COO) and isinstance(y, COO) and check_nnz: assert np.array_equal(x.coords, y.coords) assert check_equal(x.data, y.data, **kwargs) assert x.fill_value == y.fill_value or (np.isnan(x.fill_value) and np.isnan(y.fill_value)) return if hasattr(x, "todense"): xx = x.todense() if check_nnz: assert_nnz(x, xx) else: xx = x if hasattr(y, "todense"): yy = y.todense() if check_nnz: assert_nnz(y, yy) else: yy = y assert check_equal(xx, yy, **kwargs) def assert_gcxs_slicing(s, x): """ Util function to test slicing of GCXS matrices after product multiplication. For simplicity, it tests only tensors with number of dimension = 3. Parameters ---------- s: sparse product matrix x: dense product matrix """ rng = np.random.default_rng() row = rng.integers(s.shape[s.ndim - 2]) assert np.allclose(s[0][row].data, [num for num in x[0][row] if num != 0]) # regression test col = s.shape[s.ndim - 1] for i in range(len(s.indices) // col): j = col * i k = col * (1 + i) s.data[j:k] = s.data[j:k][::-1] s.indices[j:k] = s.indices[j:k][::-1] assert np.array_equal(s[0][row].data, np.array([])) def assert_nnz(s, x): fill_value = s.fill_value if hasattr(s, "fill_value") else _zero_of_dtype(s.dtype) assert np.sum(~equivalent(x, fill_value)) == s.nnz def is_canonical(x): return not x.shape or ((np.diff(x.linear_loc()) > 0).all() and not equivalent(x.data, x.fill_value).any()) def _zero_of_dtype(dtype): """ Creates a ()-shaped 0-dimensional zero array of a given dtype. Parameters ---------- dtype : numpy.dtype The dtype for the array. Returns ------- np.ndarray The zero array. """ return np.zeros((), dtype=dtype)[()] @numba.jit(nopython=True, nogil=True) def algD(n, N, random_state): """ Random Sampling without Replacement Alg D proposed by J.S. 
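# A hedged usage sketch for the `assert_eq` test helper defined above. The
# import path is the one the test modules later in this archive use; it is an
# internal utility, so its location may change between releases.
import numpy as np
import sparse
from sparse.numba_backend._utils import assert_eq

x = np.eye(3)
s = sparse.COO.from_numpy(x)
assert_eq(s, x)  # checks shape, dtype, nnz and values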
Vitter in Faster Methods for Random Sampling Parameters: n = sample size (nnz) N = size of system (elements) random_state = seed for random number generation """ n = np.intp(n + 1) N = np.intp(N) qu1 = N - n + 1 Vprime = np.exp(np.log(random_state.random()) / n) i = 0 arr = np.zeros(n - 1, dtype=np.intp) arr[-1] = -1 while n > 1: nmin1inv = 1 / (n - 1) while True: while True: X = N * (1 - Vprime) S = np.intp(X) if qu1 > S: break Vprime = np.exp(np.log(random_state.random()) / n) y1 = np.exp(np.log(random_state.random() * N / qu1) * nmin1inv) Vprime = y1 * (1 - X / N) * (qu1 / (qu1 - S)) if Vprime <= 1: break y2 = 1 top = N - 1 if n - 1 > S: bottom = N - n limit = N - S else: bottom = N - S - 1 limit = qu1 t = N - 1 while t >= limit: y2 *= top / bottom top -= 1 bottom -= 1 t -= 1 if y1 * np.exp(np.log(y2) / nmin1inv) <= N / (N - X): Vprime = np.exp(np.log(random_state.random()) * nmin1inv) break Vprime = np.exp(np.log(random_state.random()) / n) arr[i] = arr[i - 1] + S + 1 i += 1 N = N - S - 1 n -= 1 qu1 = qu1 - S return arr @numba.jit(nopython=True, nogil=True) def algA(n, N, random_state): """ Random Sampling without Replacement Alg A proposed by J.S. Vitter in Faster Methods for Random Sampling Parameters: n = sample size (nnz) N = size of system (elements) random_state = seed for random number generation """ n = np.intp(n) N = np.intp(N) arr = np.zeros(n, dtype=np.intp) arr[-1] = -1 i = 0 top = N - n while n >= 2: V = random_state.random() S = 0 quot = top / N while quot > V: S += 1 top -= 1 N -= 1 quot *= top / N arr[i] = arr[i - 1] + S + 1 i += 1 N -= 1 n -= 1 S = np.intp(N * random_state.random()) arr[i] = arr[i - 1] + S + 1 i += 1 return arr @numba.jit(nopython=True, nogil=True) def reverse(inv, N): """ If density of random matrix is greater than .5, it is faster to sample states not included Parameters: arr = np.array(np.intp) of indices to be excluded from sample N = size of the system (elements) """ N = np.intp(N) a = np.zeros(np.intp(N - len(inv)), dtype=np.intp) j = 0 k = 0 for i in range(N): if j == len(inv): a[k:] = np.arange(i, N) break if i == inv[j]: j += 1 else: a[k] = i k += 1 return a default_rng = np.random.default_rng() def random( shape, density=None, nnz=None, random_state=None, data_rvs=None, format="coo", fill_value=None, idx_dtype=None, **kwargs, ): """Generate a random sparse multidimensional array Parameters ---------- shape : Tuple[int] Shape of the array density : float, optional Density of the generated array; default is 0.01. Mutually exclusive with `nnz`. nnz : int, optional Number of nonzero elements in the generated array. Mutually exclusive with `density`. random_state : Union[numpy.random.Generator, int], optional Random number generator or random seed. If not given, the singleton numpy.random will be used. This random state will be used for sampling the sparsity structure, but not necessarily for sampling the values of the structurally nonzero entries of the matrix. data_rvs : Callable Data generation callback. Must accept one single parameter: number of :code:`nnz` elements, and return one single NumPy array of exactly that length. format : str The format to return the output array in. fill_value : scalar The fill value of the output array. Returns ------- SparseArray The generated random matrix. See Also -------- :obj:`scipy.sparse.rand` : Equivalent Scipy function. :obj:`numpy.random.rand` : Similar Numpy function. 
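# A hedged sketch of the sampling entry point documented above: `density` and
# `nnz` are mutually exclusive ways of fixing the number of stored elements,
# and an integer `random_state` seeds a NumPy Generator. The exact counts
# below assume nnz = int(elements * density) and sampling without replacement,
# as described in the docstring.
import sparse

s = sparse.random((10, 10), density=0.1, random_state=42)
assert s.nnz == 10
t = sparse.random((10, 10), nnz=7, random_state=42, format="gcxs")
assert t.shape == (10, 10) and t.nnz == 7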
Examples -------- >>> from sparse import random >>> from scipy import stats >>> rvs = lambda x: stats.poisson(25, loc=10).rvs(x, random_state=np.random.RandomState(1)) >>> s = random((2, 3, 4), density=0.25, random_state=np.random.RandomState(1), data_rvs=rvs) >>> s.todense() # doctest: +NORMALIZE_WHITESPACE array([[[ 0, 0, 0, 0], [34, 0, 29, 30], [ 0, 0, 0, 0]], [[33, 0, 0, 34], [34, 0, 0, 0], [ 0, 0, 0, 0]]]) """ # Copied, in large part, from scipy.sparse.random # See https://github.com/scipy/scipy/blob/main/LICENSE.txt from ._coo import COO if density is not None and nnz is not None: raise ValueError("'density' and 'nnz' are mutually exclusive") if density is None: density = 0.01 if not (0 <= density <= 1): raise ValueError(f"density {density} is not in the unit interval") elements = np.prod(shape, dtype=np.intp) if nnz is None: nnz = int(elements * density) if not (0 <= nnz <= elements): raise ValueError(f"cannot generate {nnz} nonzero elements for an array with {elements} total elements") if random_state is None: random_state = default_rng elif isinstance(random_state, Integral): random_state = np.random.default_rng(random_state) if data_rvs is None: data_rvs = random_state.random if nnz == elements or density >= 1: ind = np.arange(elements) elif nnz < 2: ind = random_state.choice(elements, nnz) # Faster to find non-sampled indices and remove them for dens > .5 elif elements - nnz < 2: ind = reverse(random_state.choice(elements, elements - nnz), elements) elif nnz > elements / 2: nnztemp = elements - nnz # Using algorithm A for dens > .1 if elements > 10 * nnztemp: ind = reverse( algD(nnztemp, elements, random_state), elements, ) else: ind = reverse( algA(nnztemp, elements, random_state), elements, ) else: ind = algD(nnz, elements, random_state) if elements > 10 * nnz else algA(nnz, elements, random_state) data = data_rvs(nnz) ar = COO( ind[None, :], data, shape=elements, fill_value=fill_value, ).reshape(shape) if idx_dtype: if can_store(idx_dtype, max(shape)): ar.coords = ar.coords.astype(idx_dtype) else: raise ValueError(f"cannot cast array with shape {shape} to dtype {idx_dtype}.") return ar.asformat(format, **kwargs) def isscalar(x): from ._sparse_array import SparseArray return not isinstance(x, SparseArray) and np.isscalar(x) def random_value_array(value, fraction): def replace_values(n): i = int(n * fraction) ar = np.empty((n,), dtype=np.float64) ar[:i] = value ar[i:] = default_rng.random(n - i) return ar return replace_values def normalize_axis(axis, ndim): """ Normalize negative axis indices to their positive counterpart for a given number of dimensions. Parameters ---------- axis : Union[int, Iterable[int], None] The axis indices. ndim : int Number of dimensions to normalize axis indices against. Returns ------- axis The normalized axis indices. """ if axis is None: return None if isinstance(axis, Integral): axis = int(axis) if axis < 0: axis += ndim if axis >= ndim or axis < 0: raise ValueError(f"Invalid axis index {axis} for ndim={ndim}") return axis if isinstance(axis, Iterable): if not all(isinstance(a, Integral) for a in axis): raise ValueError(f"axis {axis} not understood") return tuple(normalize_axis(a, ndim) for a in axis) raise ValueError(f"axis {axis} not understood") def equivalent(x, y, /, loose=False): """ Checks the equivalence of two scalars or arrays with broadcasting. Assumes a consistent dtype. 
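# A hedged sketch for `normalize_axis` above: negative axes wrap to their
# positive counterparts and tuples are normalised element-wise. The import
# path is the internal one used by the tests in this archive.
from sparse.numba_backend._utils import normalize_axis

assert normalize_axis(-1, 3) == 2
assert normalize_axis((0, -2), 3) == (0, 1)
assert normalize_axis(None, 3) is None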
Parameters ---------- x : scalar or numpy.ndarray y : scalar or numpy.ndarray Returns ------- equivalent : scalar or numpy.ndarray The element-wise comparison of where two arrays are equivalent. Examples -------- >>> equivalent(1, 1) True >>> equivalent(np.nan, np.nan + 1) True >>> equivalent(1, 2) False >>> equivalent(np.inf, np.inf) True >>> equivalent(np.PZERO, np.NZERO) False """ x = np.asarray(x) y = np.asarray(y) # Can't contain NaNs dt = np.result_type(x.dtype, y.dtype) if not any(np.issubdtype(dt, t) for t in [np.floating, np.complexfloating]): return x == y if loose: if np.issubdtype(dt, np.complexfloating): return equivalent(x.real, y.real) & equivalent(x.imag, y.imag) # TODO: Rec array handling return (x == y) | ((x != x) & (y != y)) if x.size == 0 or y.size == 0: shape = np.broadcast_shapes(x.shape, y.shape) return np.empty(shape, dtype=np.bool_) x, y = np.broadcast_arrays(x[..., None], y[..., None]) return (x.astype(dt).view(np.uint8) == y.astype(dt).view(np.uint8)).all(axis=-1) # copied from zarr # See https://github.com/zarr-developers/zarr-python/blob/main/zarr/util.py def human_readable_size(size): if size < 2**10: return str(size) if size < 2**20: return f"{size / 2**10:.1f}K" if size < 2**30: return f"{size / 2**20:.1f}M" if size < 2**40: return f"{size / 2**30:.1f}G" if size < 2**50: return f"{size / 2**40:.1f}T" return f"{size / 2**50:.1f}P" def html_table(arr): table = [""] headings = ["Format", "Data Type", "Shape", "nnz", "Density", "Read-only"] density = np.float64(arr.nnz) / np.float64(arr.size) info = [ type(arr).__name__.lower(), str(arr.dtype), str(arr.shape), str(arr.nnz), str(density), ] # read-only info.append(str(not hasattr(arr, "__setitem__"))) if hasattr(arr, "nbytes"): headings.append("Size") info.append(human_readable_size(arr.nbytes)) headings.append("Storage ratio") info.append( f"{np.float64(arr.nbytes) / np.float64(reduce(operator.mul, arr.shape, 1) * arr.dtype.itemsize):.2f}" ) # compressed_axes if type(arr).__name__ == "GCXS": headings.append("Compressed Axes") info.append(str(arr.compressed_axes)) for h, i in zip(headings, info, strict=True): table.append(f'') table.append("
{h}{i}
") return "".join(table) def check_compressed_axes(ndim, compressed_axes): """ Checks if the given compressed_axes are compatible with the shape of the array. Parameters ---------- ndim : int compressed_axes : Iterable Raises ------ ValueError If the compressed_axes are incompatible with the number of dimensions """ if compressed_axes is None: return if isinstance(ndim, Iterable): ndim = len(ndim) if not isinstance(compressed_axes, Iterable): raise ValueError("compressed_axes must be an iterable") if len(compressed_axes) == ndim: raise ValueError("cannot compress all axes") if not np.array_equal(list(set(compressed_axes)), compressed_axes): raise ValueError("axes must be sorted without repeats") if not all(isinstance(a, Integral) for a in compressed_axes): raise ValueError("axes must be represented with integers") if min(compressed_axes) < 0 or max(compressed_axes) >= ndim: raise ValueError("axis out of range") def check_fill_value(x, /, *, accept_fv=None) -> None: """Raises on incorrect fill-values. Parameters ---------- x : SparseArray The array to check accept_fv : scalar or list of scalar, optional The list of accepted fill-values. The default accepts only zero. Raises ------ ValueError If the fill-value doesn't match. """ if accept_fv is None: accept_fv = [0] if not isinstance(accept_fv, Iterable): accept_fv = [accept_fv] if not any(equivalent(fv, x.fill_value, loose=True) for fv in accept_fv): raise ValueError(f"{x.fill_value=} but should be in {accept_fv}.") def check_zero_fill_value(*args): """ Checks if all the arguments have zero fill-values. Parameters ---------- *args : Iterable[SparseArray] Raises ------ ValueError If all arguments don't have zero fill-values. Examples -------- >>> import sparse >>> s1 = sparse.random((10,), density=0.5) >>> s2 = sparse.random((10,), density=0.5, fill_value=0.5) >>> check_zero_fill_value(s1) >>> check_zero_fill_value(s2) Traceback (most recent call last): ... ValueError: This operation requires zero fill values, but argument 0 had a fill value of 0.5. >>> check_zero_fill_value(s1, s2) Traceback (most recent call last): ... ValueError: This operation requires zero fill values, but argument 1 had a fill value of 0.5. """ for i, arg in enumerate(args): if hasattr(arg, "fill_value") and not equivalent(arg.fill_value, _zero_of_dtype(arg.dtype)): raise ValueError( "This operation requires zero fill values, " f"but argument {i:d} had a fill value of {arg.fill_value!s}." ) def check_consistent_fill_value(arrays): """ Checks if all the arguments have consistent fill-values. Parameters ---------- args : Iterable[SparseArray] Raises ------ ValueError If all elements of :code:`arrays` don't have the same fill-value. Examples -------- >>> import sparse >>> s1 = sparse.random((10,), density=0.5, fill_value=0.1) >>> s2 = sparse.random((10,), density=0.5, fill_value=0.5) >>> check_consistent_fill_value([s1, s1]) >>> check_consistent_fill_value([s1, s2]) # doctest: +NORMALIZE_WHITESPACE Traceback (most recent call last): ... ValueError: This operation requires consistent fill-values, but argument 1 had a fill value of 0.5,\ which is different from a fill_value of 0.1 in the first argument. 
""" arrays = list(arrays) from ._sparse_array import SparseArray if not all(isinstance(s, SparseArray) for s in arrays): raise ValueError("All arrays must be instances of SparseArray.") if len(arrays) == 0: raise ValueError("At least one array required.") fv = arrays[0].fill_value for i, arg in enumerate(arrays): if not equivalent(fv, arg.fill_value): raise ValueError( "This operation requires consistent fill-values, " f"but argument {i:d} had a fill value of {arg.fill_value!s}, which " f"is different from a fill_value of {fv!s} in the first " "argument." ) def get_out_dtype(arr, scalar): out_type = arr.dtype if not can_store(out_type, scalar): out_type = np.min_scalar_type(scalar) return out_type def can_store(dtype, scalar): try: with warnings.catch_warnings(): warnings.simplefilter("ignore") warnings.filterwarnings("error", "out-of-bound", DeprecationWarning) return np.array(scalar, dtype=dtype) == np.array(scalar) except (ValueError, OverflowError): return False def is_unsigned_dtype(dtype): return np.issubdtype(dtype, np.integer) and np.iinfo(dtype).min == 0 def convert_format(format): from ._sparse_array import SparseArray if isinstance(format, type): if not issubclass(format, SparseArray): raise ValueError(f"Invalid format: {format}") return format.__name__.lower() if isinstance(format, str): return format raise ValueError(f"Invalid format: {format}") sparse-0.16.0a9/sparse/numba_backend/tests/000077500000000000000000000000001463475501500205475ustar00rootroot00000000000000sparse-0.16.0a9/sparse/numba_backend/tests/conftest.py000066400000000000000000000002141463475501500227430ustar00rootroot00000000000000import pytest @pytest.fixture(scope="session") def rng(): from sparse.numba_backend._utils import default_rng return default_rng sparse-0.16.0a9/sparse/numba_backend/tests/test_array_function.py000066400000000000000000000071451463475501500252120ustar00rootroot00000000000000import sparse from sparse.numba_backend._settings import NEP18_ENABLED from sparse.numba_backend._utils import assert_eq import pytest import numpy as np import scipy if not NEP18_ENABLED: pytest.skip("NEP18 is not enabled", allow_module_level=True) @pytest.mark.parametrize( "func", [ np.mean, np.std, np.var, np.sum, lambda x: np.sum(x, axis=0), lambda x: np.transpose(x), ], ) def test_unary(func): y = sparse.random((50, 50), density=0.25) x = y.todense() xx = func(x) yy = func(y) assert_eq(xx, yy) @pytest.mark.parametrize("arg_order", [(0, 1), (1, 0), (1, 1)]) @pytest.mark.parametrize("func", [np.dot, np.result_type, np.tensordot, np.matmul]) def test_binary(func, arg_order): y = sparse.random((50, 50), density=0.25) x = y.todense() xx = func(x, x) args = [(x, y)[i] for i in arg_order] yy = func(*args) if isinstance(xx, np.ndarray): assert_eq(xx, yy) else: # result_type returns a dtype assert xx == yy def test_stack(): """stack(), by design, does not allow for mixed type inputs""" y = sparse.random((50, 50), density=0.25) x = y.todense() xx = np.stack([x, x]) yy = np.stack([y, y]) assert_eq(xx, yy) @pytest.mark.parametrize( "arg_order", [(0, 0, 1), (0, 1, 0), (0, 1, 1), (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1)], ) @pytest.mark.parametrize("func", [lambda a, b, c: np.where(a.astype(bool), b, c)]) def test_ternary(func, arg_order): y = sparse.random((50, 50), density=0.25) x = y.todense() xx = func(x, x, x) args = [(x, y)[i] for i in arg_order] yy = func(*args) assert_eq(xx, yy) @pytest.mark.parametrize("func", [np.shape, np.size, np.ndim]) def test_property(func): y = sparse.random((50, 50), density=0.25) x = 
y.todense() xx = func(x) yy = func(y) assert xx == yy def test_broadcast_to_scalar(): s = sparse.COO.from_numpy([0, 0, 1, 2]) actual = np.broadcast_to(np.zeros_like(s, shape=()), (3,)) expected = np.broadcast_to(np.zeros_like(s.todense(), shape=()), (3,)) assert isinstance(actual, sparse.COO) assert_eq(actual, expected) def test_zeros_like_order(): s = sparse.COO.from_numpy([0, 0, 1, 2]) actual = np.zeros_like(s, order="C") expected = np.zeros_like(s.todense(), order="C") assert isinstance(actual, sparse.COO) assert_eq(actual, expected) @pytest.mark.parametrize("format", ["dok", "gcxs", "coo"]) def test_format(format): s = sparse.random((5, 5), density=0.2, format=format) assert s.format == format class TestAsarray: np_eye = np.eye(5) @pytest.mark.parametrize( "input", [ np_eye, scipy.sparse.csr_matrix(np_eye), scipy.sparse.csc_matrix(np_eye), 4, np.array(5), np.arange(12).reshape((2, 3, 2)), sparse.COO.from_numpy(np_eye), sparse.GCXS.from_numpy(np_eye), sparse.DOK.from_numpy(np_eye), ], ) @pytest.mark.parametrize("dtype", [np.int64, np.float64, np.complex128]) @pytest.mark.parametrize("format", ["dok", "gcxs", "coo"]) def test_asarray(self, input, dtype, format): if format == "dok" and (np.isscalar(input) or input.ndim == 0): # scalars and 0-D arrays aren't supported in DOK format return s = sparse.asarray(input, dtype=dtype, format=format) actual = s.todense() if hasattr(s, "todense") else s expected = input.todense() if hasattr(input, "todense") else np.asarray(input) np.testing.assert_equal(actual, expected) sparse-0.16.0a9/sparse/numba_backend/tests/test_compressed.py000066400000000000000000000324661463475501500243370ustar00rootroot00000000000000import sparse from sparse.numba_backend._compressed import GCXS from sparse.numba_backend._utils import assert_eq, equivalent import pytest import numpy as np @pytest.fixture(scope="module", params=["f8", "f4", "i8", "i4"]) def random_sparse(request, rng): dtype = request.param if np.issubdtype(dtype, np.integer): def data_rvs(n): return rng.integers(-1000, 1000, n) else: data_rvs = None return sparse.random((20, 30, 40), density=0.25, format="gcxs", data_rvs=data_rvs, random_state=rng).astype(dtype) @pytest.fixture(scope="module", params=["f8", "f4", "i8", "i4"]) def random_sparse_small(request, rng): dtype = request.param if np.issubdtype(dtype, np.integer): def data_rvs(n): return rng.integers(-10, 10, n) else: data_rvs = None return sparse.random((20, 30, 40), density=0.25, format="gcxs", data_rvs=data_rvs, random_state=rng).astype(dtype) @pytest.mark.parametrize( "reduction, kwargs", [ ("sum", {}), ("sum", {"dtype": np.float32}), ("mean", {}), ("mean", {"dtype": np.float32}), ("prod", {}), ("max", {}), ("min", {}), ("std", {}), ("var", {}), ], ) @pytest.mark.parametrize("axis", [None, 0, 1, 2, (0, 2), -3, (1, -1)]) @pytest.mark.parametrize("keepdims", [True, False]) def test_reductions(reduction, random_sparse, axis, keepdims, kwargs): x = random_sparse y = x.todense() xx = getattr(x, reduction)(axis=axis, keepdims=keepdims, **kwargs) yy = getattr(y, reduction)(axis=axis, keepdims=keepdims, **kwargs) assert_eq(xx, yy) @pytest.mark.xfail(reason=("Setting output dtype=float16 produces results inconsistent with numpy")) @pytest.mark.filterwarnings("ignore:overflow") @pytest.mark.parametrize( "reduction, kwargs", [("sum", {"dtype": np.float16}), ("mean", {"dtype": np.float16})], ) @pytest.mark.parametrize("axis", [None, 0, 1, 2, (0, 2)]) def test_reductions_float16(random_sparse, reduction, kwargs, axis): x = random_sparse y = x.todense() xx 
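# A hedged sketch mirroring `TestAsarray` above: `sparse.asarray` accepts
# dense arrays, scipy.sparse matrices and existing sparse arrays, with
# `format` and `dtype` keywords. Only APIs exercised by the surrounding tests
# are used.
import numpy as np
import sparse

s = sparse.asarray(np.eye(3), format="coo", dtype=np.float64)
assert isinstance(s, sparse.COO) and s.nnz == 3
np.testing.assert_array_equal(s.todense(), np.eye(3))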
= getattr(x, reduction)(axis=axis, **kwargs) yy = getattr(y, reduction)(axis=axis, **kwargs) assert_eq(xx, yy, atol=1e-2) @pytest.mark.parametrize("reduction,kwargs", [("any", {}), ("all", {})]) @pytest.mark.parametrize("axis", [None, 0, 1, 2, (0, 2), -3, (1, -1)]) @pytest.mark.parametrize("keepdims", [True, False]) def test_reductions_bool(random_sparse, reduction, kwargs, axis, keepdims): y = np.zeros((2, 3, 4), dtype=bool) y[0] = True y[1, 1, 1] = True x = sparse.COO.from_numpy(y) xx = getattr(x, reduction)(axis=axis, keepdims=keepdims, **kwargs) yy = getattr(y, reduction)(axis=axis, keepdims=keepdims, **kwargs) assert_eq(xx, yy) @pytest.mark.parametrize( "reduction,kwargs", [ (np.max, {}), (np.sum, {}), (np.sum, {"dtype": np.float32}), (np.mean, {}), (np.mean, {"dtype": np.float32}), (np.prod, {}), (np.min, {}), ], ) @pytest.mark.parametrize("axis", [None, 0, 1, 2, (0, 2), -1, (0, -1)]) @pytest.mark.parametrize("keepdims", [True, False]) def test_ufunc_reductions(random_sparse, reduction, kwargs, axis, keepdims): x = random_sparse y = x.todense() xx = reduction(x, axis=axis, keepdims=keepdims, **kwargs) yy = reduction(y, axis=axis, keepdims=keepdims, **kwargs) assert_eq(xx, yy) # If not a scalar/1 element array, must be a sparse array if xx.size > 1: assert isinstance(xx, GCXS) @pytest.mark.parametrize( "reduction,kwargs", [ (np.max, {}), (np.sum, {"axis": 0}), (np.prod, {"keepdims": True}), (np.minimum.reduce, {"axis": 0}), ], ) @pytest.mark.parametrize("fill_value", [0, 1.0, -1, -2.2, 5.0]) def test_ufunc_reductions_kwargs(reduction, kwargs, fill_value): x = sparse.random((2, 3, 4), density=0.5, format="gcxs", fill_value=fill_value) y = x.todense() xx = reduction(x, **kwargs) yy = reduction(y, **kwargs) assert_eq(xx, yy) # If not a scalar/1 element array, must be a sparse array if xx.size > 1: assert isinstance(xx, GCXS) @pytest.mark.parametrize( "a,b", [ [(3, 4), (3, 4)], [(12,), (3, 4)], [(12,), (3, -1)], [(3, 4), (12,)], [(3, 4), (-1, 4)], [(3, 4), (3, -1)], [(2, 3, 4, 5), (8, 15)], [(2, 3, 4, 5), (24, 5)], [(2, 3, 4, 5), (20, 6)], [(), ()], ], ) def test_reshape(a, b): s = sparse.random(a, density=0.5, format="gcxs") x = s.todense() assert_eq(x.reshape(b), s.reshape(b)) def test_reshape_same(): s = sparse.random((3, 5), density=0.5, format="gcxs") assert s.reshape(s.shape) is s @pytest.mark.parametrize( "a,b", [ [(3, 4, 5), (2, 1, 0)], [(12,), None], [(9, 10), (1, 0)], [(4, 3, 5), (1, 0, 2)], [(5, 4, 3), (0, 2, 1)], [(3, 4, 5, 6), (0, 2, 1, 3)], ], ) def test_tranpose(a, b): s = sparse.random(a, density=0.5, format="gcxs") x = s.todense() assert_eq(x.transpose(b), s.transpose(b)) @pytest.mark.parametrize("fill_value_in", [0, np.inf, np.nan, 5, None]) @pytest.mark.parametrize("fill_value_out", [0, np.inf, np.nan, 5, None]) @pytest.mark.parametrize("format", [sparse.COO, sparse._compressed.CSR]) def test_to_scipy_sparse(fill_value_in, fill_value_out, format): s = sparse.random((3, 5), density=0.5, format=format, fill_value=fill_value_in) if not ((fill_value_in in {0, None} and fill_value_out in {0, None}) or equivalent(fill_value_in, fill_value_out)): with pytest.raises(ValueError, match=r"fill_value=.* but should be in .*\."): s.to_scipy_sparse(accept_fv=fill_value_out) return sps_matrix = s.to_scipy_sparse(accept_fv=fill_value_in) s2 = format.from_scipy_sparse(sps_matrix, fill_value=fill_value_out) assert_eq(s, s2) def test_tocoo(): coo = sparse.random((5, 6), density=0.5) b = GCXS.from_coo(coo) assert_eq(b.tocoo(), coo) @pytest.mark.parametrize("complex", [True, False]) def 
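# A hedged sketch matching the GCXS reduction tests above: reductions on a
# GCXS array agree with the same reduction on its densified counterpart, and
# non-scalar results stay sparse. Only `sparse.random` and `.todense()` are
# assumed.
import numpy as np
import sparse

g = sparse.random((4, 5), density=0.3, format="gcxs")
np.testing.assert_allclose(g.sum(axis=0).todense(), g.todense().sum(axis=0))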
test_complex_methods(complex): x = np.array([1 + 2j, 2 - 1j, 0, 1, 0]) if complex else np.array([1, 2, 0, 0, 0]) s = GCXS.from_numpy(x) assert_eq(s.imag, x.imag) assert_eq(s.real, x.real) assert_eq(s.conj(), x.conj()) @pytest.mark.parametrize( "index", [ # Integer 0, 1, -1, (1, 1, 1), # Pure slices (slice(0, 2),), (slice(None, 2), slice(None, 2)), (slice(1, None), slice(1, None)), (slice(None, None),), (slice(None, None, -1),), (slice(None, 2, -1), slice(None, 2, -1)), (slice(1, None, 2), slice(1, None, 2)), (slice(None, None, 2),), (slice(None, 2, -1), slice(None, 2, -2)), (slice(1, None, 2), slice(1, None, 1)), (slice(None, None, -2),), # Combinations (0, slice(0, 2)), (slice(0, 1), 0), (None, slice(1, 3), 0), (slice(0, 3), None, 0), (slice(1, 2), slice(2, 4)), (slice(1, 2), slice(None, None)), (slice(1, 2), slice(None, None), 2), (slice(1, 2, 2), slice(None, None), 2), (slice(1, 2, None), slice(None, None, 2), 2), (slice(1, 2, -2), slice(None, None), -2), (slice(1, 2, None), slice(None, None, -2), 2), (slice(1, 2, -1), slice(None, None), -1), (slice(1, 2, None), slice(None, None, -1), 2), (slice(2, 0, -1), slice(None, None), -1), (slice(-2, None, None),), (slice(-1, None, None), slice(-2, None, None)), # With ellipsis (Ellipsis, slice(1, 3)), (1, Ellipsis, slice(1, 3)), (slice(0, 1), Ellipsis), (Ellipsis, None), (None, Ellipsis), (1, Ellipsis), (1, Ellipsis, None), (1, 1, 1, Ellipsis), (Ellipsis, 1, None), # Pathological - Slices larger than array (slice(None, 1000)), (slice(None), slice(None, 1000)), (slice(None), slice(1000, -1000, -1)), (slice(None), slice(1000, -1000, -50)), # Pathological - Wrong ordering of start/stop (slice(5, 0),), (slice(0, 5, -1),), ], ) @pytest.mark.parametrize("compressed_axes", [(0,), (1,), (2,), (0, 1), (0, 2), (1, 2)]) def test_slicing(index, compressed_axes): s = sparse.random((2, 3, 4), density=0.5, format="gcxs", compressed_axes=compressed_axes) x = s.todense() assert_eq(x[index], s[index]) @pytest.mark.parametrize( "index", [ ([1, 0], 0), (1, [0, 2]), (0, [1, 0], 0), (1, [2, 0], 0), ([True, False], slice(1, None), slice(-2, None)), (slice(1, None), slice(-2, None), [True, False, True, False]), ([1, 0],), (Ellipsis, [2, 1, 3]), (slice(None), [2, 1, 2]), (1, [2, 0, 1]), ], ) @pytest.mark.parametrize("compressed_axes", [(0,), (1,), (2,), (0, 1), (0, 2), (1, 2)]) def test_advanced_indexing(index, compressed_axes): s = sparse.random((2, 3, 4), density=0.5, format="gcxs", compressed_axes=compressed_axes) x = s.todense() assert_eq(x[index], s[index]) @pytest.mark.parametrize( "index", [ (Ellipsis, Ellipsis), (1, 1, 1, 1), (slice(None),) * 4, 5, -5, "foo", [True, False, False], 0.5, [0.5], {"potato": "kartoffel"}, ([[0, 1]],), ], ) def test_slicing_errors(index): s = sparse.random((2, 3, 4), density=0.5, format="gcxs") with pytest.raises(IndexError): s[index] def test_change_compressed_axes(): coo = sparse.random((3, 4, 5), density=0.5) s = GCXS.from_coo(coo, compressed_axes=(0, 1)) b = GCXS.from_coo(coo, compressed_axes=(1, 2)) assert_eq(s, b) s.change_compressed_axes((1, 2)) assert_eq(s, b) def test_concatenate(): xx = sparse.random((2, 3, 4), density=0.5, format="gcxs") x = xx.todense() yy = sparse.random((5, 3, 4), density=0.5, format="gcxs") y = yy.todense() zz = sparse.random((4, 3, 4), density=0.5, format="gcxs") z = zz.todense() assert_eq(np.concatenate([x, y, z], axis=0), sparse.concatenate([xx, yy, zz], axis=0)) xx = sparse.random((5, 3, 1), density=0.5, format="gcxs") x = xx.todense() yy = sparse.random((5, 3, 3), density=0.5, format="gcxs") y = 
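# A hedged sketch matching the GCXS slicing tests above: basic and advanced
# indexing on a GCXS array behaves like NumPy indexing on the densified array.
import numpy as np
import sparse

g = sparse.random((2, 3, 4), density=0.5, format="gcxs")
d = g.todense()
np.testing.assert_allclose(g[0, :2].todense(), d[0, :2])
np.testing.assert_allclose(g[1, [2, 0], 0].todense(), d[1, [2, 0], 0])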
yy.todense() zz = sparse.random((5, 3, 2), density=0.5, format="gcxs") z = zz.todense() assert_eq(np.concatenate([x, y, z], axis=2), sparse.concatenate([xx, yy, zz], axis=2)) assert_eq(np.concatenate([x, y, z], axis=-1), sparse.concatenate([xx, yy, zz], axis=-1)) @pytest.mark.parametrize("axis", [0, 1]) @pytest.mark.parametrize("func", [sparse.stack, sparse.concatenate]) def test_concatenate_mixed(func, axis): s = sparse.random((10, 10), density=0.5, format="gcxs") d = s.todense() with pytest.raises(ValueError): func([d, s, s], axis=axis) def test_concatenate_noarrays(): with pytest.raises(ValueError): sparse.concatenate([]) @pytest.mark.parametrize("shape", [(5,), (2, 3, 4), (5, 2)]) @pytest.mark.parametrize("axis", [0, 1, -1]) def test_stack(shape, axis): xx = sparse.random(shape, density=0.5, format="gcxs") x = xx.todense() yy = sparse.random(shape, density=0.5, format="gcxs") y = yy.todense() zz = sparse.random(shape, density=0.5, format="gcxs") z = zz.todense() assert_eq(np.stack([x, y, z], axis=axis), sparse.stack([xx, yy, zz], axis=axis)) @pytest.mark.parametrize("in_shape", [(5, 5), 62, (3, 3, 3)]) def test_flatten(in_shape): s = sparse.random(in_shape, format="gcxs", density=0.5) x = s.todense() a = s.flatten() e = x.flatten() assert_eq(e, a) def test_gcxs_valerr(): a = np.arange(300) with pytest.raises(ValueError): GCXS.from_numpy(a, idx_dtype=np.int8) def test_upcast(): a = sparse.random((50, 50, 50), density=0.1, format="coo", idx_dtype=np.uint8) b = a.asformat("gcxs") assert b.indices.dtype == np.uint16 a = sparse.random((8, 7, 6), density=0.5, format="gcxs", idx_dtype=np.uint8) b = sparse.random((6, 6, 6), density=0.8, format="gcxs", idx_dtype=np.uint8) assert sparse.concatenate((a, a)).indptr.dtype == np.uint16 assert sparse.stack((b, b)).indptr.dtype == np.uint16 def test_from_coo(): a = sparse.random((5, 5, 5), density=0.1, format="coo") b = GCXS(a) assert_eq(a, b) def test_from_coo_valerr(): a = sparse.random((25, 25, 25), density=0.01, format="coo") with pytest.raises(ValueError): GCXS.from_coo(a, idx_dtype=np.int8) @pytest.mark.parametrize( "pad_width", [ 2, (2, 1), ((2), (1)), ((1, 2), (4, 5), (7, 8)), ], ) @pytest.mark.parametrize("constant_values", [0, 1, 150, np.nan]) def test_pad_valid(pad_width, constant_values): y = sparse.random((50, 50, 3), density=0.15, fill_value=constant_values, format="gcxs") x = y.todense() xx = np.pad(x, pad_width=pad_width, constant_values=constant_values) yy = np.pad(y, pad_width=pad_width, constant_values=constant_values) assert_eq(xx, yy) @pytest.mark.parametrize( "pad_width", [ ((2, 1), (5, 7)), ], ) @pytest.mark.parametrize("constant_values", [150, 2, (1, 2)]) def test_pad_invalid(pad_width, constant_values, fill_value=0): y = sparse.random((50, 50, 3), density=0.15, format="gcxs") with pytest.raises(ValueError): np.pad(y, pad_width, constant_values=constant_values) sparse-0.16.0a9/sparse/numba_backend/tests/test_compressed_2d.py000066400000000000000000000065741463475501500247250ustar00rootroot00000000000000import sparse from sparse import COO from sparse.numba_backend._compressed.compressed import CSC, CSR, GCXS from sparse.numba_backend._utils import assert_eq import pytest import numpy as np import scipy.sparse import scipy.stats @pytest.fixture(scope="module", params=[CSR, CSC]) def cls(request): return request.param @pytest.fixture(scope="module", params=["f8", "f4", "i8", "i4"]) def dtype(request): return request.param @pytest.fixture(scope="module") def random_sparse(cls, dtype, rng): if np.issubdtype(dtype, np.integer): def 
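# A hedged sketch matching the concatenation/stacking tests above:
# `sparse.concatenate` joins along an existing axis and `sparse.stack` adds a
# new one; every operand must be sparse.
import sparse

p = sparse.random((2, 3), density=0.5, format="gcxs")
q = sparse.random((2, 3), density=0.5, format="gcxs")
assert sparse.concatenate([p, q], axis=0).shape == (4, 3)
assert sparse.stack([p, q], axis=0).shape == (2, 2, 3)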
data_rvs(n): return rng.integers(-1000, 1000, n) else: data_rvs = None return cls(sparse.random((20, 30), density=0.25, data_rvs=data_rvs).astype(dtype)) @pytest.fixture(scope="module") def random_sparse_small(cls, dtype, rng): if np.issubdtype(dtype, np.integer): def data_rvs(n): return rng.integers(-10, 10, n) else: data_rvs = None return cls(sparse.random((20, 20), density=0.25, data_rvs=data_rvs).astype(dtype)) def test_repr(random_sparse): cls = type(random_sparse).__name__ str_repr = repr(random_sparse) assert cls in str_repr def test_bad_constructor_input(cls): with pytest.raises(ValueError, match=r".*shape.*"): cls(arg="hello world") @pytest.mark.parametrize("n", [0, 1, 3]) def test_bad_nd_input(cls, n): a = np.ones(shape=tuple(5 for _ in range(n))) with pytest.raises(ValueError, match=f"{n}-d"): cls(a) @pytest.mark.parametrize("source_type", ["gcxs", "coo"]) def test_from_sparse(cls, source_type): gcxs = sparse.random((20, 30), density=0.25, format=source_type) result = cls(gcxs) assert_eq(result, gcxs) @pytest.mark.parametrize("scipy_type", ["coo", "csr", "csc", "lil"]) @pytest.mark.parametrize("CLS", [CSR, CSC, GCXS]) def test_from_scipy_sparse(scipy_type, CLS, dtype): orig = scipy.sparse.random(20, 30, density=0.2, format=scipy_type, dtype=dtype) ref = COO.from_scipy_sparse(orig) result = CLS.from_scipy_sparse(orig) assert_eq(ref, result) result_via_init = CLS(orig) assert_eq(ref, result_via_init) @pytest.mark.parametrize("cls_str", ["coo", "dok", "csr", "csc", "gcxs"]) def test_to_sparse(cls_str, random_sparse): result = random_sparse.asformat(cls_str) assert_eq(random_sparse, result) @pytest.mark.parametrize("copy", [True, False]) def test_transpose(random_sparse, copy): from operator import is_, is_not t = random_sparse.transpose(copy=copy) tt = t.transpose(copy=copy) # Check if a copy was made check = is_not if copy else is_ assert check(random_sparse.data, t.data) assert check(random_sparse.indices, t.indices) assert check(random_sparse.indptr, t.indptr) assert random_sparse.shape == t.shape[::-1] assert_eq(random_sparse, tt) assert type(random_sparse) == type(tt) assert_eq(random_sparse.transpose(axes=(0, 1)), random_sparse) assert_eq(random_sparse.transpose(axes=(1, 0)), t) with pytest.raises(ValueError, match="Invalid transpose axes"): random_sparse.transpose(axes=0) def test_transpose_error(random_sparse): with pytest.raises(ValueError): random_sparse.transpose(axes=1) def test_matmul(random_sparse_small): arr = random_sparse_small.todense() actual = random_sparse_small @ random_sparse_small expected = arr @ arr assert_eq(actual, expected) sparse-0.16.0a9/sparse/numba_backend/tests/test_compressed_convert.py000066400000000000000000000051671463475501500260750ustar00rootroot00000000000000from sparse.numba_backend._compressed import convert from sparse.numba_backend._utils import assert_eq import pytest from numba.typed import List import numpy as np def make_inds(shape): return [np.arange(1, a - 1) for a in shape] def make_increments(shape): inds = make_inds(shape) shape_bins = convert.transform_shape(np.asarray(shape)) return List([inds[i] * shape_bins[i] for i in range(len(shape))]) @pytest.mark.parametrize( "shape, expected_subsample, subsample", [ [(5, 6, 7, 8, 9), np.array([3610, 6892, 10338]), 1000], [(13, 12, 12, 9, 7), np.array([9899, 34441, 60635, 86703]), 10000], [ (12, 15, 7, 14, 9), np.array([14248, 36806, 61382, 85956, 110532, 135106]), 10000, ], [(9, 9, 12, 7, 12), np.array([10177, 34369, 60577]), 10000], ], ) def test_convert_to_flat(shape, 
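# A hedged sketch matching the 2-D compressed-format tests above: CSR/CSC
# wrap scipy.sparse inputs via `from_scipy_sparse` and densify back with
# `todense`. The CSR import path is the internal one these tests use and may
# move between releases.
import numpy as np
import scipy.sparse
from sparse.numba_backend._compressed.compressed import CSR

m = scipy.sparse.random(5, 6, density=0.3, format="csr", random_state=0)
c = CSR.from_scipy_sparse(m)
np.testing.assert_allclose(c.todense(), m.toarray())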
expected_subsample, subsample): inds = make_inds(shape) dtype = inds[0].dtype assert_eq( convert.convert_to_flat(inds, shape, dtype)[::subsample], expected_subsample.astype(dtype), ) @pytest.mark.parametrize( "shape, expected_subsample, subsample", [ [(5, 6, 7, 8, 9), np.array([3610, 6892, 10338]), 1000], [(13, 12, 12, 9, 7), np.array([9899, 34441, 60635, 86703]), 10000], [ (12, 15, 7, 14, 9), np.array([14248, 36806, 61382, 85956, 110532, 135106]), 10000, ], [(9, 9, 12, 7, 12), np.array([10177, 34369, 60577]), 10000], ], ) def test_compute_flat(shape, expected_subsample, subsample): increments = make_increments(shape) dtype = increments[0].dtype operations = np.prod([inc.shape[0] for inc in increments[:-1]], dtype=dtype) cols = np.tile(increments[-1], operations) assert_eq( convert.compute_flat(increments, cols, operations)[::subsample], expected_subsample.astype(dtype), ) @pytest.mark.parametrize( "shape, expected_shape", [ [(5, 6, 7, 8, 9), np.array([3024, 504, 72, 9, 1])], [(13, 12, 12, 9, 7), np.array([9072, 756, 63, 7, 1])], [(12, 15, 7, 14, 9), np.array([13230, 882, 126, 9, 1])], [ (18, 5, 12, 14, 9, 11, 8, 14), np.array([9313920, 1862784, 155232, 11088, 1232, 112, 14, 1]), ], [ (11, 6, 13, 11, 17, 7, 15), np.array([1531530, 255255, 19635, 1785, 105, 15, 1]), ], [(9, 9, 12, 7, 12), np.array([9072, 1008, 84, 12, 1])], ], ) def test_transform_shape(shape, expected_shape): assert_eq(convert.transform_shape(np.asarray(shape)), expected_shape, compare_dtype=False) sparse-0.16.0a9/sparse/numba_backend/tests/test_conversion.py000066400000000000000000000016471463475501500243550ustar00rootroot00000000000000import sparse from sparse.numba_backend._utils import assert_eq import pytest FORMATS_ND = [ sparse.COO, sparse.DOK, sparse.GCXS, ] FORMATS_2D = [ sparse.numba_backend._compressed.CSC, sparse.numba_backend._compressed.CSR, ] FORMATS = FORMATS_2D + FORMATS_ND @pytest.mark.parametrize("format1", FORMATS) @pytest.mark.parametrize("format2", FORMATS) def test_conversion(format1, format2): x = sparse.random((10, 10), density=0.5, format=format1, fill_value=0.5) y = x.asformat(format2) assert_eq(x, y) def test_extra_kwargs(): x = sparse.full((2, 2), 1, format="gcxs", compressed_axes=[1]) y = sparse.full_like(x, 1) assert_eq(x, y) @pytest.mark.parametrize("format1", FORMATS_ND) @pytest.mark.parametrize("format2", FORMATS_ND) def test_conversion_scalar(format1, format2): x = sparse.random((), format=format1, fill_value=0.5) y = x.asformat(format2) assert_eq(x, y) sparse-0.16.0a9/sparse/numba_backend/tests/test_coo.py000066400000000000000000001562621463475501500227540ustar00rootroot00000000000000import contextlib import operator import pickle import sys import sparse from sparse import COO, DOK from sparse.numba_backend._settings import NEP18_ENABLED from sparse.numba_backend._utils import assert_eq, html_table, random_value_array import pytest import numpy as np import scipy.sparse import scipy.stats @pytest.fixture(scope="module", params=["f8", "f4", "i8", "i4"]) def random_sparse(request, rng): dtype = request.param if np.issubdtype(dtype, np.integer): def data_rvs(n): return rng.integers(-1000, 1000, n) else: data_rvs = None return sparse.random((20, 30, 40), density=0.25, data_rvs=data_rvs).astype(dtype) @pytest.fixture(scope="module", params=["f8", "f4", "i8", "i4"]) def random_sparse_small(request, rng): dtype = request.param if np.issubdtype(dtype, np.integer): def data_rvs(n): return rng.integers(-10, 10, n) else: data_rvs = None return sparse.random((20, 30, 40), density=0.25, 
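# A hedged sketch matching the conversion tests above: the n-D formats
# convert into one another with `asformat`, preserving values and the fill
# value.
import numpy as np
import sparse

x = sparse.random((5, 5), density=0.4, format="coo", fill_value=0.5)
y = x.asformat("gcxs")
assert float(y.fill_value) == 0.5
np.testing.assert_allclose(x.todense(), y.todense())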
data_rvs=data_rvs).astype(dtype) @pytest.mark.parametrize("reduction, kwargs", [("sum", {}), ("sum", {"dtype": np.float32}), ("prod", {})]) @pytest.mark.parametrize("axis", [None, 0, 1, 2, (0, 2), -3, (1, -1)]) @pytest.mark.parametrize("keepdims", [True, False]) def test_reductions_fv(reduction, random_sparse_small, axis, keepdims, kwargs, rng): x = random_sparse_small + rng.integers(-1, 1, dtype="i4") y = x.todense() xx = getattr(x, reduction)(axis=axis, keepdims=keepdims, **kwargs) yy = getattr(y, reduction)(axis=axis, keepdims=keepdims, **kwargs) assert_eq(xx, yy) @pytest.mark.parametrize( "reduction, kwargs", [ ("sum", {}), ("sum", {"dtype": np.float32}), ("mean", {}), ("mean", {"dtype": np.float32}), ("prod", {}), ("max", {}), ("min", {}), ("std", {}), ("var", {}), ], ) @pytest.mark.parametrize("axis", [None, 0, 1, 2, (0, 2), -3, (1, -1)]) @pytest.mark.parametrize("keepdims", [True, False]) def test_reductions(reduction, random_sparse, axis, keepdims, kwargs): x = random_sparse y = x.todense() xx = getattr(x, reduction)(axis=axis, keepdims=keepdims, **kwargs) yy = getattr(y, reduction)(axis=axis, keepdims=keepdims, **kwargs) assert_eq(xx, yy) @pytest.mark.xfail(reason=("Setting output dtype=float16 produces results inconsistent with numpy")) @pytest.mark.filterwarnings("ignore:overflow") @pytest.mark.parametrize( "reduction, kwargs", [("sum", {"dtype": np.float16}), ("mean", {"dtype": np.float16})], ) @pytest.mark.parametrize("axis", [None, 0, 1, 2, (0, 2)]) def test_reductions_float16(random_sparse, reduction, kwargs, axis): x = random_sparse y = x.todense() xx = getattr(x, reduction)(axis=axis, **kwargs) yy = getattr(y, reduction)(axis=axis, **kwargs) assert_eq(xx, yy, atol=1e-2) @pytest.mark.parametrize("reduction,kwargs", [("any", {}), ("all", {})]) @pytest.mark.parametrize("axis", [None, 0, 1, 2, (0, 2), -3, (1, -1)]) @pytest.mark.parametrize("keepdims", [True, False]) def test_reductions_bool(random_sparse, reduction, kwargs, axis, keepdims): y = np.zeros((2, 3, 4), dtype=bool) y[0] = True y[1, 1, 1] = True x = sparse.COO.from_numpy(y) xx = getattr(x, reduction)(axis=axis, keepdims=keepdims, **kwargs) yy = getattr(y, reduction)(axis=axis, keepdims=keepdims, **kwargs) assert_eq(xx, yy) @pytest.mark.parametrize( "reduction,kwargs", [ (np.max, {}), (np.sum, {}), (np.sum, {"dtype": np.float32}), (np.mean, {}), (np.mean, {"dtype": np.float32}), (np.prod, {}), (np.min, {}), ], ) @pytest.mark.parametrize("axis", [None, 0, 1, 2, (0, 2), -1, (0, -1)]) @pytest.mark.parametrize("keepdims", [True, False]) def test_ufunc_reductions(random_sparse, reduction, kwargs, axis, keepdims): x = random_sparse y = x.todense() xx = reduction(x, axis=axis, keepdims=keepdims, **kwargs) yy = reduction(y, axis=axis, keepdims=keepdims, **kwargs) assert_eq(xx, yy) # If not a scalar/1 element array, must be a sparse array if xx.size > 1: assert isinstance(xx, COO) @pytest.mark.parametrize( "reduction,kwargs", [ (np.max, {}), (np.sum, {"axis": 0}), (np.prod, {"keepdims": True}), (np.add.reduce, {}), (np.add.reduce, {"keepdims": True}), (np.minimum.reduce, {"axis": 0}), ], ) def test_ufunc_reductions_kwargs(reduction, kwargs): x = sparse.random((2, 3, 4), density=0.5) y = x.todense() xx = reduction(x, **kwargs) yy = reduction(y, **kwargs) assert_eq(xx, yy) # If not a scalar/1 element array, must be a sparse array if xx.size > 1: assert isinstance(xx, COO) @pytest.mark.parametrize("reduction", ["nansum", "nanmean", "nanprod", "nanmax", "nanmin"]) @pytest.mark.parametrize("axis", [None, 0, 1]) 
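# A hedged sketch matching `test_reductions_fv` above: reductions also work
# when the fill value is nonzero (here it becomes 1.0 after adding a scalar)
# and agree with the dense result.
import numpy as np
import sparse

s = sparse.random((3, 4), density=0.5) + 1
np.testing.assert_allclose(s.sum(axis=1).todense(), s.todense().sum(axis=1))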
@pytest.mark.parametrize("keepdims", [False]) @pytest.mark.parametrize("fraction", [0.25, 0.5, 0.75, 1.0]) @pytest.mark.filterwarnings("ignore:All-NaN") @pytest.mark.filterwarnings("ignore:Mean of empty slice") def test_nan_reductions(reduction, axis, keepdims, fraction): s = sparse.random((2, 3, 4), data_rvs=random_value_array(np.nan, fraction), density=0.25) x = s.todense() expected = getattr(np, reduction)(x, axis=axis, keepdims=keepdims) actual = getattr(sparse, reduction)(s, axis=axis, keepdims=keepdims) assert_eq(expected, actual) @pytest.mark.parametrize("reduction", ["nanmax", "nanmin", "nanmean"]) @pytest.mark.parametrize("axis", [None, 0, 1]) def test_all_nan_reduction_warning(reduction, axis): x = random_value_array(np.nan, 1.0)(2 * 3 * 4).reshape(2, 3, 4) s = COO.from_numpy(x) with pytest.warns(RuntimeWarning): getattr(sparse, reduction)(s, axis=axis) @pytest.mark.parametrize( "axis", [None, (1, 2, 0), (2, 1, 0), (0, 1, 2), (0, 1, -1), (0, -2, -1), (-3, -2, -1)], ) def test_transpose(axis): x = sparse.random((2, 3, 4), density=0.25) y = x.todense() xx = x.transpose(axis) yy = y.transpose(axis) assert_eq(xx, yy) @pytest.mark.parametrize( "axis", [ (0, 1), # too few (0, 1, 2, 3), # too many (3, 1, 0), # axis 3 illegal (0, -1, -4), # axis -4 illegal (0, 0, 1), # duplicate axis 0 (0, -1, 2), # duplicate axis -1 == 2 0.3, # Invalid type in axis ((0, 1, 2),), # Iterable inside iterable ], ) def test_transpose_error(axis): x = sparse.random((2, 3, 4), density=0.25) with pytest.raises(ValueError): x.transpose(axis) @pytest.mark.parametrize( "a,b", [ [(3, 4), (5, 5)], [(12,), (3, 4)], [(12,), (3, 6)], [(5, 5, 5), (6, 6, 6)], [(3, 4), (9, 4)], [(5,), (4,)], [(2, 3, 4, 5), (2, 3, 4, 5, 6)], [(100,), (5, 5)], [(2, 3, 4, 5), (20, 6)], [(), ()], ], ) def test_resize(a, b): s = sparse.random(a, density=0.5) orig_size = s.size x = s.todense() x = np.resize(x, b) s.resize(b) temp = x.reshape(x.size) temp[orig_size:] = s.fill_value assert isinstance(s, sparse.SparseArray) assert_eq(x, s) def test_resize_upcast(): s = sparse.random((10, 10, 10), density=0.5, format="coo", idx_dtype=np.uint8) s.resize(600) assert s.coords.dtype == np.uint16 @pytest.mark.parametrize("axis1", [-3, -2, -1, 0, 1, 2]) @pytest.mark.parametrize("axis2", [-3, -2, -1, 0, 1, 2]) def test_swapaxes(axis1, axis2): x = sparse.random((2, 3, 4), density=0.25) y = x.todense() xx = x.swapaxes(axis1, axis2) yy = y.swapaxes(axis1, axis2) assert_eq(xx, yy) @pytest.mark.parametrize("axis1", [-4, 3]) @pytest.mark.parametrize("axis2", [-4, 3, 0]) def test_swapaxes_error(axis1, axis2): x = sparse.random((2, 3, 4), density=0.25) with pytest.raises(ValueError): x.swapaxes(axis1, axis2) @pytest.mark.parametrize( "source, destination", [ [0, 1], [2, 1], [-2, 1], [-2, -3], [(0, 1), (2, 3)], [(-1, 0), (0, 1)], [(0, 1, 2), (2, 1, 0)], [(0, 1, 2), (-2, -3, -1)], ], ) def test_moveaxis(source, destination): x = sparse.random((2, 3, 4, 5), density=0.25) y = x.todense() xx = sparse.moveaxis(x, source, destination) yy = np.moveaxis(y, source, destination) assert_eq(xx, yy) @pytest.mark.parametrize("source, destination", [[0, -4], [(0, 5), (1, 2)], [(0, 1, 2), (2, 1)]]) def test_moveaxis_error(source, destination): x = sparse.random((2, 3, 4), density=0.25) with pytest.raises(ValueError): sparse.moveaxis(x, source, destination) @pytest.mark.parametrize( "a,b", [ [(3, 4), (5, 5)], [(12,), (3, 4)], [(12,), (3, 6)], [(5, 5, 5), (6, 6, 6)], [(3, 4), (9, 4)], [(5,), (4,)], [(2, 3, 4, 5), (2, 3, 4, 5, 6)], [(100,), (5, 5)], [(2, 3, 4, 5), (20, 6)], [(), 
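# A hedged sketch matching the NaN-skipping reduction tests above:
# `sparse.nansum` ignores explicitly stored NaNs.
import numpy as np
import sparse

s = sparse.COO.from_numpy(np.array([[1.0, np.nan], [0.0, 2.0]]))
np.testing.assert_allclose(sparse.nansum(s, axis=0).todense(), [1.0, 2.0])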
()], ], ) def test_resize_2(a, b): s = sparse.random(a, density=0.5) orig_size = s.size x = s.todense() x = np.resize(x, b) s.resize(b) temp = x.reshape(x.size) temp[orig_size:] = s.fill_value assert isinstance(s, sparse.SparseArray) assert_eq(x, s) @pytest.mark.parametrize( "a,b", [ [(3, 4), (3, 4)], [(12,), (3, 4)], [(12,), (3, -1)], [(3, 4), (12,)], [(3, 4), (-1, 4)], [(3, 4), (3, -1)], [(2, 3, 4, 5), (8, 15)], [(2, 3, 4, 5), (24, 5)], [(2, 3, 4, 5), (20, 6)], [(), ()], ], ) @pytest.mark.parametrize("format", ["coo", "dok"]) def test_reshape(a, b, format): s = sparse.random(a, density=0.5, format=format) x = s.todense() assert_eq(x.reshape(b), s.reshape(b)) def test_large_reshape(): n = 100 m = 10 row = np.arange(n, dtype=np.uint16) # np.random.randint(0, n, size=n, dtype=np.uint16) col = row % m # np.random.randint(0, m, size=n, dtype=np.uint16) data = np.ones(n, dtype=np.uint8) x = COO((data, (row, col)), sorted=True, has_duplicates=False) assert_eq(x, x.reshape(x.shape)) def test_reshape_same(): s = sparse.random((3, 5), density=0.5) assert s.reshape(s.shape) is s @pytest.mark.parametrize("format", [COO, DOK]) def test_reshape_function(format): s = sparse.random((5, 3), density=0.5, format=format) x = s.todense() shape = (3, 5) s2 = np.reshape(s, shape) assert isinstance(s2, format) assert_eq(s2, x.reshape(shape)) def test_reshape_upcast(): a = sparse.random((10, 10, 10), density=0.5, format="coo", idx_dtype=np.uint8) assert a.reshape(1000).coords.dtype == np.uint16 @pytest.mark.parametrize("format", [COO, DOK]) def test_reshape_errors(format): s = sparse.random((5, 3), density=0.5, format=format) with pytest.raises(NotImplementedError): s.reshape((3, 5, 1), order="F") @pytest.mark.parametrize("a_ndim", [1, 2, 3]) @pytest.mark.parametrize("b_ndim", [1, 2, 3]) def test_kron(a_ndim, b_ndim): a_shape = (2, 3, 4)[:a_ndim] b_shape = (5, 6, 7)[:b_ndim] sa = sparse.random(a_shape, density=0.5) a = sa.todense() sb = sparse.random(b_shape, density=0.5) b = sb.todense() sol = np.kron(a, b) assert_eq(sparse.kron(sa, sb), sol) assert_eq(sparse.kron(sa, b), sol) assert_eq(sparse.kron(a, sb), sol) with pytest.raises(ValueError): assert_eq(sparse.kron(a, b), sol) @pytest.mark.parametrize("a_spmatrix, b_spmatrix", [(True, True), (True, False), (False, True)]) def test_kron_spmatrix(a_spmatrix, b_spmatrix): sa = sparse.random((3, 4), density=0.5) a = sa.todense() sb = sparse.random((5, 6), density=0.5) b = sb.todense() if a_spmatrix: sa = sa.tocsr() if b_spmatrix: sb = sb.tocsr() sol = np.kron(a, b) assert_eq(sparse.kron(sa, sb), sol) assert_eq(sparse.kron(sa, b), sol) assert_eq(sparse.kron(a, sb), sol) with pytest.raises(ValueError): assert_eq(sparse.kron(a, b), sol) @pytest.mark.parametrize("ndim", [1, 2, 3]) def test_kron_scalar(ndim): if ndim: a_shape = (3, 4, 5)[:ndim] sa = sparse.random(a_shape, density=0.5) a = sa.todense() else: sa = a = np.array(6) scalar = np.array(5) sol = np.kron(a, scalar) assert_eq(sparse.kron(sa, scalar), sol) assert_eq(sparse.kron(scalar, sa), sol) def test_gt(): s = sparse.random((2, 3, 4), density=0.5) x = s.todense() m = x.mean() assert_eq(x > m, s > m) m = s.data[2] assert_eq(x > m, s > m) assert_eq(x >= m, s >= m) @pytest.mark.parametrize( "index", [ # Integer 0, 1, -1, (1, 1, 1), # Pure slices (slice(0, 2),), (slice(None, 2), slice(None, 2)), (slice(1, None), slice(1, None)), (slice(None, None),), (slice(None, None, -1),), (slice(None, 2, -1), slice(None, 2, -1)), (slice(1, None, 2), slice(1, None, 2)), (slice(None, None, 2),), (slice(None, 2, -1), slice(None, 
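# A hedged sketch matching `test_kron` above: `sparse.kron` computes the
# Kronecker product as long as at least one operand is sparse.
import sparse

a = sparse.random((2, 3), density=0.5)
b = sparse.random((4, 5), density=0.5)
assert sparse.kron(a, b).shape == (8, 15)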
2, -2)), (slice(1, None, 2), slice(1, None, 1)), (slice(None, None, -2),), # Combinations (0, slice(0, 2)), (slice(0, 1), 0), (None, slice(1, 3), 0), (slice(0, 3), None, 0), (slice(1, 2), slice(2, 4)), (slice(1, 2), slice(None, None)), (slice(1, 2), slice(None, None), 2), (slice(1, 2, 2), slice(None, None), 2), (slice(1, 2, None), slice(None, None, 2), 2), (slice(1, 2, -2), slice(None, None), -2), (slice(1, 2, None), slice(None, None, -2), 2), (slice(1, 2, -1), slice(None, None), -1), (slice(1, 2, None), slice(None, None, -1), 2), (slice(2, 0, -1), slice(None, None), -1), (slice(-2, None, None),), (slice(-1, None, None), slice(-2, None, None)), # With ellipsis (Ellipsis, slice(1, 3)), (1, Ellipsis, slice(1, 3)), (slice(0, 1), Ellipsis), (Ellipsis, None), (None, Ellipsis), (1, Ellipsis), (1, Ellipsis, None), (1, 1, 1, Ellipsis), (Ellipsis, 1, None), # With multi-axis advanced indexing ([0, 1],) * 2, ([0, 1], [0, 2]), ([0, 0, 0], [0, 1, 2], [1, 2, 1]), # Pathological - Slices larger than array (slice(None, 1000)), (slice(None), slice(None, 1000)), (slice(None), slice(1000, -1000, -1)), (slice(None), slice(1000, -1000, -50)), # Pathological - Wrong ordering of start/stop (slice(5, 0),), (slice(0, 5, -1),), (slice(0, 0, None),), ], ) def test_slicing(index): s = sparse.random((2, 3, 4), density=0.5) x = s.todense() assert_eq(x[index], s[index]) @pytest.mark.parametrize( "index", [ ([1, 0], 0), (1, [0, 2]), (0, [1, 0], 0), (1, [2, 0], 0), (1, [], 0), ([True, False], slice(1, None), slice(-2, None)), (slice(1, None), slice(-2, None), [True, False, True, False]), ([1, 0],), (Ellipsis, [2, 1, 3]), (slice(None), [2, 1, 2]), (1, [2, 0, 1]), ], ) def test_advanced_indexing(index): s = sparse.random((2, 3, 4), density=0.5) x = s.todense() assert_eq(x[index], s[index]) def test_custom_dtype_slicing(): dt = np.dtype([("part1", np.float64), ("part2", np.int64, (2,)), ("part3", np.int64, (2, 2))]) x = np.zeros((2, 3, 4), dtype=dt) x[1, 1, 1] = (0.64, [4, 2], [[1, 2], [3, 0]]) s = COO.from_numpy(x) assert x[1, 1, 1] == s[1, 1, 1] assert x[0, 1, 2] == s[0, 1, 2] assert_eq(x["part1"], s["part1"]) assert_eq(x["part2"], s["part2"]) assert_eq(x["part3"], s["part3"]) @pytest.mark.parametrize( "index", [ (Ellipsis, Ellipsis), (1, 1, 1, 1), (slice(None),) * 4, 5, -5, "foo", [True, False, False], 0.5, [0.5], {"potato": "kartoffel"}, ([[0, 1]],), ], ) def test_slicing_errors(index): s = sparse.random((2, 3, 4), density=0.5) with pytest.raises(IndexError): s[index] def test_concatenate(): xx = sparse.random((2, 3, 4), density=0.5) x = xx.todense() yy = sparse.random((5, 3, 4), density=0.5) y = yy.todense() zz = sparse.random((4, 3, 4), density=0.5) z = zz.todense() assert_eq(np.concatenate([x, y, z], axis=0), sparse.concatenate([xx, yy, zz], axis=0)) xx = sparse.random((5, 3, 1), density=0.5) x = xx.todense() yy = sparse.random((5, 3, 3), density=0.5) y = yy.todense() zz = sparse.random((5, 3, 2), density=0.5) z = zz.todense() assert_eq(np.concatenate([x, y, z], axis=2), sparse.concatenate([xx, yy, zz], axis=2)) assert_eq(np.concatenate([x, y, z], axis=-1), sparse.concatenate([xx, yy, zz], axis=-1)) @pytest.mark.parametrize("axis", [0, 1]) @pytest.mark.parametrize("func", [sparse.stack, sparse.concatenate]) def test_concatenate_mixed(func, axis): s = sparse.random((10, 10), density=0.5) d = s.todense() with pytest.raises(ValueError): func([d, s, s], axis=axis) def test_concatenate_noarrays(): with pytest.raises(ValueError): sparse.concatenate([]) @pytest.mark.parametrize("shape", [(5,), (2, 3, 4), (5, 2)]) 
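# the parametrized test below checks that sparse.stack of three equally-shaped random arrays matches np.stack for each shape/axis pair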
@pytest.mark.parametrize("axis", [0, 1, -1]) def test_stack(shape, axis): xx = sparse.random(shape, density=0.5) x = xx.todense() yy = sparse.random(shape, density=0.5) y = yy.todense() zz = sparse.random(shape, density=0.5) z = zz.todense() assert_eq(np.stack([x, y, z], axis=axis), sparse.stack([xx, yy, zz], axis=axis)) def test_large_concat_stack(): data = np.array([1], dtype=np.uint8) coords = np.array([[255]], dtype=np.uint8) xs = COO(coords, data, shape=(256,), has_duplicates=False, sorted=True) x = xs.todense() assert_eq(np.stack([x, x]), sparse.stack([xs, xs])) assert_eq(np.concatenate((x, x)), sparse.concatenate((xs, xs))) def test_addition(): a = sparse.random((2, 3, 4), density=0.5) x = a.todense() b = sparse.random((2, 3, 4), density=0.5) y = b.todense() assert_eq(x + y, a + b) assert_eq(x - y, a - b) @pytest.mark.parametrize("scalar", [2, 2.5, np.float32(2.0), np.int8(3)]) def test_scalar_multiplication(scalar): a = sparse.random((2, 3, 4), density=0.5) x = a.todense() assert_eq(x * scalar, a * scalar) assert (a * scalar).nnz == a.nnz assert_eq(scalar * x, scalar * a) assert (scalar * a).nnz == a.nnz assert_eq(x / scalar, a / scalar) assert (a / scalar).nnz == a.nnz assert_eq(x // scalar, a // scalar) # division may reduce nnz. @pytest.mark.filterwarnings("ignore:divide by zero") def test_scalar_exponentiation(): a = sparse.random((2, 3, 4), density=0.5) x = a.todense() assert_eq(x**2, a**2) assert_eq(x**0.5, a**0.5) assert_eq(x**-1, a**-1) def test_create_with_lists_of_tuples(): L = [((0, 0, 0), 1), ((1, 2, 1), 1), ((1, 1, 1), 2), ((1, 3, 2), 3)] s = COO(L) x = np.zeros((2, 4, 3), dtype=np.asarray([1, 2, 3]).dtype) for ind, value in L: x[ind] = value assert_eq(s, x) def test_sizeof(): x = np.eye(100) y = COO.from_numpy(x) nb = sys.getsizeof(y) assert 400 < nb < x.nbytes / 10 def test_scipy_sparse_interface(rng): n = 100 m = 10 row = rng.integers(0, n, size=n, dtype=np.uint16) col = rng.integers(0, m, size=n, dtype=np.uint16) data = np.ones(n, dtype=np.uint8) inp = (data, (row, col)) x = scipy.sparse.coo_matrix(inp) xx = sparse.COO(inp) assert_eq(x, xx, check_nnz=False) assert_eq(x.T, xx.T, check_nnz=False) assert_eq(xx.to_scipy_sparse(), x, check_nnz=False) assert_eq(COO.from_scipy_sparse(xx.to_scipy_sparse()), xx, check_nnz=False) assert_eq(x, xx, check_nnz=False) assert_eq(x.T.dot(x), xx.T.dot(xx), check_nnz=False) assert isinstance(x + xx, COO) assert isinstance(xx + x, COO) @pytest.mark.parametrize("scipy_format", ["coo", "csr", "dok", "csc"]) def test_scipy_sparse_interaction(scipy_format): x = sparse.random((10, 20), density=0.2).todense() sp = getattr(scipy.sparse, scipy_format + "_matrix")(x) coo = COO(x) assert isinstance(sp + coo, COO) assert isinstance(coo + sp, COO) assert_eq(sp, coo) @pytest.mark.parametrize( "func", [operator.mul, operator.add, operator.sub, operator.gt, operator.lt, operator.ne], ) def test_op_scipy_sparse(func): xs = sparse.random((3, 4), density=0.5) y = sparse.random((3, 4), density=0.5).todense() ys = scipy.sparse.csr_matrix(y) x = xs.todense() assert_eq(func(x, y), func(xs, ys)) @pytest.mark.parametrize( "func", [ operator.add, operator.sub, pytest.param( operator.mul, marks=pytest.mark.xfail(reason="Scipy sparse auto-densifies in this case."), ), pytest.param( operator.gt, marks=pytest.mark.xfail(reason="Scipy sparse doesn't support this yet."), ), pytest.param( operator.lt, marks=pytest.mark.xfail(reason="Scipy sparse doesn't support this yet."), ), pytest.param( operator.ne, marks=pytest.mark.xfail(reason="Scipy sparse doesn't support 
this yet."), ), ], ) def test_op_scipy_sparse_left(func): ys = sparse.random((3, 4), density=0.5) x = sparse.random((3, 4), density=0.5).todense() xs = scipy.sparse.csr_matrix(x) y = ys.todense() assert_eq(func(x, y), func(xs, ys)) def test_cache_csr(): x = sparse.random((10, 5), density=0.5).todense() s = COO(x, cache=True) assert isinstance(s.tocsr(), scipy.sparse.csr_matrix) assert isinstance(s.tocsc(), scipy.sparse.csc_matrix) assert s.tocsr() is s.tocsr() assert s.tocsc() is s.tocsc() def test_empty_shape(): x = COO(np.empty((0, 1), dtype=np.int8), [1.0]) assert x.shape == () assert_eq(2 * x, np.float64(2.0)) def test_single_dimension(): x = COO([1, 3], [1.0, 3.0]) assert x.shape == (4,) assert_eq(x, np.array([0, 1.0, 0, 3.0])) def test_large_sum(rng): n = 500000 x = rng.integers(0, 10000, size=(n,)) y = rng.integers(0, 1000, size=(n,)) z = rng.integers(0, 3, size=(n,)) data = rng.random(n) a = COO((x, y, z), data) assert a.shape == (10000, 1000, 3) b = a.sum(axis=2) assert b.nnz > 100000 def test_add_many_sparse_arrays(): x = COO({(1, 1): 1}) y = sum([x] * 100) assert y.nnz < np.prod(y.shape) def test_caching(): x = COO({(9, 9, 9): 1}) assert x[:].reshape((100, 10)).transpose().tocsr() is not x[:].reshape((100, 10)).transpose().tocsr() x = COO({(9, 9, 9): 1}, cache=True) assert x[:].reshape((100, 10)).transpose().tocsr() is x[:].reshape((100, 10)).transpose().tocsr() x = COO({(1, 1, 1, 1, 1, 1, 1, 2): 1}, cache=True) for _ in range(x.ndim): x.reshape(x.size) assert len(x._cache["reshape"]) < 5 def test_scalar_slicing(): x = np.array([0, 1]) s = COO(x) assert np.isscalar(s[0]) assert_eq(x[0], s[0]) assert isinstance(s[0, ...], COO) assert s[0, ...].shape == () assert_eq(x[0, ...], s[0, ...]) assert np.isscalar(s[1]) assert_eq(x[1], s[1]) assert isinstance(s[1, ...], COO) assert s[1, ...].shape == () assert_eq(x[1, ...], s[1, ...]) @pytest.mark.parametrize( "shape, k", [((3, 4), 0), ((3, 4, 5), 1), ((4, 2), -1), ((2, 4), -2), ((4, 4), 1000)], ) def test_triul(shape, k): s = sparse.random(shape, density=0.5) x = s.todense() assert_eq(np.triu(x, k), sparse.triu(s, k)) assert_eq(np.tril(x, k), sparse.tril(s, k)) def test_empty_reduction(): x = np.zeros((2, 3, 4), dtype=np.float64) xs = COO.from_numpy(x) assert_eq(x.sum(axis=(0, 2)), xs.sum(axis=(0, 2))) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4)]) @pytest.mark.parametrize("density", [0.1, 0.3, 0.5, 0.7]) def test_random_shape(shape, density): s = sparse.random(shape, density) assert isinstance(s, COO) assert s.shape == shape expected_nnz = density * np.prod(shape) assert np.floor(expected_nnz) <= s.nnz <= np.ceil(expected_nnz) @pytest.mark.parametrize("shape, nnz", [((1,), 1), ((2,), 0), ((3, 4), 5)]) def test_random_nnz(shape, nnz): s = sparse.random(shape, nnz=nnz) assert isinstance(s, COO) assert s.nnz == nnz @pytest.mark.parametrize("density, nnz", [(1, 1), (1.01, None), (-0.01, None), (None, 2)]) def test_random_invalid_density_and_nnz(density, nnz): with pytest.raises(ValueError): sparse.random((1,), density, nnz=nnz) def test_two_random_unequal(): s1 = sparse.random((2, 3, 4), 0.3) s2 = sparse.random((2, 3, 4), 0.3) assert not np.allclose(s1.todense(), s2.todense()) def test_two_random_same_seed(rng): state = rng.integers(100) s1 = sparse.random((2, 3, 4), 0.3, random_state=state) s2 = sparse.random((2, 3, 4), 0.3, random_state=state) assert_eq(s1, s2) @pytest.mark.parametrize( "rvs, dtype", [ (None, np.float64), (scipy.stats.poisson(25, loc=10).rvs, np.int64), (lambda x: np.random.default_rng().choice([True, 
False], size=x), np.bool_), ], ) @pytest.mark.parametrize("shape", [(2, 4, 5), (20, 40, 50)]) @pytest.mark.parametrize("density", [0.0, 0.01, 0.1, 0.2]) def test_random_rvs(rvs, dtype, shape, density): x = sparse.random(shape, density, data_rvs=rvs) assert x.shape == shape assert x.dtype == dtype @pytest.mark.parametrize("format", ["coo", "dok"]) def test_random_fv(format, rng): fv = rng.random() s = sparse.random((2, 3, 4), density=0.5, format=format, fill_value=fv) assert s.fill_value == fv def test_scalar_shape_construction(rng): x = rng.random(5) coords = np.arange(5)[None] s = COO(coords, x, shape=5) assert_eq(x, s) def test_len(): s = sparse.random((20, 30, 40)) assert len(s) == 20 def test_density(): s = sparse.random((20, 30, 40), density=0.1) assert np.isclose(s.density, 0.1) def test_size(): s = sparse.random((20, 30, 40)) assert s.size == 20 * 30 * 40 def test_np_array(): s = sparse.random((20, 30, 40)) with pytest.raises(RuntimeError): np.array(s) @pytest.mark.parametrize( "shapes", [ [(2,), (3, 2), (4, 3, 2)], [(3,), (2, 3), (2, 2, 3)], [(2,), (2, 2), (2, 2, 2)], [(4,), (4, 4), (4, 4, 4)], [(4,), (4, 4), (4, 4, 4)], [(4,), (4, 4), (4, 4, 4)], [(1, 1, 2), (1, 3, 1), (4, 1, 1)], [(2,), (2, 1), (2, 1, 1)], [(3,), (), (2, 3)], [(4, 4), (), ()], ], ) def test_three_arg_where(shapes): cs = sparse.random(shapes[0], density=0.5).astype(np.bool_) xs = sparse.random(shapes[1], density=0.5) ys = sparse.random(shapes[2], density=0.5) c = cs.todense() x = xs.todense() y = ys.todense() expected = np.where(c, x, y) actual = sparse.where(cs, xs, ys) assert isinstance(actual, COO) assert_eq(expected, actual) def test_one_arg_where(): s = sparse.random((2, 3, 4), density=0.5) x = s.todense() expected = np.where(x) actual = sparse.where(s) assert len(expected) == len(actual) for e, a in zip(expected, actual, strict=True): assert_eq(e, a, compare_dtype=False) def test_one_arg_where_dense(rng): x = rng.random((2, 3, 4)) with pytest.raises(ValueError): sparse.where(x) def test_two_arg_where(): cs = sparse.random((2, 3, 4), density=0.5).astype(np.bool_) xs = sparse.random((2, 3, 4), density=0.5) with pytest.raises(ValueError): sparse.where(cs, xs) @pytest.mark.parametrize("func", [operator.imul, operator.iadd, operator.isub]) def test_inplace_invalid_shape(func): xs = sparse.random((3, 4), density=0.5) ys = sparse.random((2, 3, 4), density=0.5) with pytest.raises(ValueError): func(xs, ys) def test_nonzero(): s = sparse.random((2, 3, 4), density=0.5) x = s.todense() expected = x.nonzero() actual = s.nonzero() assert isinstance(actual, tuple) assert len(expected) == len(actual) for e, a in zip(expected, actual, strict=True): assert_eq(e, a, compare_dtype=False) def test_argwhere(): s = sparse.random((2, 3, 4), density=0.5) x = s.todense() assert_eq(np.argwhere(s), np.argwhere(x), compare_dtype=False) @pytest.mark.parametrize("format", ["coo", "dok"]) def test_asformat(format): s = sparse.random((2, 3, 4), density=0.5, format="coo") s2 = s.asformat(format) assert_eq(s, s2) @pytest.mark.parametrize("format", [sparse.COO, sparse.DOK, scipy.sparse.csr_matrix, np.asarray]) def test_as_coo(format): x = format(sparse.random((3, 4), density=0.5, format="coo").todense()) s1 = sparse.as_coo(x) s2 = COO(x) assert_eq(x, s1) assert_eq(x, s2) def test_invalid_attrs_error(): s = sparse.random((3, 4), density=0.5, format="coo") with pytest.raises(ValueError): sparse.as_coo(s, shape=(2, 3)) with pytest.raises(ValueError): COO(s, shape=(2, 3)) with pytest.raises(ValueError): sparse.as_coo(s, fill_value=0.0) with 
pytest.raises(ValueError): COO(s, fill_value=0.0) def test_invalid_iterable_error(): with pytest.raises(ValueError): x = [(3, 4, 5)] COO.from_iter(x) with pytest.raises(ValueError): x = [((2.3, 4.5), 3.2)] COO.from_iter(x) def test_prod_along_axis(): s1 = sparse.random((10, 10), density=0.1) s2 = 1 - s1 x1 = s1.todense() x2 = s2.todense() assert_eq(s1.prod(axis=0), x1.prod(axis=0)) assert_eq(s2.prod(axis=0), x2.prod(axis=0)) class TestRoll: # test on 1d array # @pytest.mark.parametrize("shift", [0, 2, -2, 20, -20]) def test_1d(self, shift): xs = sparse.random((100,), density=0.5) x = xs.todense() assert_eq(np.roll(x, shift), sparse.roll(xs, shift)) assert_eq(np.roll(x, shift), sparse.roll(x, shift)) # test on 2d array # @pytest.mark.parametrize("shift", [0, 2, -2, 20, -20]) @pytest.mark.parametrize("ax", [None, 0, 1, (0, 1)]) def test_2d(self, shift, ax): xs = sparse.random((10, 10), density=0.5) x = xs.todense() assert_eq(np.roll(x, shift, axis=ax), sparse.roll(xs, shift, axis=ax)) assert_eq(np.roll(x, shift, axis=ax), sparse.roll(x, shift, axis=ax)) # test on rolling multiple axes at once # @pytest.mark.parametrize("shift", [(0, 0), (1, -1), (-1, 1), (10, -10)]) @pytest.mark.parametrize("ax", [(0, 1), (0, 2), (1, 2), (-1, 1)]) def test_multiaxis(self, shift, ax): xs = sparse.random((9, 9, 9), density=0.5) x = xs.todense() assert_eq(np.roll(x, shift, axis=ax), sparse.roll(xs, shift, axis=ax)) assert_eq(np.roll(x, shift, axis=ax), sparse.roll(x, shift, axis=ax)) # test original is unchanged # @pytest.mark.parametrize("shift", [0, 2, -2, 20, -20]) @pytest.mark.parametrize("ax", [None, 0, 1, (0, 1)]) def test_original_is_copied(self, shift, ax): xs = sparse.random((10, 10), density=0.5) xc = COO(np.copy(xs.coords), np.copy(xs.data), shape=xs.shape) sparse.roll(xs, shift, axis=ax) assert_eq(xs, xc) # test on empty array # def test_empty(self): x = np.array([]) assert_eq(np.roll(x, 1), sparse.roll(sparse.as_coo(x), 1)) # test error handling # @pytest.mark.parametrize( "args", [ # iterable shift, but axis not iterable ((1, 1), 0), # ndim(axis) != 1 (1, [[0, 1]]), # ndim(shift) != 1 ([[0, 1]], [0, 1]), ([[0, 1], [0, 1]], [0, 1]), ], ) def test_valerr(self, args): x = sparse.random((2, 2, 2), density=1) with pytest.raises(ValueError): sparse.roll(x, *args) @pytest.mark.parametrize("dtype", [np.uint8, np.int8]) @pytest.mark.parametrize("shift", [300, -300]) def test_dtype_errors(self, dtype, shift): x = sparse.random((5, 5, 5), density=0.2, idx_dtype=dtype) with pytest.raises(ValueError): sparse.roll(x, shift) def test_unsigned_type_error(self): x = sparse.random((5, 5, 5), density=0.3, idx_dtype=np.uint8) with pytest.raises(ValueError): sparse.roll(x, -1) def test_clip(): x = np.array([[0, 0, 1, 0, 2], [5, 0, 0, 3, 0]]) s = sparse.COO.from_numpy(x) assert_eq(s.clip(min=1), x.clip(min=1)) assert_eq(s.clip(max=3), x.clip(max=3)) assert_eq(s.clip(min=1, max=3), x.clip(min=1, max=3)) assert_eq(s.clip(min=1, max=3.0), x.clip(min=1, max=3.0)) assert_eq(np.clip(s, 1, 3), np.clip(x, 1, 3)) with pytest.raises(ValueError): s.clip() out = sparse.COO.from_numpy(np.zeros_like(x)) out2 = s.clip(min=1, max=3, out=out) assert out is out2 assert_eq(out, x.clip(min=1, max=3)) class TestFailFillValue: # Check failed fill_value op def test_nonzero_fv(self): xs = sparse.random((2, 3), density=0.5, fill_value=1) ys = sparse.random((3, 4), density=0.5) with pytest.raises(ValueError): sparse.dot(xs, ys) def test_inconsistent_fv(self): xs = sparse.random((3, 4), density=0.5, fill_value=1) ys = sparse.random((3, 4), 
density=0.5, fill_value=2) with pytest.raises(ValueError): sparse.concatenate([xs, ys]) def test_pickle(): x = sparse.COO.from_numpy([1, 0, 0, 0, 0]).reshape((5, 1)) # Enable caching and add some data to it x.enable_caching() x.T # noqa: B018 assert x._cache is not None # Pickle sends data but not cache x2 = pickle.loads(pickle.dumps(x)) assert_eq(x, x2) assert x2._cache is None @pytest.mark.parametrize("deep", [True, False]) def test_copy(deep): x = sparse.COO.from_numpy([1, 0, 0, 0, 0]).reshape((5, 1)) # Enable caching and add some data to it x.enable_caching() x.T # noqa: B018 assert x._cache is not None x2 = x.copy(deep) assert_eq(x, x2) assert (x2.data is x.data) is not deep assert (x2.coords is x.coords) is not deep assert x2._cache is None @pytest.mark.parametrize("ndim", [2, 3, 4, 5]) def test_initialization(ndim, rng): shape = [10] * ndim shape[1] *= 2 shape = tuple(shape) coords = rng.integers(10, size=(ndim, 20)) data = rng.random(20) COO(coords, data=data, shape=shape) with pytest.raises(ValueError, match="data length"): COO(coords, data=data[:5], shape=shape) with pytest.raises(ValueError, match="shape of `coords`"): coords = rng.integers(10, size=(1, 20)) COO(coords, data=data, shape=shape) @pytest.mark.parametrize("N, M", [(4, None), (4, 10), (10, 4), (0, 10)]) def test_eye(N, M): m = M or N for k in [0, N - 2, N + 2, m - 2, m + 2]: assert_eq(sparse.eye(N, M=M, k=k), np.eye(N, M=M, k=k)) assert_eq(sparse.eye(N, M=M, k=k, dtype="i4"), np.eye(N, M=M, k=k, dtype="i4")) @pytest.mark.parametrize("funcname", ["ones", "zeros"]) def test_ones_zeros(funcname): sp_func = getattr(sparse, funcname) np_func = getattr(np, funcname) assert_eq(sp_func(5), np_func(5)) assert_eq(sp_func((5, 4)), np_func((5, 4))) assert_eq(sp_func((5, 4), dtype="i4"), np_func((5, 4), dtype="i4")) assert_eq(sp_func((5, 4), dtype=None), np_func((5, 4), dtype=None)) @pytest.mark.parametrize("funcname", ["ones_like", "zeros_like"]) def test_ones_zeros_like(funcname): sp_func = getattr(sparse, funcname) np_func = getattr(np, funcname) x = np.ones((5, 5), dtype="i8") assert_eq(sp_func(x), np_func(x)) assert_eq(sp_func(x, dtype="f8"), np_func(x, dtype="f8")) assert_eq(sp_func(x, dtype=None), np_func(x, dtype=None)) assert_eq(sp_func(x, shape=(2, 2)), np_func(x, shape=(2, 2))) def test_full(): assert_eq(sparse.full(5, 9), np.full(5, 9)) assert_eq(sparse.full(5, 9, dtype="f8"), np.full(5, 9, dtype="f8")) assert_eq(sparse.full((5, 4), 9.5), np.full((5, 4), 9.5)) assert_eq(sparse.full((5, 4), 9.5, dtype="i4"), np.full((5, 4), 9.5, dtype="i4")) def test_full_like(): x = np.zeros((5, 5), dtype="i8") assert_eq(sparse.full_like(x, 9.5), np.full_like(x, 9.5)) assert_eq(sparse.full_like(x, 9.5, dtype="f8"), np.full_like(x, 9.5, dtype="f8")) assert_eq(sparse.full_like(x, 9.5, shape=(2, 2)), np.full_like(x, 9.5, shape=(2, 2))) @pytest.mark.parametrize( "x", [ np.array([1, 2, 0, 0, 0]), np.array([1 + 2j, 2 - 1j, 0, 1, 0]), np.array(["a", "b", "c"]), ], ) def test_complex_methods(x): s = sparse.COO.from_numpy(x) assert_eq(s.imag, x.imag) assert_eq(s.real, x.real) if np.issubdtype(s.dtype, np.number): assert_eq(s.conj(), x.conj()) def test_np_matrix(rng): x = rng.random((10, 1)).view(type=np.matrix) s = sparse.COO.from_numpy(x) assert_eq(x, s) def test_out_dtype(): a = sparse.eye(5, dtype="float32") b = sparse.eye(5, dtype="float64") assert np.positive(a, out=b).dtype == np.positive(a.todense(), out=b.todense()).dtype assert ( np.positive(a, out=b, dtype="float64").dtype == np.positive(a.todense(), out=b.todense(), 
dtype="float64").dtype ) @contextlib.contextmanager def auto_densify(): "For use in tests only! Not threadsafe." import os from importlib import reload os.environ["SPARSE_AUTO_DENSIFY"] = "1" reload(sparse.numba_backend._settings) yield del os.environ["SPARSE_AUTO_DENSIFY"] reload(sparse.numba_backend._settings) def test_setting_into_numpy_slice(): actual = np.zeros((5, 5)) s = sparse.COO(data=[1, 1], coords=(2, 4), shape=(5,)) # This calls s.__array__(dtype('float64')) which means that __array__ # must accept a positional argument. If not this will raise, of course, # TypeError: __array__() takes 1 positional argument but 2 were given with auto_densify(): actual[:, 0] = s # Might as well check the content of the result as well. expected = np.zeros((5, 5)) expected[:, 0] = s.todense() assert_eq(actual, expected) # Without densification, setting is unsupported. with pytest.raises(RuntimeError): actual[:, 0] = s def test_successful_densification(): s = sparse.random((3, 4, 5), density=0.5) with auto_densify(): x = np.array(s) assert isinstance(x, np.ndarray) assert_eq(s, x) def test_failed_densification(): s = sparse.random((3, 4, 5), density=0.5) with pytest.raises(RuntimeError): np.array(s) def test_warn_on_too_dense(): import os from importlib import reload os.environ["SPARSE_WARN_ON_TOO_DENSE"] = "1" reload(sparse.numba_backend._settings) with pytest.warns(RuntimeWarning): sparse.random((3, 4, 5), density=1.0) del os.environ["SPARSE_WARN_ON_TOO_DENSE"] reload(sparse.numba_backend._settings) def test_prune_coo(): coords = np.array([[0, 1, 2, 3]]) data = np.array([1, 0, 1, 2]) s1 = COO(coords, data) s2 = COO(coords, data, prune=True) assert s2.nnz == 3 # Densify s1 because it isn't canonical assert_eq(s1.todense(), s2, check_nnz=False) def test_diagonal(): a = sparse.random((4, 4), density=0.5) assert_eq(sparse.diagonal(a, offset=0), np.diagonal(a.todense(), offset=0)) assert_eq(sparse.diagonal(a, offset=1), np.diagonal(a.todense(), offset=1)) assert_eq(sparse.diagonal(a, offset=2), np.diagonal(a.todense(), offset=2)) a = sparse.random((4, 5, 4, 6), density=0.5) assert_eq( sparse.diagonal(a, offset=0, axis1=0, axis2=2), np.diagonal(a.todense(), offset=0, axis1=0, axis2=2), ) assert_eq( sparse.diagonal(a, offset=1, axis1=0, axis2=2), np.diagonal(a.todense(), offset=1, axis1=0, axis2=2), ) assert_eq( sparse.diagonal(a, offset=2, axis1=0, axis2=2), np.diagonal(a.todense(), offset=2, axis1=0, axis2=2), ) def test_diagonalize(): assert_eq(sparse.diagonalize(np.ones(3)), sparse.eye(3)) assert_eq( sparse.diagonalize(scipy.sparse.coo_matrix(np.eye(3))), sparse.diagonalize(sparse.eye(3)), ) # inverse of diagonal b = sparse.random((4, 3, 2), density=0.5) b_diag = sparse.diagonalize(b, axis=1) assert_eq(b, sparse.diagonal(b_diag, axis1=1, axis2=3).transpose([0, 2, 1])) RESULT_TYPE_DTYPES = [ "i1", "i2", "i4", "i8", "u1", "u2", "u4", "u8", "f4", "f8", "c8", "c16", object, ] @pytest.mark.parametrize("t1", RESULT_TYPE_DTYPES) @pytest.mark.parametrize("t2", RESULT_TYPE_DTYPES) @pytest.mark.parametrize( "func", [ sparse.result_type, pytest.param( np.result_type, marks=pytest.mark.skipif(not NEP18_ENABLED, reason="NEP18 is not enabled"), ), ], ) @pytest.mark.parametrize("data", [1, [1]]) # Not the same outputs! 
def test_result_type(t1, t2, func, data): a = np.array(data, dtype=t1) b = np.array(data, dtype=t2) expect = np.result_type(a, b) assert func(a, sparse.COO(b)) == expect assert func(sparse.COO(a), b) == expect assert func(sparse.COO(a), sparse.COO(b)) == expect assert func(a.dtype, sparse.COO(b)) == np.result_type(a.dtype, b) assert func(sparse.COO(a), b.dtype) == np.result_type(a, b.dtype) @pytest.mark.parametrize("in_shape", [(5, 5), 62, (3, 3, 3)]) def test_flatten(in_shape): s = sparse.random(in_shape, density=0.5) x = s.todense() a = s.flatten() e = x.flatten() assert_eq(e, a) def test_asnumpy(): s = sparse.COO(data=[1], coords=[2], shape=(5,)) assert_eq(sparse.asnumpy(s), s.todense()) assert_eq(sparse.asnumpy(s, dtype=np.float64), np.asarray(s.todense(), dtype=np.float64)) a = np.array([1, 2, 3]) # Array passes through with no copying. assert sparse.asnumpy(a) is a @pytest.mark.parametrize("shape1", [(2,), (2, 3), (2, 3, 4)]) @pytest.mark.parametrize("shape2", [(2,), (2, 3), (2, 3, 4)]) def test_outer(shape1, shape2): s1 = sparse.random(shape1, density=0.5) s2 = sparse.random(shape2, density=0.5) x1 = s1.todense() x2 = s2.todense() assert_eq(sparse.outer(s1, s2), np.outer(x1, x2)) assert_eq(np.multiply.outer(s1, s2), np.multiply.outer(x1, x2)) def test_scalar_list_init(): a = sparse.COO([], [], ()) b = sparse.COO([], [1], ()) assert a.todense() == 0 assert b.todense() == 1 def test_raise_on_nd_data(): s1 = sparse.random((2, 3, 4), density=0.5) with pytest.raises(ValueError): sparse.COO(s1.coords, s1.data[:, None], shape=(2, 3, 4)) def test_astype_casting(): s1 = sparse.random((2, 3, 4), density=0.5) with pytest.raises(TypeError): s1.astype(dtype=np.int64, casting="safe") def test_astype_no_copy(): s1 = sparse.random((2, 3, 4), density=0.5) s2 = s1.astype(s1.dtype, copy=False) assert s1 is s2 def test_coo_valerr(): a = np.arange(300) with pytest.raises(ValueError): COO.from_numpy(a, idx_dtype=np.int8) def test_random_idx_dtype(): with pytest.raises(ValueError): sparse.random((300,), density=0.1, format="coo", idx_dtype=np.int8) def test_html_for_size_zero(): arr = sparse.COO.from_numpy(np.array(())) ground_truth = "" ground_truth += '' ground_truth += '' ground_truth += '' ground_truth += '' ground_truth += '' ground_truth += '' ground_truth += '' ground_truth += '' ground_truth += "
Format: coo | Data Type: float64 | Shape: (0,) | nnz: 0 | Density: nan | Read-only: True | Size: 0 | Storage ratio: nan
" table = html_table(arr) assert table == ground_truth @pytest.mark.parametrize( "pad_width", [ 2, (2, 1), ((2), (1)), ((1, 2), (4, 5), (7, 8)), ], ) @pytest.mark.parametrize("constant_values", [0, 1, 150, np.nan]) def test_pad_valid(pad_width, constant_values): y = sparse.random((50, 50, 3), density=0.15, fill_value=constant_values) x = y.todense() xx = np.pad(x, pad_width=pad_width, constant_values=constant_values) yy = np.pad(y, pad_width=pad_width, constant_values=constant_values) assert_eq(xx, yy) @pytest.mark.parametrize( "pad_width", [ ((2, 1), (5, 7)), ], ) @pytest.mark.parametrize("constant_values", [150, 2, (1, 2)]) def test_pad_invalid(pad_width, constant_values, fill_value=0): y = sparse.random((50, 50, 3), density=0.15) with pytest.raises(ValueError): np.pad(y, pad_width, constant_values=constant_values) @pytest.mark.parametrize("val", [0, 5]) def test_scalar_from_numpy(val): x = np.int64(val) s = sparse.COO.from_numpy(x) assert s.nnz == 0 assert_eq(x, s) def test_scalar_elemwise(rng): s1 = sparse.random((), density=0.5) x2 = rng.random(2) x1 = s1.todense() assert_eq(s1 * x2, x1 * x2) def test_array_as_shape(): coords = [[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]] data = [10, 20, 30, 40, 50] sparse.COO(coords, data, shape=np.array((5, 5))) @pytest.mark.parametrize( "arr", [np.array([[0, 3, 0], [1, 2, 0]]), np.array([[[0, 0], [1, 0]], [[5, 0], [0, -3]]])], ) @pytest.mark.parametrize("axis", [None, 0, 1]) @pytest.mark.parametrize("keepdims", [True, False]) @pytest.mark.parametrize("mode", [(sparse.argmax, np.argmax), (sparse.argmin, np.argmin)]) def test_argmax_argmin(arr, axis, keepdims, mode): sparse_func, np_func = mode s_arr = sparse.COO.from_numpy(arr) result = sparse_func(s_arr, axis=axis, keepdims=keepdims).todense() expected = np_func(arr, axis=axis, keepdims=keepdims) np.testing.assert_equal(result, expected) @pytest.mark.parametrize("axis", [None, 0, 1, 2]) @pytest.mark.parametrize("mode", [(sparse.argmax, np.argmax), (sparse.argmin, np.argmin)]) def test_argmax_argmin_3D(axis, mode): sparse_func, np_func = mode s_arr = sparse.zeros(shape=(1000, 550, 3), format="dok") s_arr[100, 100, 0] = 3 s_arr[100, 100, 1] = 3 s_arr[100, 99, 0] = -2 s_arr = s_arr.to_coo() result = sparse_func(s_arr, axis=axis).todense() expected = np_func(s_arr.todense(), axis=axis) np.testing.assert_equal(result, expected) @pytest.mark.parametrize("func", [sparse.argmax, sparse.argmin]) def test_argmax_argmin_constraint(func): s = sparse.COO.from_numpy(np.full((2, 2), 2), fill_value=2) with pytest.raises(ValueError, match="`axis=2` is out of bounds for array of dimension 2."): func(s, axis=2) @pytest.mark.parametrize("config", [(np.inf, "isinf"), (np.nan, "isnan")]) def test_isinf_isnan(config): obj, func_name = config arr = np.array([[1, 1, obj], [-obj, 1, 1]]) s = sparse.COO.from_numpy(arr) result = getattr(s, func_name)().todense() expected = getattr(np, func_name)(arr) np.testing.assert_equal(result, expected) class TestSqueeze: eye_arr = np.eye(2).reshape(1, 2, 1, 2) @pytest.mark.parametrize( "arr_and_axis", [ (eye_arr, None), (eye_arr, 0), (eye_arr, 2), (eye_arr, (0, 2)), (np.zeros((5,)), None), ], ) def test_squeeze(self, arr_and_axis): arr, axis = arr_and_axis s_arr = sparse.COO.from_numpy(arr) result_1 = sparse.squeeze(s_arr, axis=axis).todense() result_2 = s_arr.squeeze(axis=axis).todense() expected = np.squeeze(arr, axis=axis) np.testing.assert_equal(result_1, result_2) np.testing.assert_equal(result_1, expected) def test_squeeze_validation(self): s_arr = sparse.COO.from_numpy(np.eye(3)) with 
pytest.raises(IndexError, match="tuple index out of range"): s_arr.squeeze(3) with pytest.raises(ValueError, match="Invalid axis parameter: `1.1`."): s_arr.squeeze(1.1) with pytest.raises(ValueError, match="Specified axis `0` has a size greater than one: 3"): s_arr.squeeze(0) class TestUnique: arr = np.array([[0, 0, 1, 5, 3, 0], [1, 0, 4, 0, 3, 0], [0, 1, 0, 1, 1, 0]], dtype=np.int64) arr_empty = np.zeros((5, 5)) arr_full = np.arange(1, 10) @pytest.mark.parametrize("arr", [arr, arr_empty, arr_full]) @pytest.mark.parametrize("fill_value", [-1, 0, 1]) def test_unique_counts(self, arr, fill_value): s_arr = sparse.COO.from_numpy(arr, fill_value) result_values, result_counts = sparse.unique_counts(s_arr) expected_values, expected_counts = np.unique(arr, return_counts=True) np.testing.assert_equal(result_values, expected_values) np.testing.assert_equal(result_counts, expected_counts) @pytest.mark.parametrize("arr", [arr, arr_empty, arr_full]) @pytest.mark.parametrize("fill_value", [-1, 0, 1]) def test_unique_values(self, arr, fill_value): s_arr = sparse.COO.from_numpy(arr, fill_value) result = sparse.unique_values(s_arr) expected = np.unique(arr) np.testing.assert_equal(result, expected) @pytest.mark.parametrize("func", [sparse.unique_counts, sparse.unique_values]) def test_input_validation(self, func): with pytest.raises(ValueError, match="Input must be an instance of SparseArray"): func(self.arr) @pytest.mark.parametrize("axis", [-1, 0, 1, 2, 3]) def test_expand_dims(axis): arr = np.arange(24).reshape((2, 3, 4)) s_arr = sparse.COO.from_numpy(arr) result = sparse.expand_dims(s_arr, axis=axis) expected = np.expand_dims(arr, axis=axis) np.testing.assert_equal(result.todense(), expected) @pytest.mark.parametrize( "arr", [ np.array([[0, 0, 1, 5, 3, 0], [1, 0, 4, 0, 3, 0], [0, 1, 0, 1, 1, 0]], dtype=np.int64), np.array([[[2, 0], [0, 5]], [[1, 0], [4, 0]], [[0, 1], [0, -1]]], dtype=np.float64), np.arange(3, 10), ], ) @pytest.mark.parametrize("fill_value", [-1, 0, 1, 3]) @pytest.mark.parametrize("axis", [0, 1, -1]) @pytest.mark.parametrize("descending", [False, True]) @pytest.mark.parametrize( "stable", [False, pytest.param(True, marks=pytest.mark.xfail(reason="Numba doesn't support `stable=True`."))] ) def test_sort(arr, fill_value, axis, descending, stable): if axis >= arr.ndim: return s_arr = sparse.COO.from_numpy(arr, fill_value) kind = "mergesort" if stable else "quicksort" result = sparse.sort(s_arr, axis=axis, descending=descending, stable=stable) expected = -np.sort(-arr, axis=axis, kind=kind) if descending else np.sort(arr, axis=axis, kind=kind) np.testing.assert_equal(result.todense(), expected) # make sure no inplace changes happened np.testing.assert_equal(s_arr.todense(), arr) @pytest.mark.parametrize("fill_value", [-1, 0, 1]) @pytest.mark.parametrize("descending", [False, True]) def test_sort_only_fill_value(fill_value, descending): arr = np.full((3, 3), fill_value=fill_value) s_arr = sparse.COO.from_numpy(arr, fill_value) result = sparse.sort(s_arr, axis=0, descending=descending) expected = np.sort(arr, axis=0) np.testing.assert_equal(result.todense(), expected) @pytest.mark.parametrize("axis", [None, -1, 0, 1, 2, (0, 1), (2, 0)]) def test_flip(axis): arr = np.arange(24).reshape((2, 3, 4)) s_arr = sparse.COO.from_numpy(arr) result = sparse.flip(s_arr, axis=axis) expected = np.flip(arr, axis=axis) np.testing.assert_equal(result.todense(), expected) @pytest.mark.parametrize("fill_value", [-1, 0, 1, 3]) @pytest.mark.parametrize( "indices,axis", [ ( [1], 0, ), ([2, 1], 1), ([1, 2, 3], 2), 
([2, 3], -1), ([5, 3, 7, 8], None), ], ) def test_take(fill_value, indices, axis): arr = np.arange(24).reshape((2, 3, 4)) s_arr = sparse.COO.from_numpy(arr, fill_value) result = sparse.take(s_arr, np.array(indices), axis=axis) expected = np.take(arr, indices, axis) np.testing.assert_equal(result.todense(), expected) @pytest.mark.parametrize("ndim", [2, 3, 4, 5]) @pytest.mark.parametrize("density", [0.0, 0.1, 0.25, 1.0]) def test_matrix_transpose(ndim, density): shape = tuple(range(2, 34)[:ndim]) xs = sparse.random(shape, density=density) xd = xs.todense() transpose_axes = list(range(ndim)) transpose_axes[-2:] = transpose_axes[-2:][::-1] expected = np.transpose(xd, axes=transpose_axes) actual = sparse.matrix_transpose(xs) assert_eq(actual, expected) assert_eq(xs.mT, expected) @pytest.mark.parametrize( ("shape1", "shape2", "axis"), [ ((2, 3, 4), (3, 4), -2), ((3, 4), (2, 3, 4), -1), ((3, 1, 4), (3, 2, 4), 2), ((1, 3, 4), (3, 4), -2), ((3, 4, 1), (3, 4, 2), 0), ((3, 1), (3, 4), -2), ((1, 4), (3, 4), 1), ], ) @pytest.mark.parametrize("density", [0.0, 0.1, 0.25, 1.0]) @pytest.mark.parametrize("is_complex", [False, True]) def test_vecdot(shape1, shape2, axis, density, rng, is_complex): def data_rvs(size): data = rng.random(size) if is_complex: data = data + rng.random(size) * 1j return data s1 = sparse.random(shape1, density=density, data_rvs=data_rvs) s2 = sparse.random(shape2, density=density, data_rvs=data_rvs) x1 = s1.todense() x2 = s2.todense() def np_vecdot(x1, x2, /, *, axis=-1): if np.issubdtype(x1.dtype, np.complexfloating): x1 = np.conjugate(x1) return np.sum(x1 * x2, axis=axis) actual = sparse.vecdot(s1, s2, axis=axis) expected = np_vecdot(x1, x2, axis=axis) np.testing.assert_allclose(actual.todense(), expected) @pytest.mark.parametrize( ("shape1", "shape2", "axis"), [ ((2, 3, 4), (3, 4), 0), ((3, 4), (2, 3, 4), 0), ((3, 1, 4), (3, 2, 4), -2), ((1, 3, 4), (3, 4), -3), ((3, 4, 1), (3, 4, 2), -1), ((3, 1), (3, 4), 1), ((1, 4), (3, 4), -2), ], ) def test_vecdot_invalid_axis(shape1, shape2, axis): s1 = sparse.random(shape1, density=0.5) s2 = sparse.random(shape2, density=0.5) with pytest.raises(ValueError, match=r"Shapes must match along"): sparse.vecdot(s1, s2, axis=axis) @pytest.mark.parametrize( ("func", "args", "kwargs"), [ (sparse.eye, (5,), {}), (sparse.zeros, ((5,)), {}), (sparse.ones, ((5,)), {}), (sparse.full, ((5,), 5), {}), (sparse.empty, ((5,)), {}), (sparse.full_like, (5,), {}), (sparse.ones_like, (), {}), (sparse.zeros_like, (), {}), (sparse.empty_like, (), {}), (sparse.asarray, (), {}), ], ) def test_invalid_device(func, args, kwargs): if func.__name__.endswith("_like") or func is sparse.asarray: like = sparse.random((5, 5), density=0.5) args = (like,) + args with pytest.raises(ValueError, match="Device must be"): func(*args, device="invalid_device", **kwargs) def test_device(): s = sparse.random((5, 5), density=0.5) data = getattr(s, "data", None) device = getattr(data, "device", "cpu") assert s.device == device def test_to_device(): s = sparse.random((5, 5), density=0.5) s2 = s.to_device(s.device) assert s is s2 def test_to_invalid_device(): s = sparse.random((5, 5), density=0.5) with pytest.raises(ValueError, match=r"Only .* is supported."): s.to_device("invalid_device") sparse-0.16.0a9/sparse/numba_backend/tests/test_coo_numba.py000066400000000000000000000033301463475501500241210ustar00rootroot00000000000000import sparse import numba import numpy as np @numba.njit def identity(x): """Pass an object through numba and back""" return x def identity_constant(x): @numba.njit 
def get_it(): """Pass an object through numba and back as a constant""" return x return get_it() def assert_coo_equal(c1, c2): assert c1.shape == c2.shape assert sparse.all(c1 == c2) assert c1.data.dtype == c2.data.dtype assert c1.fill_value == c2.fill_value def assert_coo_same_memory(c1, c2): assert_coo_equal(c1, c2) assert c1.coords.data == c2.coords.data assert c1.data.data == c2.data.data class TestBasic: """Test very simple construction and field access""" def test_roundtrip(self): c1 = sparse.COO(np.eye(3), fill_value=1) c2 = identity(c1) assert type(c1) is type(c2) assert_coo_same_memory(c1, c2) def test_roundtrip_constant(self): c1 = sparse.COO(np.eye(3), fill_value=1) c2 = identity_constant(c1) # constants are always copies assert_coo_equal(c1, c2) def test_unpack_attrs(self): @numba.njit def unpack(c): return c.coords, c.data, c.shape, c.fill_value c1 = sparse.COO(np.eye(3), fill_value=1) coords, data, shape, fill_value = unpack(c1) c2 = sparse.COO(coords, data, shape, fill_value=fill_value) assert_coo_same_memory(c1, c2) def test_repack_attrs(self): @numba.njit def pack(coords, data, shape): return sparse.COO(coords, data, shape) # repacking fill_value isn't possible yet c1 = sparse.COO(np.eye(3)) c2 = pack(c1.coords, c1.data, c1.shape) assert_coo_same_memory(c1, c2) sparse-0.16.0a9/sparse/numba_backend/tests/test_dask_interop.py000066400000000000000000000006371463475501500246500ustar00rootroot00000000000000import sparse from dask.base import tokenize def test_deterministic_token(): a = sparse.COO(data=[1, 2, 3], coords=[10, 20, 30], shape=(40,)) b = sparse.COO(data=[1, 2, 3], coords=[10, 20, 30], shape=(40,)) assert tokenize(a) == tokenize(b) # One of these things is not like the other.... c = sparse.COO(data=[1, 2, 4], coords=[10, 20, 30], shape=(40,)) assert tokenize(a) != tokenize(c) sparse-0.16.0a9/sparse/numba_backend/tests/test_dok.py000066400000000000000000000176031463475501500227440ustar00rootroot00000000000000import sparse from sparse import DOK from sparse.numba_backend._utils import assert_eq import pytest import numpy as np @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4)]) @pytest.mark.parametrize("density", [0.1, 0.3, 0.5, 0.7]) def test_random_shape_nnz(shape, density): s = sparse.random(shape, density, format="dok") assert isinstance(s, DOK) assert s.shape == shape expected_nnz = density * np.prod(shape) assert np.floor(expected_nnz) <= s.nnz <= np.ceil(expected_nnz) def test_convert_to_coo(): s1 = sparse.random((2, 3, 4), 0.5, format="dok") s2 = sparse.COO(s1) assert_eq(s1, s2) def test_convert_from_coo(): s1 = sparse.random((2, 3, 4), 0.5, format="coo") s2 = DOK(s1) assert_eq(s1, s2) def test_convert_from_numpy(rng): x = rng.random((2, 3, 4)) s = DOK(x) assert_eq(x, s) def test_convert_to_numpy(): s = sparse.random((2, 3, 4), 0.5, format="dok") x = s.todense() assert_eq(x, s) def test_convert_from_scipy_sparse(): import scipy.sparse x = scipy.sparse.rand(6, 3, density=0.2) s = DOK(x) assert_eq(x, s) @pytest.mark.parametrize( "shape, data", [ (2, {0: 1}), ((2, 3), {(0, 1): 3, (1, 2): 4}), ((2, 3, 4), {(0, 1): 3, (1, 2, 3): 4, (1, 1): [6, 5, 4, 1]}), ], ) def test_construct(shape, data): s = DOK(shape, data) x = np.zeros(shape, dtype=s.dtype) for c, d in data.items(): x[c] = d assert_eq(x, s) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4)]) @pytest.mark.parametrize("density", [0.1, 0.3, 0.5, 0.7]) def test_getitem_single(shape, density, rng): s = sparse.random(shape, density, format="dok") x = s.todense() for _ in range(s.nnz): idx = 
rng.integers(np.prod(shape)) idx = np.unravel_index(idx, shape) assert np.isclose(s[idx], x[idx]) @pytest.mark.parametrize( "shape, density, indices", [ ((2, 3), 0.5, (slice(1),)), ((5, 5), 0.2, (slice(0, 4, 2),)), ((10, 10), 0.2, (slice(5), slice(0, 10, 3))), ((5, 5), 0.5, (slice(0, 4, 4), slice(0, 4, 4))), ((5, 5), 0.4, (1, slice(0, 4, 1))), ((10, 10), 0.8, ([0, 4, 5], [3, 2, 4])), ((10, 10), 0, (slice(10), slice(10))), ], ) def test_getitem(shape, density, indices): s = sparse.random(shape, density, format="dok") x = s.todense() sparse_sliced = s[indices] dense_sliced = x[indices] assert_eq(sparse_sliced.todense(), dense_sliced) @pytest.mark.parametrize( "shape, density, indices", [ ((10, 10), 0.8, ([0, 4, 5],)), ((5, 5, 5), 0.5, ([1, 2, 3], [0, 2, 2])), ], ) def test_getitem_notimplemented_error(shape, density, indices): s = sparse.random(shape, density, format="dok") with pytest.raises(NotImplementedError): s[indices] @pytest.mark.parametrize( "shape, density, indices", [ ((10, 10), 0.8, ([0, 4, 5], [0, 2])), ((5, 5, 5), 0.5, ([1, 2, 3], [0], [2, 3, 4])), ((10,), 0.5, (5, 6)), ], ) def test_getitem_index_error(shape, density, indices): s = sparse.random(shape, density, format="dok") with pytest.raises(IndexError): s[indices] @pytest.mark.parametrize( "shape, index, value_shape", [ ((2,), slice(None), ()), ((2,), slice(1, 2), ()), ((2,), slice(0, 2), (2,)), ((2,), 1, ()), ((2, 3), (0, slice(None)), ()), ((2, 3), (0, slice(1, 3)), ()), ((2, 3), (1, slice(None)), (3,)), ((2, 3), (0, slice(1, 3)), (2,)), ((2, 3), (0, slice(2, 0, -1)), (2,)), ((2, 3), (slice(None), 1), ()), ((2, 3), (slice(None), 1), (2,)), ((2, 3), (slice(1, 2), 1), ()), ((2, 3), (slice(1, 2), 1), (1,)), ((2, 3), (0, 2), ()), ((2, 3), ([0, 1], [1, 2]), (2,)), ((2, 3), ([0, 1], [1, 2]), ()), ((4,), ([1, 3]), ()), ], ) def test_setitem(shape, index, value_shape, rng): s = sparse.random(shape, 0.5, format="dok") x = s.todense() value = rng.random(value_shape) s[index] = value x[index] = value assert_eq(x, s) def test_setitem_delete(): shape = (2, 3) index = [0, 1], [1, 2] value = 0 s = sparse.random(shape, 1.0, format="dok") x = s.todense() s[index] = value x[index] = value assert_eq(x, s) assert s.nnz < s.size @pytest.mark.parametrize( "shape, index, value_shape", [ ((2, 3), ([0, 1.5], [1, 2]), ()), ((2, 3), ([0, 1], [1]), ()), ((2, 3), ([[0], [1]], [1, 2]), ()), ], ) def test_setitem_index_error(shape, index, value_shape, rng): s = sparse.random(shape, 0.5, format="dok") value = rng.random(value_shape) with pytest.raises(IndexError): s[index] = value @pytest.mark.parametrize( "shape, index, value_shape", [ ((2, 3), ([0, 1],), ()), ], ) def test_setitem_notimplemented_error(shape, index, value_shape, rng): s = sparse.random(shape, 0.5, format="dok") value = rng.random(value_shape) with pytest.raises(NotImplementedError): s[index] = value @pytest.mark.parametrize( "shape, index, value_shape", [ ((2, 3), ([0, 1], [1, 2]), (1, 2)), ((2, 3), ([0, 1], [1, 2]), (3,)), ((2,), 1, (2,)), ], ) def test_setitem_value_error(shape, index, value_shape, rng): s = sparse.random(shape, 0.5, format="dok") value = rng.random(value_shape) with pytest.raises(ValueError): s[index] = value def test_default_dtype(): s = DOK((5,)) assert s.dtype == np.float64 def test_int_dtype(): data = {1: np.uint8(1), 2: np.uint16(2)} s = DOK((5,), data) assert s.dtype == np.uint16 def test_float_dtype(): data = {1: np.uint8(1), 2: np.float32(2)} s = DOK((5,), data) assert s.dtype == np.float32 def test_set_zero(): s = DOK((1,), dtype=np.uint8) s[0] = 1 s[0] = 0 
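# assigning the fill value (0) is expected to drop the stored entry, so the asserts below see s[0] == 0 and nnz == 0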
assert s[0] == 0 assert s.nnz == 0 @pytest.mark.parametrize("format", ["coo", "dok"]) def test_asformat(format): s = sparse.random((2, 3, 4), density=0.5, format="dok") s2 = s.asformat(format) assert_eq(s, s2) def test_coo_fv_interface(rng): s1 = sparse.full((5, 5), fill_value=1 + rng.random()) s2 = sparse.DOK(s1) assert_eq(s1, s2) s3 = sparse.COO(s2) assert_eq(s1, s3) def test_empty_dok_dtype(): d = sparse.DOK(5, dtype=np.uint8) s = sparse.COO(d) assert s.dtype == d.dtype def test_zeros_like(): s = sparse.random((2, 3, 4), density=0.5) s2 = sparse.zeros_like(s, format="dok") assert s.shape == s2.shape assert s.dtype == s2.dtype assert isinstance(s2, sparse.DOK) @pytest.mark.parametrize( "pad_width", [ 2, (2, 1), ((2), (1)), ((1, 2), (4, 5), (7, 8)), ], ) @pytest.mark.parametrize("constant_values", [0, 1, 150, np.nan]) def test_pad_valid(pad_width, constant_values): y = sparse.random((50, 50, 3), density=0.15, fill_value=constant_values, format="dok") x = y.todense() xx = np.pad(x, pad_width=pad_width, constant_values=constant_values) yy = np.pad(y, pad_width=pad_width, constant_values=constant_values) assert_eq(xx, yy) @pytest.mark.parametrize( "pad_width", [ ((2, 1), (5, 7)), ], ) @pytest.mark.parametrize("constant_values", [150, 2, (1, 2)]) def test_pad_invalid(pad_width, constant_values, fill_value=0): y = sparse.random((50, 50, 3), density=0.15, format="dok") with pytest.raises(ValueError): np.pad(y, pad_width, constant_values=constant_values) @pytest.mark.parametrize("func", [np.concatenate, np.stack]) def test_dok_concat_stack(func): s1 = sparse.random((4, 4), density=0.25, format="dok") s2 = sparse.random((4, 4), density=0.25, format="dok") x1 = s1.todense() x2 = s2.todense() assert_eq(func([s1, s2]), func([x1, x2])) def test_dok_indexing(): s = sparse.DOK((3, 3)) s[1, 2] = 0.5 x = s.todense() assert_eq(x[1::-1], s[1::-1]) sparse-0.16.0a9/sparse/numba_backend/tests/test_dot.py000066400000000000000000000253661463475501500227620ustar00rootroot00000000000000import operator import sparse from sparse import COO from sparse.numba_backend._compressed import GCXS from sparse.numba_backend._utils import assert_eq, assert_gcxs_slicing, default_rng import pytest import numpy as np import scipy.sparse import scipy.stats @pytest.mark.parametrize( "a_shape,b_shape,axes", [ [(3, 4), (4, 3), (1, 0)], [(3, 4), (4, 3), (0, 1)], [(3, 4, 5), (4, 3), (1, 0)], [(3, 4), (5, 4, 3), (1, 1)], [(3, 4), (5, 4, 3), ((0, 1), (2, 1))], [(3, 4), (5, 4, 3), ((1, 0), (1, 2))], [(3, 4, 5), (4,), (1, 0)], [(4,), (3, 4, 5), (0, 1)], [(4,), (4,), (0, 0)], [(4,), (4,), 0], ], ) @pytest.mark.parametrize( "a_format, b_format", [("coo", "coo"), ("coo", "gcxs"), ("gcxs", "coo"), ("gcxs", "gcxs")], ) def test_tensordot(a_shape, b_shape, axes, a_format, b_format): sa = sparse.random(a_shape, density=0.5, format=a_format) sb = sparse.random(b_shape, density=0.5, format=b_format) a = sa.todense() b = sb.todense() a_b = np.tensordot(a, b, axes) # tests for return_type=None sa_sb = sparse.tensordot(sa, sb, axes) sa_b = sparse.tensordot(sa, b, axes) a_sb = sparse.tensordot(a, sb, axes) assert_eq(a_b, sa_sb) assert_eq(a_b, sa_b) assert_eq(a_b, a_sb) if all(isinstance(arr, COO) for arr in [sa, sb]): assert isinstance(sa_sb, COO) else: assert isinstance(sa_sb, GCXS) assert isinstance(sa_b, np.ndarray) assert isinstance(a_sb, np.ndarray) # tests for return_type=COO sa_b = sparse.tensordot(sa, b, axes, return_type=COO) a_sb = sparse.tensordot(a, sb, axes, return_type=COO) assert_eq(a_b, sa_b) assert_eq(a_b, a_sb) assert 
isinstance(sa_b, COO) assert isinstance(a_sb, COO) # tests form return_type=GCXS sa_b = sparse.tensordot(sa, b, axes, return_type=GCXS) a_sb = sparse.tensordot(a, sb, axes, return_type=GCXS) assert_eq(a_b, sa_b) assert_eq(a_b, a_sb) assert isinstance(sa_b, GCXS) assert isinstance(a_sb, GCXS) # tests for return_type=np.ndarray sa_sb = sparse.tensordot(sa, sb, axes, return_type=np.ndarray) assert_eq(a_b, sa_sb) assert isinstance(sa_sb, np.ndarray) def test_tensordot_empty(): x1 = np.empty((0, 0, 0)) x2 = np.empty((0, 0, 0)) s1 = sparse.COO.from_numpy(x1) s2 = sparse.COO.from_numpy(x2) assert_eq(np.tensordot(x1, x2), sparse.tensordot(s1, s2)) def test_tensordot_valueerror(): x1 = sparse.COO(np.array(1)) x2 = sparse.COO(np.array(1)) with pytest.raises(ValueError): x1 @ x2 def gen_kwargs(format): from sparse.numba_backend._utils import convert_format format = convert_format(format) if format == "gcxs": return [{"compressed_axes": c} for c in [(0,), (1,)]] return [{}] def gen_for_format(format): return [(format, g) for g in gen_kwargs(format)] @pytest.mark.parametrize( "a_shape, b_shape", [ ((3, 1, 6, 5), (2, 1, 4, 5, 6)), ((2, 1, 4, 5, 6), (3, 1, 6, 5)), ((1, 1, 5), (3, 5, 6)), ((3, 4, 5), (1, 5, 6)), ((3, 4, 5), (3, 5, 6)), ((3, 4, 5), (5, 6)), ((4, 5), (5, 6)), ((5,), (5, 6)), ((4, 5), (5,)), ((5,), (5,)), ((3, 4), (1, 2, 4, 3)), ], ) @pytest.mark.parametrize( "a_format, a_kwargs", [*gen_for_format("coo"), *gen_for_format("gcxs")], ) @pytest.mark.parametrize( "b_format, b_kwargs", [*gen_for_format("coo"), *gen_for_format("gcxs")], ) def test_matmul(a_shape, b_shape, a_format, b_format, a_kwargs, b_kwargs): if len(a_shape) == 1: a_kwargs = {} if len(b_shape) == 1: b_kwargs = {} sa = sparse.random(a_shape, density=0.5, format=a_format, **a_kwargs) sb = sparse.random(b_shape, density=0.5, format=b_format, **b_kwargs) a = sa.todense() b = sb.todense() assert_eq(np.matmul(a, b), sparse.matmul(sa, sb)) assert_eq(sparse.matmul(sa, b), sparse.matmul(a, sb)) assert_eq(np.matmul(a, b), sparse.matmul(sa, sb)) if a.ndim == 2 or b.ndim == 2: assert_eq( np.matmul(a, b), sparse.matmul( scipy.sparse.coo_matrix(a) if a.ndim == 2 else sa, scipy.sparse.coo_matrix(b) if b.ndim == 2 else sb, ), ) if hasattr(operator, "matmul"): assert_eq(operator.matmul(a, b), operator.matmul(sa, sb)) def test_matmul_errors(): with pytest.raises(ValueError): sa = sparse.random((3, 4, 5, 6), 0.5) sb = sparse.random((3, 6, 5, 6), 0.5) sparse.matmul(sa, sb) @pytest.mark.parametrize( "a, b", [ ( sparse.GCXS.from_numpy(default_rng.choice([0, np.nan, 2], size=[100, 100], p=[0.99, 0.001, 0.009])), sparse.random((100, 100), density=0.01), ), ( sparse.COO.from_numpy(default_rng.choice([0, np.nan, 2], size=[100, 100], p=[0.99, 0.001, 0.009])), sparse.random((100, 100), density=0.01), ), ( sparse.GCXS.from_numpy(default_rng.choice([0, np.nan, 2], size=[100, 100], p=[0.99, 0.001, 0.009])), scipy.sparse.random(100, 100), ), ( default_rng.choice([0, np.nan, 2], size=[100, 100], p=[0.99, 0.001, 0.009]), sparse.random((100, 100), density=0.01), ), ], ) def test_matmul_nan_warnings(a, b): with pytest.warns(RuntimeWarning): a @ b @pytest.mark.parametrize( "a_shape, b_shape", [ ((1, 4, 5), (3, 5, 6)), ((3, 4, 5), (1, 5, 6)), ((3, 4, 5), (3, 5, 6)), ((3, 4, 5), (5, 6)), ((4, 5), (5, 6)), ((5,), (5, 6)), ((4, 5), (5,)), ((5,), (5,)), ], ) @pytest.mark.parametrize( "a_format, a_kwargs", [*gen_for_format("coo"), *gen_for_format("gcxs")], ) @pytest.mark.parametrize( "b_format, b_kwargs", [*gen_for_format("coo"), *gen_for_format("gcxs")], ) def 
test_dot(a_shape, b_shape, a_format, b_format, a_kwargs, b_kwargs): if len(a_shape) == 1: a_kwargs = {} if len(b_shape) == 1: b_kwargs = {} sa = sparse.random(a_shape, density=0.5, format=a_format, **a_kwargs) sb = sparse.random(b_shape, density=0.5, format=b_format, **b_kwargs) a = sa.todense() b = sb.todense() e = np.dot(a, b) assert_eq(e, sa.dot(sb)) assert_eq(e, sparse.dot(sa, sb)) assert_eq(e, sparse.dot(a, sb)) assert_eq(e, sparse.dot(a, sb)) # Basic equivalences e = operator.matmul(a, b) assert_eq(e, operator.matmul(sa, sb)) assert_eq(e, operator.matmul(a, sb)) assert_eq(e, operator.matmul(sa, b)) @pytest.mark.parametrize( "a_dense, b_dense, o_type", [ (False, False, sparse.SparseArray), (False, True, np.ndarray), (True, False, np.ndarray), ], ) def test_dot_type(a_dense, b_dense, o_type): a = sparse.random((3, 4), density=0.8) b = sparse.random((4, 5), density=0.8) if a_dense: a = a.todense() if b_dense: b = b.todense() assert isinstance(sparse.dot(a, b), o_type) @pytest.mark.xfail def test_dot_nocoercion(): sa = sparse.random((3, 4, 5), density=0.5) sb = sparse.random((5, 6), density=0.5) a = sa.todense() b = sb.todense() la = a.tolist() lb = b.tolist() if hasattr(operator, "matmul"): # Operations with naive collection (list) assert_eq(operator.matmul(la, b), operator.matmul(la, sb)) assert_eq(operator.matmul(a, lb), operator.matmul(sa, lb)) dot_formats = [ lambda x: x.asformat("coo"), lambda x: x.asformat("gcxs"), lambda x: x.todense(), ] @pytest.mark.parametrize("format1", dot_formats) @pytest.mark.parametrize("format2", dot_formats) def test_small_values(format1, format2): s1 = format1(sparse.COO(coords=[[0, 10]], data=[3.6e-100, 7.2e-009], shape=(20,))) s2 = format2(sparse.COO(coords=[[0, 0], [4, 28]], data=[3.8e-25, 4.5e-225], shape=(20, 50))) def dense_convertor(x): return x.todense() if isinstance(x, sparse.SparseArray) else x x1, x2 = dense_convertor(s1), dense_convertor(s2) assert_eq(x1 @ x2, s1 @ s2) dot_dtypes = [np.complex64, np.complex128] @pytest.mark.parametrize("dtype1", dot_dtypes) @pytest.mark.parametrize("dtype2", dot_dtypes) @pytest.mark.parametrize("format1", dot_formats) @pytest.mark.parametrize("format2", dot_formats) @pytest.mark.parametrize("ndim1", (1, 2)) @pytest.mark.parametrize("ndim2", (1, 2)) def test_complex(dtype1, dtype2, format1, format2, ndim1, ndim2): s1 = format1(sparse.random((20,) * ndim1, density=0.5).astype(dtype1)) s2 = format2(sparse.random((20,) * ndim2, density=0.5).astype(dtype2)) def dense_convertor(x): return x.todense() if isinstance(x, sparse.SparseArray) else x x1, x2 = dense_convertor(s1), dense_convertor(s2) assert_eq(x1 @ x2, s1 @ s2) @pytest.mark.parametrize("dtype1", dot_dtypes) @pytest.mark.parametrize("dtype2", dot_dtypes) @pytest.mark.parametrize("ndim1", (1, 2)) @pytest.mark.parametrize("ndim2", (1, 2)) def test_dot_dense(dtype1, dtype2, ndim1, ndim2): a = sparse.random((20,) * ndim1, density=0.5).astype(dtype1).todense() b = sparse.random((20,) * ndim2, density=0.5).astype(dtype2).todense() assert_eq(sparse.dot(a, b), np.dot(a, b)) assert_eq(sparse.matmul(a, b), np.matmul(a, b)) if ndim1 == 2 and ndim2 == 2: assert_eq(sparse.tensordot(a, b), np.tensordot(a, b)) @pytest.mark.parametrize( "a_shape, b_shape", [((3, 4, 5), (5, 6)), ((2, 8, 6), (6, 3))], ) def test_dot_GCXS_slicing(a_shape, b_shape): sa = sparse.random(shape=a_shape, density=1, format="gcxs") sb = sparse.random(shape=b_shape, density=1, format="gcxs") a = sa.todense() b = sb.todense() # tests dot sa_sb = sparse.dot(sa, sb) a_b = np.dot(a, b) 
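# assert_gcxs_slicing (imported from sparse.numba_backend._utils) presumably verifies that slices of the GCXS product agree with the dense np.dot result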
assert_gcxs_slicing(sa_sb, a_b) @pytest.mark.parametrize( "a_shape,b_shape,axes", [ [(3, 4, 5), (4, 3), (1, 0)], [(3, 4), (5, 4, 3), (1, 1)], [(5, 9), (9, 5, 6), (0, 1)], ], ) def test_tensordot_GCXS_slicing(a_shape, b_shape, axes): sa = sparse.random(shape=a_shape, density=1, format="gcxs") sb = sparse.random(shape=b_shape, density=1, format="gcxs") a = sa.todense() b = sb.todense() sa_sb = sparse.tensordot(sa, sb, axes) a_b = np.tensordot(a, b, axes) assert_gcxs_slicing(sa_sb, a_b) @pytest.mark.parametrize( "a_shape, b_shape", [ [(1, 1, 5), (3, 5, 6)], [(3, 4, 5), (1, 5, 6)], [(3, 4, 5), (3, 5, 6)], [(3, 4, 5), (5, 6)], ], ) def test_matmul_GCXS_slicing(a_shape, b_shape): sa = sparse.random(shape=a_shape, density=1, format="gcxs") sb = sparse.random(shape=b_shape, density=1, format="gcxs") a = sa.todense() b = sb.todense() sa_sb = sparse.matmul(sa, sb) a_b = np.matmul(a, b) assert_gcxs_slicing(sa_sb, a_b) sparse-0.16.0a9/sparse/numba_backend/tests/test_einsum.py000066400000000000000000000126021463475501500234610ustar00rootroot00000000000000import sparse import pytest import numpy as np einsum_cases = [ "a,->a", "ab,->ab", ",ab,->ab", ",,->", "a,ab,abc->abc", "a,b,ab->ab", "ea,fb,gc,hd,abcd->efgh", "ea,fb,abcd,gc,hd->efgh", "abcd,ea,fb,gc,hd->efgh", "acdf,jbje,gihb,hfac,gfac,gifabc,hfac", "cd,bdhe,aidb,hgca,gc,hgibcd,hgac", "abhe,hidj,jgba,hiab,gab", "bde,cdh,agdb,hica,ibd,hgicd,hiac", "chd,bde,agbc,hiad,hgc,hgi,hiad", "chd,bde,agbc,hiad,bdi,cgh,agdb", "bdhe,acad,hiab,agac,hibd", "ab,ab,c->", "ab,ab,c->c", "ab,ab,cd,cd->", "ab,ab,cd,cd->ac", "ab,ab,cd,cd->cd", "ab,ab,cd,cd,ef,ef->", "ab,cd,ef->abcdef", "ab,cd,ef->acdf", "ab,cd,de->abcde", "ab,cd,de->be", "ab,bcd,cd->abcd", "ab,bcd,cd->abd", "eb,cb,fb->cef", "dd,fb,be,cdb->cef", "bca,cdb,dbf,afc->", "dcc,fce,ea,dbf->ab", "fdf,cdd,ccd,afe->ae", "abcd,ad", "ed,fcd,ff,bcf->be", "baa,dcf,af,cde->be", "bd,db,eac->ace", "fff,fae,bef,def->abd", "efc,dbc,acf,fd->abe", "ab,ab", "ab,ba", "abc,abc", "abc,bac", "abc,cba", "ab,bc", "ab,cb", "ba,bc", "ba,cb", "abcd,cd", "abcd,ab", "abcd,cdef", "abcd,cdef->feba", "abcd,efdc", "aab,bc->ac", "ab,bcc->ac", "aab,bcc->ac", "baa,bcc->ac", "aab,ccb->ac", "aab,fa,df,ecc->bde", "ecb,fef,bad,ed->ac", "bcf,bbb,fbf,fc->", "bb,ff,be->e", "bcb,bb,fc,fff->", "fbb,dfd,fc,fc->", "afd,ba,cc,dc->bf", "adb,bc,fa,cfc->d", "bbd,bda,fc,db->acf", "dba,ead,cad->bce", "aef,fbc,dca->bde", "abab->ba", "...ab,...ab", "...ab,...b->...a", "a...,a...", "a...,a...", ] @pytest.mark.parametrize("subscripts", einsum_cases) @pytest.mark.parametrize("density", [0.1, 1.0]) def test_einsum(subscripts, density): d = 4 terms = subscripts.split("->")[0].split(",") arrays = [sparse.random((d,) * len(term), density=density) for term in terms] sparse_out = sparse.einsum(subscripts, *arrays) numpy_out = np.einsum(subscripts, *(s.todense() for s in arrays)) if not numpy_out.shape: # scalar output assert np.allclose(numpy_out, sparse_out) else: # array output assert np.allclose(numpy_out, sparse_out.todense()) @pytest.mark.parametrize("input", [[[0, 0]], [[0, Ellipsis]], [[Ellipsis, 1], [Ellipsis]], [[0, 1], [0]]]) @pytest.mark.parametrize("density", [0.1, 1.0]) def test_einsum_nosubscript(input, density): d = 4 arrays = [sparse.random((d, d), density=density)] sparse_out = sparse.einsum(*arrays, *input) numpy_out = np.einsum(*(s.todense() for s in arrays), *input) if not numpy_out.shape: # scalar output assert np.allclose(numpy_out, sparse_out) else: # array output assert np.allclose(numpy_out, sparse_out.todense()) def test_einsum_input_fill_value(): 
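# einsum inputs with a nonzero fill_value (2 here) are expected to be rejected, hence the pytest.raises(ValueError) below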
x = sparse.random(shape=(2,), density=0.5, format="coo", fill_value=2) with pytest.raises(ValueError): sparse.einsum("cba", x) def test_einsum_no_input(): with pytest.raises(ValueError): sparse.einsum() @pytest.mark.parametrize("subscript", ["a+b->c", "i->&", "i->ij", "ij->jij", "a..,a...", ".i...", "a,a->->"]) def test_einsum_invalid_input(subscript): x = sparse.random(shape=(2,), density=0.5, format="coo") y = sparse.random(shape=(2,), density=0.5, format="coo") with pytest.raises(ValueError): sparse.einsum(subscript, x, y) @pytest.mark.parametrize("subscript", [0, [0, 0]]) def test_einsum_type_error(subscript): x = sparse.random(shape=(2,), density=0.5, format="coo") y = sparse.random(shape=(2,), density=0.5, format="coo") with pytest.raises(TypeError): sparse.einsum(subscript, x, y) format_test_cases = [ (("coo",), "coo"), (("dok",), "dok"), (("gcxs",), "gcxs"), (("dense",), "dense"), (("coo", "coo"), "coo"), (("dok", "coo"), "coo"), (("coo", "dok"), "coo"), (("coo", "dense"), "coo"), (("dense", "coo"), "coo"), (("dok", "dense"), "dok"), (("dense", "dok"), "dok"), (("gcxs", "dense"), "gcxs"), (("dense", "gcxs"), "gcxs"), (("dense", "dense"), "dense"), (("dense", "dok", "gcxs"), "coo"), ] @pytest.mark.parametrize("formats,expected", format_test_cases) def test_einsum_format(formats, expected, rng): inputs = [ rng.standard_normal((2, 2, 2)) if format == "dense" else sparse.random((2, 2, 2), density=0.5, format=format) for format in formats ] if len(inputs) == 1: eq = "abc->bc" elif len(inputs) == 2: eq = "abc,cda->abd" elif len(inputs) == 3: eq = "abc,cad,dea->abe" out = sparse.einsum(eq, *inputs) assert { sparse.COO: "coo", sparse.DOK: "dok", sparse.GCXS: "gcxs", np.ndarray: "dense", }[out.__class__] == expected def test_einsum_shape_check(): x = sparse.random((2, 3, 4), density=0.5) with pytest.raises(ValueError): sparse.einsum("aab", x) y = sparse.random((2, 3, 4), density=0.5) with pytest.raises(ValueError): sparse.einsum("abc,acb", x, y) @pytest.mark.parametrize("dtype", [np.int64, np.complex128]) def test_einsum_dtype(dtype): x = sparse.random((3, 3), density=0.5) * 10.0 x = x.astype(np.float64) y = sparse.COO.from_numpy(np.ones((3, 1), dtype=np.float64)) result = sparse.einsum("ij,i->j", x, y, dtype=dtype) assert result.dtype == dtype sparse-0.16.0a9/sparse/numba_backend/tests/test_elemwise.py000066400000000000000000000471371463475501500240060ustar00rootroot00000000000000import operator import sparse from sparse import COO, DOK from sparse.numba_backend._compressed import GCXS from sparse.numba_backend._utils import assert_eq, random_value_array import pytest import numpy as np @pytest.mark.parametrize( "func", [ np.expm1, np.log1p, np.sin, np.tan, np.sinh, np.tanh, np.floor, np.ceil, np.sqrt, np.conj, np.round, np.rint, lambda x: x.astype("int32"), np.conjugate, np.conj, lambda x: x.round(decimals=2), abs, ], ) @pytest.mark.parametrize("format", [COO, GCXS, DOK]) def test_elemwise(func, format): s = sparse.random((2, 3, 4), density=0.5, format=format) x = s.todense() fs = func(s) assert isinstance(fs, format) assert fs.nnz <= s.nnz assert_eq(func(x), fs) @pytest.mark.parametrize( "func", [ np.expm1, np.log1p, np.sin, np.tan, np.sinh, np.tanh, np.floor, np.ceil, np.sqrt, np.conj, np.round, np.rint, np.conjugate, np.conj, lambda x, out: x.round(decimals=2, out=out), ], ) @pytest.mark.parametrize("format", [COO, GCXS, DOK]) def test_elemwise_inplace(func, format): s = sparse.random((2, 3, 4), density=0.5, format=format) x = s.todense() func(s, out=s) func(x, out=x) assert 
isinstance(s, format) assert_eq(x, s) @pytest.mark.parametrize( "shape1, shape2", [ ((2, 3, 4), (3, 4)), ((3, 4), (2, 3, 4)), ((3, 1, 4), (3, 2, 4)), ((1, 3, 4), (3, 4)), ((3, 4, 1), (3, 4, 2)), ((1, 5), (5, 1)), ((3, 1), (3, 4)), ((3, 1), (1, 4)), ((1, 4), (3, 4)), ((2, 2, 2), (1, 1, 1)), ], ) @pytest.mark.parametrize("format", [COO, GCXS, DOK]) def test_elemwise_mixed(shape1, shape2, format, rng): s1 = sparse.random(shape1, density=0.5, format=format) x2 = rng.random(shape2) x1 = s1.todense() assert_eq(s1 * x2, x1 * x2) @pytest.mark.parametrize("format", [COO, GCXS, DOK]) def test_elemwise_mixed_empty(format, rng): s1 = sparse.random((2, 0, 4), density=0.5, format=format) x2 = rng.random((2, 0, 4)) x1 = s1.todense() assert_eq(s1 * x2, x1 * x2) @pytest.mark.parametrize("format", [COO, GCXS, DOK]) def test_elemwise_unsupported(format): class A: pass s1 = sparse.random((2, 3, 4), density=0.5, format=format) x2 = A() with pytest.raises(TypeError): s1 + x2 assert sparse.elemwise(operator.add, s1, x2) is NotImplemented @pytest.mark.parametrize("format", [COO, GCXS, DOK]) def test_elemwise_mixed_broadcast(format, rng): s1 = sparse.random((2, 3, 4), density=0.5, format=format) s2 = sparse.random(4, density=0.5) x3 = rng.random((3, 4)) x1 = s1.todense() x2 = s2.todense() def func(x1, x2, x3): return x1 * x2 * x3 assert_eq(sparse.elemwise(func, s1, s2, x3), func(x1, x2, x3)) @pytest.mark.parametrize( "func", [operator.mul, operator.add, operator.sub, operator.gt, operator.lt, operator.ne], ) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]) @pytest.mark.parametrize("format", [COO, GCXS, DOK]) def test_elemwise_binary(func, shape, format): xs = sparse.random(shape, density=0.5, format=format) ys = sparse.random(shape, density=0.5, format=format) x = xs.todense() y = ys.todense() assert_eq(func(xs, ys), func(x, y)) @pytest.mark.parametrize("func", [operator.imul, operator.iadd, operator.isub]) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]) @pytest.mark.parametrize("format", [COO, GCXS, DOK]) def test_elemwise_binary_inplace(func, shape, format): xs = sparse.random(shape, density=0.5, format=format) ys = sparse.random(shape, density=0.5, format=format) x = xs.todense() y = ys.todense() xs = func(xs, ys) x = func(x, y) assert_eq(xs, x) @pytest.mark.parametrize( "func", [ lambda x, y, z: x + y + z, lambda x, y, z: x * y * z, lambda x, y, z: x + y * z, lambda x, y, z: (x + y) * z, ], ) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]) @pytest.mark.parametrize( "formats", [ [COO, COO, COO], [GCXS, GCXS, GCXS], [COO, GCXS, GCXS], ], ) def test_elemwise_trinary(func, shape, formats): xs = sparse.random(shape, density=0.5, format=formats[0]) ys = sparse.random(shape, density=0.5, format=formats[1]) zs = sparse.random(shape, density=0.5, format=formats[2]) x = xs.todense() y = ys.todense() z = zs.todense() fs = sparse.elemwise(func, xs, ys, zs) assert_eq(fs, func(x, y, z)) @pytest.mark.parametrize("func", [operator.add, operator.mul]) @pytest.mark.parametrize( "shape1,shape2", [ ((2, 3, 4), (3, 4)), ((3, 4), (2, 3, 4)), ((3, 1, 4), (3, 2, 4)), ((1, 3, 4), (3, 4)), ((3, 4, 1), (3, 4, 2)), ((1, 5), (5, 1)), ((3, 1), (3, 4)), ((3, 1), (1, 4)), ((1, 4), (3, 4)), ((2, 2, 2), (1, 1, 1)), ], ) def test_binary_broadcasting(func, shape1, shape2): density1 = 1 if np.prod(shape1) == 1 else 0.5 density2 = 1 if np.prod(shape2) == 1 else 0.5 xs = sparse.random(shape1, density=density1) x = xs.todense() ys = sparse.random(shape2, 
density=density2) y = ys.todense() expected = func(x, y) actual = func(xs, ys) assert isinstance(actual, COO) assert_eq(expected, actual) assert np.count_nonzero(expected) == actual.nnz @pytest.mark.parametrize( "shape1,shape2", [((3, 4), (2, 3, 4)), ((3, 1, 4), (3, 2, 4)), ((3, 4, 1), (3, 4, 2))], ) def test_broadcast_to(shape1, shape2): a = sparse.random(shape1, density=0.5) x = a.todense() assert_eq(np.broadcast_to(x, shape2), a.broadcast_to(shape2)) @pytest.mark.parametrize( "shapes", [ [(2,), (3, 2), (4, 3, 2)], [(3,), (2, 3), (2, 2, 3)], [(2,), (2, 2), (2, 2, 2)], [(4,), (4, 4), (4, 4, 4)], [(4,), (4, 4), (4, 4, 4)], [(4,), (4, 4), (4, 4, 4)], [(1, 1, 2), (1, 3, 1), (4, 1, 1)], [(2,), (2, 1), (2, 1, 1)], ], ) @pytest.mark.parametrize( "func", [ lambda x, y, z: (x + y) * z, lambda x, y, z: x * (y + z), lambda x, y, z: x * y * z, lambda x, y, z: x + y + z, lambda x, y, z: x + y - z, lambda x, y, z: x - y + z, ], ) def test_trinary_broadcasting(shapes, func): args = [sparse.random(s, density=0.5) for s in shapes] dense_args = [arg.todense() for arg in args] fs = sparse.elemwise(func, *args) assert isinstance(fs, COO) assert_eq(fs, func(*dense_args)) @pytest.mark.parametrize( "shapes, func", [ ([(2,), (3, 2), (4, 3, 2)], lambda x, y, z: (x + y) * z), ([(3,), (2, 3), (2, 2, 3)], lambda x, y, z: x * (y + z)), ([(2,), (2, 2), (2, 2, 2)], lambda x, y, z: x * y * z), ([(4,), (4, 4), (4, 4, 4)], lambda x, y, z: x + y + z), ], ) @pytest.mark.parametrize("value", [np.nan, np.inf, -np.inf]) @pytest.mark.parametrize("fraction", [0.25, 0.5, 0.75, 1.0]) @pytest.mark.filterwarnings("ignore:invalid value") def test_trinary_broadcasting_pathological(shapes, func, value, fraction): args = [sparse.random(s, density=0.5, data_rvs=random_value_array(value, fraction)) for s in shapes] dense_args = [arg.todense() for arg in args] fs = sparse.elemwise(func, *args) assert isinstance(fs, COO) assert_eq(fs, func(*dense_args)) def test_sparse_broadcasting(monkeypatch): orig_unmatch_coo = sparse.numba_backend._umath._Elemwise._get_func_coords_data state = {"num_matches": 0} xs = sparse.random((3, 4), density=0.5) ys = sparse.random((3, 4), density=0.5) def mock_unmatch_coo(*args, **kwargs): result = orig_unmatch_coo(*args, **kwargs) if result is not None: state["num_matches"] += 1 return result monkeypatch.setattr(sparse.numba_backend._umath._Elemwise, "_get_func_coords_data", mock_unmatch_coo) xs * ys # Less than in case there's absolutely no overlap in some cases. assert state["num_matches"] <= 1 def test_dense_broadcasting(monkeypatch): orig_unmatch_coo = sparse.numba_backend._umath._Elemwise._get_func_coords_data state = {"num_matches": 0} xs = sparse.random((3, 4), density=0.5) ys = sparse.random((3, 4), density=0.5) def mock_unmatch_coo(*args, **kwargs): result = orig_unmatch_coo(*args, **kwargs) if result is not None: state["num_matches"] += 1 return result monkeypatch.setattr(sparse.numba_backend._umath._Elemwise, "_get_func_coords_data", mock_unmatch_coo) xs + ys # Less than in case there's absolutely no overlap in some cases. 
assert state["num_matches"] <= 3 @pytest.mark.parametrize("format", ["coo", "dok", "gcxs"]) def test_sparsearray_elemwise(format): xs = sparse.random((3, 4), density=0.5, format=format) ys = sparse.random((3, 4), density=0.5, format=format) x = xs.todense() y = ys.todense() fs = sparse.elemwise(operator.add, xs, ys) if format == "gcxs": assert isinstance(fs, GCXS) elif format == "dok": assert isinstance(fs, DOK) else: assert isinstance(fs, COO) assert_eq(fs, x + y) def test_ndarray_densification_fails(rng): xs = sparse.random((2, 3, 4), density=0.5) y = rng.random((3, 4)) with pytest.raises(ValueError): xs + y def test_elemwise_noargs(): def func(): return np.float64(5.0) with pytest.raises(ValueError, match=r"None of the args is sparse:"): sparse.elemwise(func) @pytest.mark.parametrize( "func", [ operator.pow, operator.truediv, operator.floordiv, operator.ge, operator.le, operator.eq, operator.mod, ], ) @pytest.mark.filterwarnings("ignore:divide by zero") @pytest.mark.filterwarnings("ignore:invalid value") @pytest.mark.parametrize("format", [COO, GCXS, DOK]) def test_nonzero_outout_fv_ufunc(func, format): xs = sparse.random((2, 3, 4), density=0.5, format=format) ys = sparse.random((2, 3, 4), density=0.5, format=format) x = xs.todense() y = ys.todense() f = func(x, y) fs = func(xs, ys) assert isinstance(fs, format) assert_eq(f, fs) @pytest.mark.parametrize( "func, scalar", [ (operator.mul, 5), (operator.add, 0), (operator.sub, 0), (operator.pow, 5), (operator.truediv, 3), (operator.floordiv, 4), (operator.gt, 5), (operator.lt, -5), (operator.ne, 0), (operator.ge, 5), (operator.le, -3), (operator.eq, 1), (operator.mod, 5), ], ) @pytest.mark.parametrize("convert_to_np_number", [True, False]) @pytest.mark.parametrize("format", [COO, GCXS, DOK]) def test_elemwise_scalar(func, scalar, convert_to_np_number, format): xs = sparse.random((2, 3, 4), density=0.5, format=format) if convert_to_np_number: scalar = np.float32(scalar) y = scalar x = xs.todense() fs = func(xs, y) assert isinstance(fs, format) assert xs.nnz >= fs.nnz assert_eq(fs, func(x, y)) @pytest.mark.parametrize( "func, scalar", [ (operator.mul, 5), (operator.add, 0), (operator.sub, 0), (operator.gt, -5), (operator.lt, 5), (operator.ne, 0), (operator.ge, -5), (operator.le, 3), (operator.eq, 1), ], ) @pytest.mark.parametrize("convert_to_np_number", [True, False]) def test_leftside_elemwise_scalar(func, scalar, convert_to_np_number): xs = sparse.random((2, 3, 4), density=0.5) if convert_to_np_number: scalar = np.float32(scalar) y = scalar x = xs.todense() fs = func(y, xs) assert isinstance(fs, COO) assert xs.nnz >= fs.nnz assert_eq(fs, func(y, x)) @pytest.mark.parametrize( "func, scalar", [ (operator.add, 5), (operator.sub, -5), (operator.pow, -3), (operator.truediv, 0), (operator.floordiv, 0), (operator.gt, -5), (operator.lt, 5), (operator.ne, 1), (operator.ge, -3), (operator.le, 3), (operator.eq, 0), ], ) @pytest.mark.filterwarnings("ignore:divide by zero") @pytest.mark.filterwarnings("ignore:invalid value") def test_scalar_output_nonzero_fv(func, scalar): xs = sparse.random((2, 3, 4), density=0.5) y = scalar x = xs.todense() f = func(x, y) fs = func(xs, y) assert isinstance(fs, COO) assert fs.nnz <= xs.nnz assert_eq(f, fs) @pytest.mark.parametrize("func", [operator.and_, operator.or_, operator.xor]) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]) @pytest.mark.parametrize("format", [COO, GCXS, DOK]) def test_bitwise_binary(func, shape, format): # Small arrays need high density to have nnz entries # Casting 
floats to int will result in all zeros, hence the * 100 xs = (sparse.random(shape, density=0.5, format=format) * 100).astype(np.int64) ys = (sparse.random(shape, density=0.5, format=format) * 100).astype(np.int64) x = xs.todense() y = ys.todense() assert_eq(func(xs, ys), func(x, y)) @pytest.mark.parametrize("func", [operator.iand, operator.ior, operator.ixor]) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]) @pytest.mark.parametrize("format", [COO, GCXS, DOK]) def test_bitwise_binary_inplace(func, shape, format): # Small arrays need high density to have nnz entries # Casting floats to int will result in all zeros, hence the * 100 xs = (sparse.random(shape, density=0.5, format=format) * 100).astype(np.int64) ys = (sparse.random(shape, density=0.5, format=format) * 100).astype(np.int64) x = xs.todense() y = ys.todense() xs = func(xs, ys) x = func(x, y) assert_eq(xs, x) @pytest.mark.parametrize("func", [operator.lshift, operator.rshift]) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]) def test_bitshift_binary(func, shape): # Small arrays need high density to have nnz entries # Casting floats to int will result in all zeros, hence the * 100 xs = (sparse.random(shape, density=0.5) * 100).astype(np.int64) # Can't merge into test_bitwise_binary because left/right shifting # with something >= 64 isn't defined. ys = (sparse.random(shape, density=0.5) * 64).astype(np.int64) x = xs.todense() y = ys.todense() assert_eq(func(xs, ys), func(x, y)) @pytest.mark.parametrize("func", [operator.ilshift, operator.irshift]) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]) def test_bitshift_binary_inplace(func, shape): # Small arrays need high density to have nnz entries # Casting floats to int will result in all zeros, hence the * 100 xs = (sparse.random(shape, density=0.5) * 100).astype(np.int64) # Can't merge into test_bitwise_binary because left/right shifting # with something >= 64 isn't defined. ys = (sparse.random(shape, density=0.5) * 64).astype(np.int64) x = xs.todense() y = ys.todense() xs = func(xs, ys) x = func(x, y) assert_eq(xs, x) @pytest.mark.parametrize("func", [operator.and_]) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]) def test_bitwise_scalar(func, shape, rng): # Small arrays need high density to have nnz entries # Casting floats to int will result in all zeros, hence the * 100 xs = (sparse.random(shape, density=0.5) * 100).astype(np.int64) y = rng.integers(100) x = xs.todense() assert_eq(func(xs, y), func(x, y)) assert_eq(func(y, xs), func(y, x)) @pytest.mark.parametrize("func", [operator.lshift, operator.rshift]) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]) def test_bitshift_scalar(func, shape, rng): # Small arrays need high density to have nnz entries # Casting floats to int will result in all zeros, hence the * 100 xs = (sparse.random(shape, density=0.5) * 100).astype(np.int64) # Can't merge into test_bitwise_binary because left/right shifting # with something >= 64 isn't defined. 
y = rng.integers(64) x = xs.todense() assert_eq(func(xs, y), func(x, y)) @pytest.mark.parametrize("func", [operator.invert]) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]) def test_unary_bitwise_nonzero_output_fv(func, shape): # Small arrays need high density to have nnz entries # Casting floats to int will result in all zeros, hence the * 100 xs = (sparse.random(shape, density=0.5) * 100).astype(np.int64) x = xs.todense() f = func(x) fs = func(xs) assert isinstance(fs, COO) assert fs.nnz <= xs.nnz assert_eq(f, fs) @pytest.mark.parametrize("func", [operator.or_, operator.xor]) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]) def test_binary_bitwise_nonzero_output_fv(func, shape, rng): # Small arrays need high density to have nnz entries # Casting floats to int will result in all zeros, hence the * 100 xs = (sparse.random(shape, density=0.5) * 100).astype(np.int64) y = rng.integers(1, 100) x = xs.todense() f = func(x, y) fs = func(xs, y) assert isinstance(fs, COO) assert fs.nnz <= xs.nnz assert_eq(f, fs) @pytest.mark.parametrize( "func", [operator.mul, operator.add, operator.sub, operator.gt, operator.lt, operator.ne], ) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]) def test_elemwise_nonzero_input_fv(func, shape, rng): xs = sparse.random(shape, density=0.5, fill_value=rng.random()) ys = sparse.random(shape, density=0.5, fill_value=rng.random()) x = xs.todense() y = ys.todense() assert_eq(func(xs, ys), func(x, y)) @pytest.mark.parametrize("func", [operator.lshift, operator.rshift]) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]) def test_binary_bitshift_densification_fails(func, shape, rng): # Small arrays need high density to have nnz entries # Casting floats to int will result in all zeros, hence the * 100 x = rng.integers(1, 100) ys = (sparse.random(shape, density=0.5) * 64).astype(np.int64) y = ys.todense() f = func(x, y) fs = func(x, ys) assert isinstance(fs, COO) assert fs.nnz <= ys.nnz assert_eq(f, fs) @pytest.mark.parametrize("func", [operator.and_, operator.or_, operator.xor]) @pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]) def test_bitwise_binary_bool(func, shape): # Small arrays need high density to have nnz entries xs = sparse.random(shape, density=0.5).astype(bool) ys = sparse.random(shape, density=0.5).astype(bool) x = xs.todense() y = ys.todense() assert_eq(func(xs, ys), func(x, y)) def test_elemwise_binary_empty(): x = COO({}, shape=(10, 10)) y = sparse.random((10, 10), density=0.5) for z in [x * y, y * x]: assert z.nnz == 0 assert z.coords.shape == (2, 0) assert z.data.shape == (0,) @pytest.mark.parametrize("dtype", [np.complex64, np.complex128]) def test_nanmean_regression(dtype): array = np.array([0.0 + 0.0j, 0.0 + np.nan * 1j], dtype=dtype) sparray = sparse.COO.from_numpy(array) assert_eq(array, sparray) # Regression test for gh-580 @pytest.mark.filterwarnings("error") def test_no_deprecation_warning(): a = np.array([1, 2]) s = sparse.COO(a, a, shape=(3,)) assert_eq(s == s, np.broadcast_to(True, s.shape)) # Regression test for gh-587 def test_no_out_upcast(): a = sparse.COO([[0, 1], [0, 1]], [1, 1], shape=(2, 2)) with pytest.raises(TypeError): a *= 0.5 sparse-0.16.0a9/sparse/numba_backend/tests/test_io.py000066400000000000000000000013721463475501500225720ustar00rootroot00000000000000import sparse from sparse import load_npz, save_npz from sparse.numba_backend._utils import assert_eq import pytest import numpy as np 
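# --- Illustrative sketch added by the editor; not part of the original test suite. ---
# A minimal, self-contained example of the .npz round-trip that the tests below
# parametrize over formats and compression settings. It only uses the public
# `sparse.random`, `save_npz`, `load_npz` and `assert_eq` helpers imported above;
# the `tmp_path` argument mirrors the pytest fixture used by the real tests, and
# the file name is arbitrary. The leading underscore keeps pytest from collecting it.
def _npz_roundtrip_sketch(tmp_path):
    # Build a small random COO array and write it to disk.
    s = sparse.random((2, 3, 4), density=0.25, format="coo")
    filename = tmp_path / "example.npz"
    save_npz(filename, s, compressed=True)
    # Reading it back should reproduce both the sparse object and its dense view.
    z = load_npz(filename)
    assert_eq(s, z)
    assert_eq(s.todense(), z.todense())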
@pytest.mark.parametrize("compression", [True, False]) @pytest.mark.parametrize("format", ["coo", "gcxs"]) def test_save_load_npz_file(tmp_path, compression, format): x = sparse.random((2, 3, 4, 5), density=0.25, format=format) y = x.todense() filename = tmp_path / "mat.npz" save_npz(filename, x, compressed=compression) z = load_npz(filename) assert_eq(x, z) assert_eq(y, z.todense()) def test_load_wrong_format_exception(tmp_path): x = np.array([1, 2, 3]) filename = tmp_path / "mat.npz" np.savez(filename, x) with pytest.raises(RuntimeError): load_npz(filename) sparse-0.16.0a9/sparse/numba_backend/tests/test_namespace.py000066400000000000000000000060421463475501500241160ustar00rootroot00000000000000import sparse def test_namespace(): assert set(sparse.__all__) == { "COO", "DOK", "GCXS", "SparseArray", "abs", "acos", "acosh", "add", "all", "any", "argmax", "argmin", "argwhere", "asCOO", "as_coo", "asarray", "asin", "asinh", "asnumpy", "astype", "atan", "atan2", "atanh", "bitwise_and", "bitwise_invert", "bitwise_left_shift", "bitwise_not", "bitwise_or", "bitwise_right_shift", "bitwise_xor", "bool", "broadcast_arrays", "broadcast_to", "can_cast", "ceil", "clip", "complex128", "complex64", "concat", "concatenate", "conj", "cos", "cosh", "diagonal", "diagonalize", "divide", "dot", "e", "einsum", "elemwise", "empty", "empty_like", "equal", "exp", "expand_dims", "expm1", "eye", "finfo", "flip", "float16", "float32", "float64", "floor", "floor_divide", "full", "full_like", "greater", "greater_equal", "iinfo", "imag", "inf", "int16", "int32", "int64", "int8", "isfinite", "isinf", "isnan", "isneginf", "isposinf", "kron", "less", "less_equal", "load_npz", "log", "log10", "log1p", "log2", "logaddexp", "logical_and", "logical_not", "logical_or", "logical_xor", "matrix_transpose", "matmul", "max", "mean", "min", "moveaxis", "multiply", "nan", "nanmax", "nanmean", "nanmin", "nanprod", "nanreduce", "nansum", "negative", "newaxis", "nonzero", "not_equal", "ones", "ones_like", "outer", "pad", "permute_dims", "pi", "positive", "pow", "prod", "random", "real", "remainder", "reshape", "result_type", "roll", "round", "save_npz", "sign", "sin", "sinh", "sort", "sqrt", "square", "squeeze", "stack", "std", "subtract", "sum", "take", "tan", "tanh", "tensordot", "tril", "triu", "trunc", "uint16", "uint32", "uint64", "uint8", "unique_counts", "unique_values", "var", "vecdot", "where", "zeros", "zeros_like", } for attr in sparse.__all__: assert hasattr(sparse, attr) assert sorted(sparse.__all__) == sparse.__all__ sparse-0.16.0a9/sparse/tests/000077500000000000000000000000001463475501500157765ustar00rootroot00000000000000sparse-0.16.0a9/sparse/tests/conftest.py000066400000000000000000000005531463475501500202000ustar00rootroot00000000000000import sparse import pytest import numpy as np @pytest.fixture(scope="session") def backend(): yield sparse.BACKEND @pytest.fixture(scope="module") def graph(): return np.array( [ [0, 1, 1, 0, 0], [0, 0, 1, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 1], [0, 1, 0, 1, 0], ] ) sparse-0.16.0a9/sparse/tests/test_backends.py000066400000000000000000000174311463475501500211670ustar00rootroot00000000000000import sparse import pytest import numpy as np import scipy as sp import scipy.sparse as sps import scipy.sparse.csgraph as spgraph import scipy.sparse.linalg as splin from numpy.testing import assert_almost_equal, assert_equal def test_backends(backend): rng = np.random.default_rng(0) x = sparse.random((100, 10, 100), density=0.01, random_state=rng) y = sparse.random((100, 10, 100), density=0.01, 
random_state=rng) if backend == sparse.BackendType.Finch: import finch def storage(): return finch.Storage(finch.Dense(finch.SparseList(finch.SparseList(finch.Element(0.0)))), order="C") x = x.to_storage(storage()) y = y.to_storage(storage()) else: x.asformat("gcxs") y.asformat("gcxs") z = x + y result = sparse.sum(z) assert result.shape == () def test_finch_lazy_backend(backend): if backend != sparse.BackendType.Finch: pytest.skip("Tested only for Finch backend") import finch np_eye = np.eye(5) sp_arr = sps.csr_matrix(np_eye) finch_dense = finch.Tensor(np_eye) assert np.shares_memory(finch_dense.todense(), np_eye) finch_arr = finch.Tensor(sp_arr) assert_equal(finch_arr.todense(), np_eye) transposed = sparse.permute_dims(finch_arr, (1, 0)) assert_equal(transposed.todense(), np_eye.T) @sparse.compiled def my_fun(tns1, tns2): tmp = sparse.add(tns1, tns2) return sparse.sum(tmp, axis=0) result = my_fun(finch_dense, finch_arr) assert_equal(result.todense(), np.sum(2 * np_eye, axis=0)) @pytest.mark.parametrize("format, order", [("csc", "F"), ("csr", "C"), ("coo", "F"), ("coo", "C")]) def test_asarray(backend, format, order): arr = np.eye(5, order=order) result = sparse.asarray(arr, format=format) assert_equal(result.todense(), arr) @pytest.mark.parametrize("format, order", [("csc", "F"), ("csr", "C"), ("coo", "F"), ("coo", "C")]) def test_scipy_spsolve(backend, format, order): x = np.eye(10, order=order) * 2 y = np.ones((10, 1), order=order) x_pydata = sparse.asarray(x, format=format) y_pydata = sparse.asarray(y, format="coo") actual = splin.spsolve(x_pydata, y_pydata) expected = np.linalg.solve(x, y.ravel()) assert_almost_equal(actual, expected) @pytest.mark.parametrize("format, order", [("csc", "F"), ("csr", "C"), ("coo", "F"), ("coo", "C")]) def test_scipy_inv(backend, format, order): x = np.eye(10, order=order) * 2 x_pydata = sparse.asarray(x, format=format) actual = splin.inv(x_pydata) expected = np.linalg.inv(x) assert_almost_equal(actual.todense(), expected) @pytest.mark.skip(reason="https://github.com/scipy/scipy/pull/20759") @pytest.mark.parametrize("format, order", [("csc", "F"), ("csr", "C"), ("coo", "F"), ("coo", "C")]) def test_scipy_norm(backend, format, order): x = np.eye(10, order=order) * 2 x_pydata = sparse.asarray(x, format=format) actual = splin.norm(x_pydata) expected = sp.linalg.norm(x) assert_almost_equal(actual, expected) @pytest.mark.skip(reason="https://github.com/scipy/scipy/pull/20759") @pytest.mark.parametrize("format, order", [("csc", "F"), ("csr", "C"), ("coo", "F"), ("coo", "C")]) def test_scipy_lsqr(backend, format, order): x = np.eye(10, order=order) * 2 y = np.ones((10, 1), order=order) x_pydata = sparse.asarray(x, format=format) actual_x, _ = splin.lsqr(x_pydata, y)[:2] expected_x, _ = sp.linalg.lstsq(x, y)[:2] assert_almost_equal(actual_x, expected_x.ravel()) @pytest.mark.skip(reason="https://github.com/scipy/scipy/pull/20759") @pytest.mark.parametrize("format, order", [("csc", "F"), ("csr", "C"), ("coo", "F"), ("coo", "C")]) def test_scipy_eigs(backend, format, order): x = np.eye(10, order=order) * 2 x_pydata = sparse.asarray(x, format=format) x_sp = sps.coo_matrix(x) actual_vals, _ = splin.eigs(x_pydata, k=3) expected_vals, _ = splin.eigs(x_sp, k=3) assert_almost_equal(actual_vals, expected_vals) @pytest.mark.parametrize( "matrix_fn, format, order", [(sps.csc_matrix, "csc", "F"), (sps.csr_matrix, "csr", "C"), (sps.coo_matrix, "coo", "F")], ) def test_scipy_connected_components(backend, graph, matrix_fn, format, order): graph = matrix_fn(np.array(graph, 
order=order)) sp_graph = sparse.asarray(graph, format=format) actual_n_components, actual_labels = spgraph.connected_components(sp_graph) expected_n_components, expected_labels = spgraph.connected_components(graph) assert actual_n_components == expected_n_components assert_equal(actual_labels, expected_labels) @pytest.mark.parametrize( "matrix_fn, format, order", [(sps.csc_matrix, "csc", "F"), (sps.csr_matrix, "csr", "C"), (sps.coo_matrix, "coo", "F")], ) def test_scipy_laplacian(backend, graph, matrix_fn, format, order): graph = matrix_fn(np.array(graph, order=order)) sp_graph = sparse.asarray(graph, format=format) actual_lap = spgraph.laplacian(sp_graph) expected_lap = spgraph.laplacian(graph) assert_equal(actual_lap.todense(), expected_lap.toarray()) @pytest.mark.parametrize("matrix_fn, format, order", [(sps.csc_matrix, "csc", "F"), (sps.csr_matrix, "csr", "C")]) def test_scipy_shortest_path(backend, graph, matrix_fn, format, order): graph = matrix_fn(np.array(graph, order=order)) sp_graph = sparse.asarray(graph, format=format) actual_dist_matrix, actual_predecessors = spgraph.shortest_path(sp_graph, return_predecessors=True) expected_dist_matrix, expected_predecessors = spgraph.shortest_path(graph, return_predecessors=True) assert_equal(actual_dist_matrix, expected_dist_matrix) assert_equal(actual_predecessors, expected_predecessors) @pytest.mark.parametrize( "matrix_fn, format, order", [(sps.csc_matrix, "csc", "F"), (sps.csr_matrix, "csr", "C"), (sps.coo_matrix, "coo", "F")], ) def test_scipy_breadth_first_tree(backend, graph, matrix_fn, format, order): graph = matrix_fn(np.array(graph, order=order)) sp_graph = sparse.asarray(graph, format=format) actual_bft = spgraph.breadth_first_tree(sp_graph, 0, directed=False) expected_bft = spgraph.breadth_first_tree(graph, 0, directed=False) assert_equal(actual_bft.todense(), expected_bft.toarray()) @pytest.mark.parametrize( "matrix_fn, format, order", [(sps.csc_matrix, "csc", "F"), (sps.csr_matrix, "csr", "C"), (sps.coo_matrix, "coo", "F")], ) def test_scipy_dijkstra(backend, graph, matrix_fn, format, order): graph = matrix_fn(np.array(graph, order=order)) sp_graph = sparse.asarray(graph, format=format) actual_dist_matrix = spgraph.dijkstra(sp_graph, directed=False) expected_dist_matrix = spgraph.dijkstra(graph, directed=False) assert_equal(actual_dist_matrix, expected_dist_matrix) @pytest.mark.parametrize( "matrix_fn, format, order", [(sps.csc_matrix, "csc", "F"), (sps.csr_matrix, "csr", "C"), (sps.coo_matrix, "coo", "F")], ) def test_scipy_minimum_spanning_tree(backend, graph, matrix_fn, format, order): graph = matrix_fn(np.array(graph, order=order)) sp_graph = sparse.asarray(graph, format=format) actual_span_tree = spgraph.minimum_spanning_tree(sp_graph) expected_span_tree = spgraph.minimum_spanning_tree(graph) assert_equal(actual_span_tree.todense(), expected_span_tree.toarray()) @pytest.mark.skip(reason="https://github.com/scikit-learn/scikit-learn/pull/29031") @pytest.mark.parametrize("matrix_fn, format, order", [(sps.csc_matrix, "csc", "F")]) def test_scikit_learn_dispatch(backend, graph, matrix_fn, format, order): from sklearn.cluster import KMeans graph = matrix_fn(np.array(graph, order=order)) sp_graph = sparse.asarray(graph, format=format) neigh = KMeans(n_clusters=2) actual_labels = neigh.fit_predict(sp_graph) neigh = KMeans(n_clusters=2) expected_labels = neigh.fit_predict(graph) assert_equal(actual_labels, expected_labels) sparse-0.16.0a9/tox.ini000066400000000000000000000001361463475501500146520ustar00rootroot00000000000000[tox] 
envlist = py36, py37

[testenv]
commands=
    pytest {posargs}
extras=
    tests
    tox